//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h"
#include <map>
#include <iostream>
using namespace llvm;

namespace llvm {
  //===--------------------------------------------------------------------===//
  /// FunctionLoweringInfo - This contains information that is global to a
  /// function that is used when lowering a region of the function.
  class FunctionLoweringInfo {
  public:
    TargetLowering &TLI;
    Function &Fn;
    MachineFunction &MF;
    SSARegMap *RegMap;

    FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);

    /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
    std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

    /// ValueMap - Since we emit code for the function a basic block at a time,
    /// we must remember which virtual registers hold the values that are used
    /// across basic blocks.
    std::map<const Value*, unsigned> ValueMap;

    /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
    /// the entry block.  This allows the allocas to be efficiently referenced
    /// anywhere in the function.
    std::map<const AllocaInst*, int> StaticAllocaMap;

    unsigned MakeReg(MVT::ValueType VT) {
      return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
    }

    unsigned CreateRegForValue(const Value *V) {
      MVT::ValueType VT = TLI.getValueType(V->getType());
      // The common case is that we will only create one register for this
      // value.  If we have that case, create and return the virtual register.
      unsigned NV = TLI.getNumElements(VT);
      if (NV == 1) return MakeReg(VT);

      // If this value is represented with multiple target registers, make sure
      // to create enough consecutive registers of the right (smaller) type.
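      // For example, a value such as an i64 on a target whose widest register
      // is 32 bits would (assuming getNumElements reports 2 for it) get two
      // consecutive i32 virtual registers; the number of the first register is
      // returned.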
      unsigned NT = VT-1;  // Find the type to use.
      while (TLI.getNumElements((MVT::ValueType)NT) != 1)
        --NT;

      unsigned R = MakeReg((MVT::ValueType)NT);
      for (unsigned i = 1; i != NV; ++i)
        MakeReg((MVT::ValueType)NT);
      return R;
    }

    unsigned InitializeRegForValue(const Value *V) {
      unsigned &R = ValueMap[V];
      assert(R == 0 && "Already initialized this value register!");
      return R = CreateRegForValue(V);
    }
  };
}

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
  : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  for (Function::aiterator AI = Fn.abegin(), E = Fn.aend(); AI != E; ++AI)
    InitializeRegForValue(AI);

  Function::iterator BB = Fn.begin(), E = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
        unsigned Align = TLI.getTargetData().getTypeAlignment(Ty);
        TySize *= CUI->getValue();   // Get total allocated size.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), e = BB->end(); I != e; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg &&"PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}


//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData &TD;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      FuncInfo(funcinfo) {
  }

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }


  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

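  /// getValue - Return the SDOperand computed for the LLVM value V in this
  /// block, if one was already recorded.  Otherwise, constants and static
  /// allocas are materialized on demand; any other value must already have a
  /// virtual register recorded in FuncInfo.ValueMap, and a CopyFromReg from
  /// that register is returned.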
  SDOperand getValue(const Value *V) {
    SDOperand &N = NodeMap[V];
    if (N.Val) return N;

    MVT::ValueType VT = TLI.getValueType(V->getType());
    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
        visit(CE->getOpcode(), *CE);
        assert(N.Val && "visit didn't populate the ValueMap!");
        return N;
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
        return N = DAG.getGlobalAddress(GV, VT);
      } else if (isa<ConstantPointerNull>(C)) {
        return N = DAG.getConstant(0, TLI.getPointerTy());
      } else if (isa<UndefValue>(C)) {
        /// FIXME: Implement UNDEFVALUE better.
        if (MVT::isInteger(VT))
          return N = DAG.getConstant(0, VT);
        else if (MVT::isFloatingPoint(VT))
          return N = DAG.getConstantFP(0, VT);
        else
          assert(0 && "Unknown value type!");

      } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
        return N = DAG.getConstantFP(CFP->getValue(), VT);
      } else {
        // Canonicalize all constant ints to be unsigned.
        return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
      }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      std::map<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
    }

    std::map<const Value*, unsigned>::const_iterator VMI =
      FuncInfo.ValueMap.find(V);
    assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
    return N = DAG.getCopyFromReg(VMI->second, VT);
  }

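  /// setValue - Record NewN as the SDOperand computed for the LLVM value V in
  /// the current block.  A value may only be set once.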
  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // These all get lowered before this pass.
  void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  //
  void visitBinary(User &I, unsigned Opcode);
  void visitAdd(User &I) { visitBinary(I, ISD::ADD); }
  void visitSub(User &I) { visitBinary(I, ISD::SUB); }
  void visitMul(User &I) { visitBinary(I, ISD::MUL); }
  void visitDiv(User &I) {
    visitBinary(I, I.getType()->isUnsigned() ? ISD::UDIV : ISD::SDIV);
  }
  void visitRem(User &I) {
    visitBinary(I, I.getType()->isUnsigned() ? ISD::UREM : ISD::SREM);
  }
  void visitAnd(User &I) { visitBinary(I, ISD::AND); }
  void visitOr (User &I) { visitBinary(I, ISD::OR); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR); }
  void visitShl(User &I) { visitBinary(I, ISD::SHL); }
  void visitShr(User &I) {
    visitBinary(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);
  //

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);

  // FIXME: These should go through the FunctionLoweringInfo object!!!
  void visitVAStart(CallInst &I);
  void visitVANext(VANextInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitReturnAddress(CallInst &I);
  void visitFrameAddress(CallInst &I);

  void visitMemSet(CallInst &I);
  void visitMemCpy(CallInst &I);
  void visitMemMove(CallInst &I);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm

void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, DAG.getRoot()));
    return;
  }

  SDOperand Op1 = getValue(I.getOperand(0));
  switch (Op1.getValueType()) {
  default: assert(0 && "Unknown value type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    // Extend integer types to 32-bits.
    if (I.getOperand(0)->getType()->isSigned())
      Op1 = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Op1);
    else
      Op1 = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op1);
    break;
  case MVT::f32:
    // Extend float to double.
    Op1 = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Op1);
    break;
  case MVT::i32:
  case MVT::i64:
  case MVT::f64:
    break; // No extension needed!
  }

  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, DAG.getRoot(), Op1));
}

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
  CurMBB->addSuccessor(Succ0MBB);

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, DAG.getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
  } else {
    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
    CurMBB->addSuccessor(Succ1MBB);

    SDOperand Cond = getValue(I.getCondition());

    if (Succ1MBB == NextBlock) {
      // If the condition is false, fall through.  This means we should branch
      // to Succ #0 if the condition is true.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, DAG.getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
    } else if (Succ0MBB == NextBlock) {
      // If the condition is true, fall through.  This means we should branch
      // to Succ #1 if the condition is false.  Invert the condition first.
      SDOperand True = DAG.getConstant(1, Cond.getValueType());
      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, DAG.getRoot(),
                              Cond, DAG.getBasicBlock(Succ1MBB)));
    } else {
      // Neither edge is a fall through.  If the comparison is true, jump to
      // Succ #0, otherwise branch unconditionally to Succ #1.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, DAG.getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, DAG.getRoot(),
                              DAG.getBasicBlock(Succ1MBB)));
    }
  }
}

void SelectionDAGLowering::visitBinary(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitSetCC(User &I, ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(Opcode, Op1, Op2));
}

void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond = getValue(I.getOperand(0));
  SDOperand TrueVal = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                           TrueVal, FalseVal));
}

void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
  MVT::ValueType DestTy = TLI.getValueType(I.getType());

  if (N.getValueType() == DestTy) {
    setValue(&I, N);  // noop cast.
  } else if (isInteger(SrcTy)) {
    if (isInteger(DestTy)) {        // Int -> Int cast
      if (DestTy < SrcTy)           // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
    } else {                        // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
    }
  } else {
    assert(isFloatingPoint(SrcTy) && "Unknown value type!");
    if (isFloatingPoint(DestTy)) {  // FP -> FP cast
      if (DestTy < SrcTy)           // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
    } else {                        // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));
    }
  }
}

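/// visitGetElementPtr - Lower a getelementptr by walking its indices: a struct
/// field adds the field's constant byte offset to the pointer, while an
/// array/pointer index is scaled by the element size (after extending or
/// truncating the index to pointer width) and added to the pointer.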
void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();
  const Type *UIntPtrTy = TD.getIntPtrType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();
      if (!isa<Constant>(Idx) || !cast<Constant>(Idx)->isNullValue()) {
        // N = N + Idx * ElementSize;
        uint64_t ElementSize = TD.getTypeSize(Ty);
        SDOperand IdxN = getValue(Idx), Scale = getIntPtrConstant(ElementSize);

        // If the index is smaller or larger than intptr_t, truncate or extend
        // it.
        if (IdxN.getValueType() < Scale.getValueType()) {
          if (Idx->getType()->isSigned())
            IdxN = DAG.getNode(ISD::SIGN_EXTEND, Scale.getValueType(), IdxN);
          else
            IdxN = DAG.getNode(ISD::ZERO_EXTEND, Scale.getValueType(), IdxN);
        } else if (IdxN.getValueType() > Scale.getValueType())
          IdxN = DAG.getNode(ISD::TRUNCATE, Scale.getValueType(), IdxN);

        IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);

        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
      }
    }
  }
  setValue(&I, N);
}

void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
  unsigned Align = TLI.getTargetData().getTypeAlignment(Ty);

  SDOperand AllocSize = getValue(I.getArraySize());

  assert(AllocSize.getValueType() == TLI.getPointerTy() &&
         "FIXME: should extend or truncate to pointer size!");

  AllocSize = DAG.getNode(ISD::MUL, TLI.getPointerTy(), AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to the
  // stack alignment, ignore it and round the size of the allocation up to the
  // stack alignment size.  If the requested alignment is greater than the
  // stack alignment, we note it in the DYNAMIC_STACKALLOC node.
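  // For example, with a 16-byte stack alignment a 20-byte request would be
  // rounded up to (20 + 15) & ~15 == 32 bytes here.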
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
  }

  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, AllocSize.getValueType(),
                              DAG.getRoot(), AllocSize,
                              getIntPtrConstant(Align));
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}


void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));
  SDOperand L = DAG.getLoad(TLI.getValueType(I.getType()), DAG.getRoot(), Ptr);
  DAG.setRoot(setValue(&I, L).getValue(1));
}


void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, DAG.getRoot(), Src, Ptr));
}

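/// visitCall - Lower a call instruction.  Intrinsics with dedicated lowering
/// are dispatched to the visit* helpers above; all other calls are lowered
/// through TargetLowering::LowerCallTo.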
void SelectionDAGLowering::visitCall(CallInst &I) {
  if (Function *F = I.getCalledFunction())
    switch (F->getIntrinsicID()) {
    case 0: break;  // Not an intrinsic.
    case Intrinsic::vastart: visitVAStart(I); return;
    case Intrinsic::vaend:   visitVAEnd(I); return;
    case Intrinsic::vacopy:  visitVACopy(I); return;
    case Intrinsic::returnaddress:
      visitReturnAddress(I); return;
    case Intrinsic::frameaddress:
      visitFrameAddress(I); return;
    default:
      // FIXME: IMPLEMENT THESE.
      // readport, writeport, readio, writeio
      assert(0 && "This intrinsic is not implemented yet!");
      return;
    case Intrinsic::memcpy:  visitMemCpy(I); return;
    case Intrinsic::memset:  visitMemSet(I); return;
    case Intrinsic::memmove: visitMemMove(I); return;

    case Intrinsic::isunordered:
      setValue(&I, DAG.getSetCC(ISD::SETUO, getValue(I.getOperand(1)),
                                getValue(I.getOperand(2))));
      return;
    }

  SDOperand Callee = getValue(I.getOperand(0));
  std::vector<std::pair<SDOperand, const Type*> > Args;

  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    Value *Arg = I.getOperand(i);
    SDOperand ArgNode = getValue(Arg);
    Args.push_back(std::make_pair(ArgNode, Arg->getType()));
  }

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), I.getType(), Callee, Args, DAG);
  if (I.getType() != Type::VoidTy)
    setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitMalloc(MallocInst &I) {
  SDOperand Src = getValue(I.getOperand(0));

  MVT::ValueType IntPtr = TLI.getPointerTy();
  // FIXME: Extend or truncate to the intptr size.
  assert(Src.getValueType() == IntPtr && "Need to adjust the amount!");

  // Scale the source by the type size.
  uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
  Src = DAG.getNode(ISD::MUL, Src.getValueType(),
                    Src, getIntPtrConstant(ElementSize));

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), I.getType(),
                    DAG.getExternalSymbol("malloc", IntPtr),
                    Args, DAG);
  setValue(&I, Result.first);  // Pointers always fit in registers
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitFree(FreeInst &I) {
  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(0)),
                                TLI.getTargetData().getIntPtrType()));
  MVT::ValueType IntPtr = TLI.getPointerTy();
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), Type::VoidTy,
                    DAG.getExternalSymbol("free", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitVAStart(CallInst &I) {
  // We have no sane default behavior, just emit a useful error message and
  // bail out.
  std::cerr << "Variable arguments support not implemented for this target!\n";
  abort();
}

void SelectionDAGLowering::visitVANext(VANextInst &I) {
  // We have no sane default behavior, just emit a useful error message and
  // bail out.
  std::cerr << "Variable arguments support not implemented for this target!\n";
  abort();
}

void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
  // We have no sane default behavior, just emit a useful error message and
  // bail out.
  std::cerr << "Variable arguments support not implemented for this target!\n";
  abort();
}

void SelectionDAGLowering::visitVAEnd(CallInst &I) {
  // By default, this is a noop.  On almost all targets, this is fine.
}

void SelectionDAGLowering::visitVACopy(CallInst &I) {
  // By default, vacopy just does a simple pointer copy.
  setValue(&I, getValue(I.getOperand(1)));
}

void SelectionDAGLowering::visitReturnAddress(CallInst &I) {
  // It is always conservatively correct for llvm.returnaddress to return 0.
  setValue(&I, getIntPtrConstant(0));
}

void SelectionDAGLowering::visitFrameAddress(CallInst &I) {
  // It is always conservatively correct for llvm.frameaddress to return 0.
  setValue(&I, getIntPtrConstant(0));
}


void SelectionDAGLowering::visitMemSet(CallInst &I) {
  MVT::ValueType IntPtr = TLI.getPointerTy();
  const Type *IntPtrTy = TLI.getTargetData().getIntPtrType();

  // Extend the ubyte argument to be an int value for the call.
  SDOperand Val = getValue(I.getOperand(2));
  Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Val);

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(1)), IntPtrTy));
  Args.push_back(std::make_pair(Val, Type::IntTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(3)), IntPtrTy));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), Type::VoidTy,
                    DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitMemCpy(CallInst &I) {
  MVT::ValueType IntPtr = TLI.getPointerTy();
  const Type *IntPtrTy = TLI.getTargetData().getIntPtrType();

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(1)), IntPtrTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(2)), IntPtrTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(3)), IntPtrTy));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), Type::VoidTy,
                    DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitMemMove(CallInst &I) {
  MVT::ValueType IntPtr = TLI.getPointerTy();
  const Type *IntPtrTy = TLI.getTargetData().getIntPtrType();

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(1)), IntPtrTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(2)), IntPtrTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(3)), IntPtrTy));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), Type::VoidTy,
                    DAG.getExternalSymbol("memmove", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
  return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
}


bool SelectionDAGISel::runOnFunction(Function &Fn) {
  MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
  RegMap = MF.getSSARegMap();
  DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");

  FunctionLoweringInfo FuncInfo(TLI, Fn, MF);

  for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
    SelectBasicBlock(I, MF, FuncInfo);

  return true;
}

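/// CopyValueToVirtualRegister - Emit a CopyToReg node that copies the value
/// computed for V into virtual register Reg, chaining it onto the current DAG
/// root.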
void SelectionDAGISel::CopyValueToVirtualRegister(SelectionDAGLowering &SDL,
                                                  Value *V, unsigned Reg) {
  SelectionDAG &DAG = SDL.DAG;
  DAG.setRoot(DAG.getCopyToReg(DAG.getRoot(), SDL.getValue(V), Reg));
}

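/// BuildSelectionDAG - Build the SelectionDAG for the specified LLVM basic
/// block: copy incoming arguments into virtual registers if this is the entry
/// block, lower every non-terminator instruction, copy values used outside
/// this block into their virtual registers, record the registers that
/// successor PHI nodes will need in PHINodesToUpdate, and finally lower the
/// terminator.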
void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
       std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
       FunctionLoweringInfo &FuncInfo) {
  SelectionDAGLowering SDL(DAG, TLI, FuncInfo);

  // If this is the entry block, emit arguments.
  Function *F = LLVMBB->getParent();
  if (LLVMBB == &F->front()) {
    // FIXME: If an argument is only used in one basic block, we could directly
    // emit it (ONLY) into that block, not emitting the COPY_TO_VREG node. This
    // would improve codegen in several cases on X86 by allowing the loads to be
    // folded into the user operation.
    std::vector<SDOperand> Args = TLI.LowerArguments(*LLVMBB->getParent(), DAG);

    unsigned a = 0;
    for (Function::aiterator AI = F->abegin(), E = F->aend(); AI != E; ++AI,++a)
      if (!AI->use_empty()) {
        SDL.setValue(AI, Args[a]);
        CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
      }
  }

  BB = FuncInfo.MBBMap[LLVMBB];
  SDL.setCurrentBasicBlock(BB);

  // Lower all of the non-terminator instructions.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
       I != E; ++I)
    SDL.visit(*I);

  // Ensure that all instructions which are used outside of their defining
  // blocks are available as virtual registers.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
    if (!I->use_empty()) {
      std::map<const Value*, unsigned>::iterator VMI =
        FuncInfo.ValueMap.find(I);
      if (VMI != FuncInfo.ValueMap.end())
        CopyValueToVirtualRegister(SDL, I, VMI->second);
    }

  // Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
  // ensure constants are generated when needed.  Remember the virtual registers
  // that need to be added to the Machine PHI nodes as input.  We cannot just
  // directly add them, because expansion might result in multiple MBB's for one
  // BB.  As such, the start of the BB might correspond to a different MBB than
  // the end.
  //

  // Emit constants only once even if used by multiple PHI nodes.
  std::map<Constant*, unsigned> ConstantsOut;

  // Check successor blocks' PHI nodes that expect a constant to be available
  // from this block.
  TerminatorInst *TI = LLVMBB->getTerminator();
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
    PHINode *PN;

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned Reg;
        Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
        if (Constant *C = dyn_cast<Constant>(PHIOp)) {
          unsigned &RegOut = ConstantsOut[C];
          if (RegOut == 0) {
            RegOut = FuncInfo.CreateRegForValue(C);
            CopyValueToVirtualRegister(SDL, C, RegOut);
          }
          Reg = RegOut;
        } else {
          Reg = FuncInfo.ValueMap[PHIOp];
          assert(Reg && "Didn't codegen value into a register!??");
        }

        // Remember that this register needs to be added to the machine PHI
        // node as the input for this MBB.
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        for (unsigned i = 0, e = NumElements; i != e; ++i)
          PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
      }
  }
  ConstantsOut.clear();

  // Lower the terminator after the copies are emitted.
  SDL.visit(*LLVMBB->getTerminator());
}

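/// SelectBasicBlock - Lower and select a single LLVM basic block: build the
/// SelectionDAG for it, legalize the DAG to operations and types the target
/// supports, select machine instructions from the legalized DAG, and finally
/// fill in the operands of the machine PHI nodes in successor blocks.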
void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
                                        FunctionLoweringInfo &FuncInfo) {
  SelectionDAG DAG(TLI.getTargetMachine(), MF);
  CurDAG = &DAG;
  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;

  // First step, lower LLVM code to some DAG.  This DAG may use operations and
  // types that are not supported by the target.
  BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);

  DEBUG(std::cerr << "Lowered selection DAG:\n");
  DEBUG(DAG.dump());

  // Second step, hack on the DAG until it only uses operations and types that
  // the target supports.
  DAG.Legalize(TLI);

  DEBUG(std::cerr << "Legalized selection DAG:\n");
  DEBUG(DAG.dump());

  // Third step, instruction select all of the operations to machine code,
  // adding the code to the MachineBasicBlock.
  InstructionSelectBasicBlock(DAG);

  DEBUG(std::cerr << "Selected machine code:\n");
  DEBUG(BB->dump());

  // Finally, now that we know which MBB is the last one the LLVM BB expanded
  // into, update the PHI nodes in the successor blocks.
  for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
    MachineInstr *PHI = PHINodesToUpdate[i].first;
    assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
           "This is not a machine PHI node that we are updating!");
    PHI->addRegOperand(PHINodesToUpdate[i].second);
    PHI->addMachineBasicBlockOperand(BB);
  }
}