//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include <map>
#include <iostream>
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
             cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
              cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0;
static const bool ViewSchedDAGs = 0;
#endif

namespace {
  cl::opt<SchedHeuristics>
  ISHeuristic(
    "sched",
    cl::desc("Choose scheduling style"),
    cl::init(defaultScheduling),
    cl::values(
      clEnumValN(defaultScheduling, "default",
                 "Target preferred scheduling style"),
      clEnumValN(noScheduling, "none",
                 "No scheduling: breadth first sequencing"),
      clEnumValN(simpleScheduling, "simple",
                 "Simple two pass scheduling: minimize critical path "
                 "and maximize processor utilization"),
      clEnumValN(simpleNoItinScheduling, "simple-noitin",
                 "Simple two pass scheduling: Same as simple "
                 "except using generic latency"),
      clEnumValN(listSchedulingBURR, "list-burr",
                 "Bottom up register reduction list scheduling"),
      clEnumValEnd));
} // namespace


namespace llvm {
  //===--------------------------------------------------------------------===//
  /// FunctionLoweringInfo - This contains information that is global to a
  /// function that is used when lowering a region of the function.
  class FunctionLoweringInfo {
  public:
    TargetLowering &TLI;
    Function &Fn;
    MachineFunction &MF;
    SSARegMap *RegMap;

    FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);

    /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
    std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

    /// ValueMap - Since we emit code for the function a basic block at a time,
    /// we must remember which virtual registers hold the values that are live
    /// across basic blocks.
    std::map<const Value*, unsigned> ValueMap;

    /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
    /// the entry block.  This allows the allocas to be efficiently referenced
    /// anywhere in the function.
    std::map<const AllocaInst*, int> StaticAllocaMap;

    unsigned MakeReg(MVT::ValueType VT) {
      return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
    }

    unsigned CreateRegForValue(const Value *V) {
      MVT::ValueType VT = TLI.getValueType(V->getType());
      // The common case is that we will only create one register for this
      // value.  If we have that case, create and return the virtual register.
      unsigned NV = TLI.getNumElements(VT);
      if (NV == 1) {
        // If we are promoting this value, pick the next largest supported type.
        return MakeReg(TLI.getTypeToTransformTo(VT));
      }

      // If this value is represented with multiple target registers, make sure
      // to create enough consecutive registers of the right (smaller) type.
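      // Walk down from VT to the largest value type that the target can hold
      // in a single register; the value is then split into NV consecutive
      // registers of that type.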
      unsigned NT = VT-1;  // Find the type to use.
      while (TLI.getNumElements((MVT::ValueType)NT) != 1)
        --NT;

      unsigned R = MakeReg((MVT::ValueType)NT);
      for (unsigned i = 1; i != NV; ++i)
        MakeReg((MVT::ValueType)NT);
      return R;
    }

    unsigned InitializeRegForValue(const Value *V) {
      unsigned &R = ValueMap[V];
      assert(R == 0 && "Already initialized this value register!");
      return R = CreateRegForValue(V);
    }
  };
}

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry)
      return false;  // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
    : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument that is not dead and is used outside of
  // the entry block of the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
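  // Scan the entry block for fixed-size allocas and give each one a frame
  // index now, so references anywhere in the function can use the static slot.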
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the value,
        // and if the size of the value is particularly small (<= 8 bytes),
        // round up to the size of the value for potentially better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData.  It serves primarily to 8-byte align doubles for X86.
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg && "PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}



//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately.  We bunch
  /// them up and then emit token factor nodes when possible.  This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  std::vector<SDOperand> PendingLoads;

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData &TD;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  ///
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }


  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V) {
    SDOperand &N = NodeMap[V];
    if (N.Val) return N;

    const Type *VTy = V->getType();
    MVT::ValueType VT = TLI.getValueType(VTy);
    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
        visit(CE->getOpcode(), *CE);
        assert(N.Val && "visit didn't populate the ValueMap!");
        return N;
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
        return N = DAG.getGlobalAddress(GV, VT);
      } else if (isa<ConstantPointerNull>(C)) {
        return N = DAG.getConstant(0, TLI.getPointerTy());
      } else if (isa<UndefValue>(C)) {
        return N = DAG.getNode(ISD::UNDEF, VT);
      } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
        return N = DAG.getConstantFP(CFP->getValue(), VT);
      } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
        unsigned NumElements = PTy->getNumElements();
        MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
        MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

        // Now that we know the number and type of the elements, push a
        // Constant or ConstantFP node onto the ops list for each element of
        // the packed constant.
        std::vector<SDOperand> Ops;
        if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
          if (MVT::isFloatingPoint(PVT)) {
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantFP *El = cast<ConstantFP>(CP->getOperand(i));
              Ops.push_back(DAG.getConstantFP(El->getValue(), PVT));
            }
          } else {
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantIntegral *El =
                cast<ConstantIntegral>(CP->getOperand(i));
              Ops.push_back(DAG.getConstant(El->getRawValue(), PVT));
            }
          }
        } else {
          assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
          SDOperand Op;
          if (MVT::isFloatingPoint(PVT))
            Op = DAG.getConstantFP(0, PVT);
          else
            Op = DAG.getConstant(0, PVT);
          Ops.assign(NumElements, Op);
        }

        // Handle the case where we have a 1-element vector, in which
        // case we want to immediately turn it into a scalar constant.
        if (Ops.size() == 1) {
          return N = Ops[0];
        } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
          return N = DAG.getNode(ISD::ConstantVec, TVT, Ops);
        } else {
          // If the packed type isn't legal, then create a ConstantVec node with
          // generic Vector type instead.
          return N = DAG.getNode(ISD::ConstantVec, MVT::Vector, Ops);
        }
      } else {
        // Canonicalize all constant ints to be unsigned.
        return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
      }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      std::map<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
    }

    std::map<const Value*, unsigned>::const_iterator VMI =
      FuncInfo.ValueMap.find(V);
    assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

    unsigned InReg = VMI->second;

    // If this type is not legal, make it so now.
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
    if (DestVT < VT) {
      // Source must be expanded.  This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
    } else {
      if (DestVT > VT) { // Promotion case
        if (MVT::isFloatingPoint(VT))
          N = DAG.getNode(ISD::FP_ROUND, VT, N);
        else
          N = DAG.getNode(ISD::TRUNCATE, VT, N);
      }
    }

    return N;
  }

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // These all get lowered before this pass.
  void visitExtractElement(ExtractElementInst &I) { assert(0 && "TODO"); }
  void visitInsertElement(InsertElementInst &I) { assert(0 && "TODO"); }
  void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  //
  void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
  }
  void visitDiv(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV, 0);
  }
  void visitRem(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
  }
  void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, 0); }
  void visitOr (User &I) { visitBinary(I, ISD::OR,  0, 0); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, 0); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitShr(User &I) {
    visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);
  //

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm

void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }

  SDOperand Op1 = getValue(I.getOperand(0));
  MVT::ValueType TmpVT;

  switch (Op1.getValueType()) {
  default: assert(0 && "Unknown value type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // If this is a machine where 32-bits is legal or expanded, promote to
    // 32-bits, otherwise, promote to 64-bits.
    if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
      TmpVT = TLI.getTypeToTransformTo(MVT::i32);
    else
      TmpVT = MVT::i32;

    // Extend integer types to result type.
    if (I.getOperand(0)->getType()->isSigned())
      Op1 = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, Op1);
    else
      Op1 = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, Op1);
    break;
  case MVT::f32:
    // If this is a machine where f32 is promoted to f64, do so now.
    if (TLI.getTypeAction(MVT::f32) == TargetLowering::Promote)
      Op1 = DAG.getNode(ISD::FP_EXTEND, TLI.getTypeToTransformTo(MVT::f32),Op1);
    break;
  case MVT::i64:
  case MVT::f64:
    break; // No extension needed!
  }
  // Allow targets to lower this further to meet ABI requirements.
  DAG.setRoot(TLI.LowerReturnTo(getRoot(), Op1, DAG));
}

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
  } else {
    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

    SDOperand Cond = getValue(I.getCondition());
    if (Succ1MBB == NextBlock) {
      // If the condition is false, fall through.  This means we should branch
      // if the condition is true to Succ #0.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
    } else if (Succ0MBB == NextBlock) {
      // If the condition is true, fall through.  This means we should branch if
      // the condition is false to Succ #1.  Invert the condition first.
      SDOperand True = DAG.getConstant(1, Cond.getValueType());
      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ1MBB)));
    } else {
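      // Neither successor is the fall-through block, so emit a two-way
      // conditional branch carrying both destinations.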
      std::vector<SDOperand> Ops;
      Ops.push_back(getRoot());
      Ops.push_back(Cond);
      Ops.push_back(DAG.getBasicBlock(Succ0MBB));
      Ops.push_back(DAG.getBasicBlock(Succ1MBB));
      DAG.setRoot(DAG.getNode(ISD::BRCONDTWOWAY, MVT::Other, Ops));
    }
  }
}

void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
  }
  visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
}

void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
                                       unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (Ty->isIntegral()) {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  } else if (Ty->isFloatingPoint()) {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  } else {
    const PackedType *PTy = cast<PackedType>(Ty);
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them.  Similarly, if the
    // abstract vector is going to turn into one that the target natively
    // supports, generate that type now so that Legalize doesn't have to deal
    // with that either.  These steps ensure that Legalize only has to handle
    // vector types in its Expand case.
    unsigned Opc = MVT::isFloatingPoint(PVT) ? FPOp : IntOp;
    if (NumElements == 1) {
      setValue(&I, DAG.getNode(Opc, PVT, Op1, Op2));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      setValue(&I, DAG.getNode(Opc, TVT, Op1, Op2));
    } else {
      SDOperand Num = DAG.getConstant(NumElements, MVT::i32);
      SDOperand Typ = DAG.getValueType(PVT);
      setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
    }
  }
}

void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

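  // Extend the shift amount to the target's preferred shift-amount type.
  // ANY_EXTEND is enough here, since the extra high bits of the amount are
  // never inspected.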
  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}

void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond     = getValue(I.getOperand(0));
  SDOperand TrueVal  = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                           TrueVal, FalseVal));
}

void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
  MVT::ValueType DestTy = TLI.getValueType(I.getType());

  if (N.getValueType() == DestTy) {
    setValue(&I, N);  // noop cast.
  } else if (DestTy == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
    SDOperand Zero = isInteger(SrcTy) ? DAG.getConstant(0, N.getValueType()) :
                                        DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcTy)) {
    if (isInteger(DestTy)) {        // Int -> Int cast
      if (DestTy < SrcTy)   // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
    } else {                        // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
    }
  } else {
    assert(isFloatingPoint(SrcTy) && "Unknown value type!");
    if (isFloatingPoint(DestTy)) {  // FP -> FP cast
      if (DestTy < SrcTy)   // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
    } else {                        // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));
    }
  }
}

void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();
  const Type *UIntPtrTy = TD.getIntPtrType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getRawValue() == 0) continue;

        uint64_t Offs;
        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
          Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
        else
          Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeSize(Ty);
      SDOperand IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      if (IdxN.getValueType() < N.getValueType()) {
        if (Idx->getType()->isSigned())
          IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
        else
          IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
      } else if (IdxN.getValueType() > N.getValueType())
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately.  This is a very common case.
      if (isPowerOf2_64(ElementSize)) {
        unsigned Amt = Log2_64(ElementSize);
        IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                           DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
        continue;
      }

      SDOperand Scale = getIntPtrConstant(ElementSize);
      IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
    }
  }
  setValue(&I, N);
}

void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
  unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                            I.getAlignment());

  SDOperand AllocSize = getValue(I.getArraySize());
  MVT::ValueType IntPtr = TLI.getPointerTy();
  if (IntPtr < AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr > AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to the
  // stack alignment, ignore it and round the size of the allocation up to the
  // stack alignment size.  If the size is greater than the stack alignment, we
  // note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
  }

  std::vector<MVT::ValueType> VTs;
  VTs.push_back(AllocSize.getValueType());
  VTs.push_back(MVT::Other);
  std::vector<SDOperand> Ops;
  Ops.push_back(getRoot());
  Ops.push_back(AllocSize);
  Ops.push_back(getIntPtrConstant(Align));
  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}

/// getStringValue - Turn an LLVM constant pointer that eventually points to a
/// global into a string value.  Return an empty string if we can't do it.
///
static std::string getStringValue(Value *V, unsigned Offset = 0) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    if (GV->hasInitializer() && isa<ConstantArray>(GV->getInitializer())) {
      ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
      if (Init->isString()) {
        std::string Result = Init->getAsString();
        if (Offset < Result.size()) {
          // If we are pointing into the string, erase the beginning...
          Result.erase(Result.begin(), Result.begin()+Offset);

          // Take off the null terminator, and any string fragments after it.
          std::string::size_type NullPos = Result.find_first_of((char)0);
          if (NullPos != std::string::npos)
            Result.erase(Result.begin()+NullPos, Result.end());
          return Result;
        }
      }
    }
  } else if (Constant *C = dyn_cast<Constant>(V)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return getStringValue(GV, Offset);
    else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        // Turn a gep into the specified offset.
        if (CE->getNumOperands() == 3 &&
            cast<Constant>(CE->getOperand(1))->isNullValue() &&
            isa<ConstantInt>(CE->getOperand(2))) {
          return getStringValue(CE->getOperand(0),
                   Offset+cast<ConstantInt>(CE->getOperand(2))->getRawValue());
        }
      }
    }
  }
  return "";
}

void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));

  SDOperand Root;
  if (I.isVolatile())
    Root = getRoot();
  else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  const Type *Ty = I.getType();
  SDOperand L;

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them.
    if (NumElements == 1) {
      L = DAG.getLoad(PVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      L = DAG.getLoad(TVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
    } else {
      L = DAG.getVecLoad(NumElements, PVT, Root, Ptr,
                         DAG.getSrcValue(I.getOperand(0)));
    }
  } else {
    L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr,
                    DAG.getSrcValue(I.getOperand(0)));
  }
  setValue(&I, L);

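  // A volatile load updates the chain right away; other loads are queued on
  // PendingLoads so that independent loads stay unordered until getRoot()
  // token-factors them together.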
  if (I.isVolatile())
    DAG.setRoot(L.getValue(1));
  else
    PendingLoads.push_back(L.getValue(1));
}


void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
                          DAG.getSrcValue(I.getOperand(1))));
}

/// visitIntrinsicCall - Lower the call to the specified intrinsic function.
/// If we want to emit this as a call to a named external function, return the
/// name; otherwise, lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  switch (Intrinsic) {
  case Intrinsic::vastart: visitVAStart(I); return 0;
  case Intrinsic::vaend:   visitVAEnd(I); return 0;
  case Intrinsic::vacopy:  visitVACopy(I); return 0;
  case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
  case Intrinsic::frameaddress:  visitFrameReturnAddress(I, true); return 0;
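  // setjmp/longjmp become calls to the C runtime entry points.  The pointer
  // arithmetic on the string literal below skips the leading '_' on targets
  // that do not use the underscore-prefixed names.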
  case Intrinsic::setjmp:
    return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
    break;
  case Intrinsic::longjmp:
    return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
    break;
  case Intrinsic::memcpy:  visitMemIntrinsic(I, ISD::MEMCPY); return 0;
  case Intrinsic::memset:  visitMemIntrinsic(I, ISD::MEMSET); return 0;
  case Intrinsic::memmove: visitMemIntrinsic(I, ISD::MEMMOVE); return 0;

  case Intrinsic::readport:
  case Intrinsic::readio: {
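    // The read produces two results: the value read and a new chain.  The
    // chain result (value #1) becomes the new DAG root.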
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(TLI.getValueType(I.getType()));
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    Ops.push_back(getValue(I.getOperand(1)));
    SDOperand Tmp = DAG.getNode(Intrinsic == Intrinsic::readport ?
                                ISD::READPORT : ISD::READIO, VTs, Ops);

    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::writeport:
  case Intrinsic::writeio:
    DAG.setRoot(DAG.getNode(Intrinsic == Intrinsic::writeport ?
                            ISD::WRITEPORT : ISD::WRITEIO, MVT::Other,
                            getRoot(), getValue(I.getOperand(1)),
                            getValue(I.getOperand(2))));
    return 0;

  case Intrinsic::dbg_stoppoint: {
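    // If the target wants the debugger hooks emitted as ordinary calls, hand
    // back the runtime function name.  Otherwise gather the chain, line,
    // column and, if the compile unit can be decoded, the filename and working
    // directory, and emit an ISD::LOCATION node.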
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_debugger_stop";

    std::string fname = "<unknown>";
    std::vector<SDOperand> Ops;

    // Input Chain
    Ops.push_back(getRoot());

    // line number
    Ops.push_back(getValue(I.getOperand(2)));

    // column
    Ops.push_back(getValue(I.getOperand(3)));

    // filename/working dir
    // Pull the filename out of the compilation unit.
    const GlobalVariable *cunit = dyn_cast<GlobalVariable>(I.getOperand(4));
    if (cunit && cunit->hasInitializer()) {
      if (ConstantStruct *CS =
            dyn_cast<ConstantStruct>(cunit->getInitializer())) {
        if (CS->getNumOperands() > 0) {
          Ops.push_back(DAG.getString(getStringValue(CS->getOperand(3))));
          Ops.push_back(DAG.getString(getStringValue(CS->getOperand(4))));
        }
      }
    }

    if (Ops.size() == 5)  // Found filename/workingdir.
      DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
    setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  }
  case Intrinsic::dbg_region_start:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_region_start";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  case Intrinsic::dbg_region_end:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_region_end";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  case Intrinsic::dbg_func_start:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_subprogram";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  case Intrinsic::dbg_declare:
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;

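  // isunordered is true exactly when at least one operand is a NaN; it maps
  // directly onto an unordered setcc (ISD::SETUO).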
  case Intrinsic::isunordered_f32:
  case Intrinsic::isunordered_f64:
    setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)),
                              getValue(I.getOperand(2)), ISD::SETUO));
    return 0;

  case Intrinsic::sqrt_f32:
  case Intrinsic::sqrt_f64:
    setValue(&I, DAG.getNode(ISD::FSQRT,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::pcmarker: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::readcyclecounter: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(MVT::i64);
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::bswap_i16:
  case Intrinsic::bswap_i32:
  case Intrinsic::bswap_i64:
    setValue(&I, DAG.getNode(ISD::BSWAP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::cttz_i8:
  case Intrinsic::cttz_i16:
  case Intrinsic::cttz_i32:
  case Intrinsic::cttz_i64:
    setValue(&I, DAG.getNode(ISD::CTTZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::ctlz_i8:
  case Intrinsic::ctlz_i16:
  case Intrinsic::ctlz_i32:
  case Intrinsic::ctlz_i64:
    setValue(&I, DAG.getNode(ISD::CTLZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::ctpop_i8:
  case Intrinsic::ctpop_i16:
  case Intrinsic::ctpop_i32:
  case Intrinsic::ctpop_i64:
    setValue(&I, DAG.getNode(ISD::CTPOP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::stacksave: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(TLI.getPointerTy());
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::stackrestore: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::prefetch:
    // FIXME: Currently discarding prefetches.
    return 0;
  default:
    std::cerr << I;
    assert(0 && "This intrinsic is not implemented yet!");
    return 0;
  }
}


void SelectionDAGLowering::visitCall(CallInst &I) {
  const char *RenameFn = 0;
  if (Function *F = I.getCalledFunction()) {
    if (F->isExternal())
      if (unsigned IID = F->getIntrinsicID()) {
        RenameFn = visitIntrinsicCall(I, IID);
        if (!RenameFn)
          return;
      } else {    // Not an LLVM intrinsic.
        const std::string &Name = F->getName();
        if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType()) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
            return;
          }
        } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType() &&
              TLI.isOperationLegal(ISD::FSIN,
                                   TLI.getValueType(I.getOperand(1)->getType()))) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
            return;
          }
        } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType() &&
              TLI.isOperationLegal(ISD::FCOS,
                                   TLI.getValueType(I.getOperand(1)->getType()))) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
            return;
          }
        }
      }
  } else if (isa<InlineAsm>(I.getOperand(0))) {
    visitInlineAsm(I);
    return;
  }

  SDOperand Callee;
  if (!RenameFn)
    Callee = getValue(I.getOperand(0));
  else
    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.reserve(I.getNumOperands());
  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    Value *Arg = I.getOperand(i);
    SDOperand ArgNode = getValue(Arg);
    Args.push_back(std::make_pair(ArgNode, Arg->getType()));
  }

  const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
                    I.isTailCall(), Callee, Args, DAG);
  if (I.getType() != Type::VoidTy)
    setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}

/// visitInlineAsm - Handle a call to an InlineAsm object.
///
void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
  InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));

  SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
                                                 MVT::Other);

1164 // Note, we treat inline asms both with and without side-effects as the same.
1165 // If an inline asm doesn't have side effects and doesn't access memory, we
1166 // could choose to not chain it; for now we always chain it.
1167 bool hasSideEffects = IA->hasSideEffects();
1168
1169 std::vector<std::pair<InlineAsm::ConstraintPrefix, std::string> >
1170 Constraints = IA->ParseConstraints();
1171
1172
1173 /// AsmNodeOperands - A list of pairs. The first element is a register, the
1174 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
1175 /// if it is a def of that register.
1176 std::vector<SDOperand> AsmNodeOperands;
1177 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
1178 AsmNodeOperands.push_back(AsmStr);
1179
1180 SDOperand Chain = getRoot();
1181 SDOperand Flag;
1182
1183 // FIXME: input copies.
1184
1185 // Finish up input operands.
1186 AsmNodeOperands[0] = Chain;
1187 if (Flag.Val) AsmNodeOperands.push_back(Flag);
1188
1189 std::vector<MVT::ValueType> VTs;
1190 VTs.push_back(MVT::Other);
1191 VTs.push_back(MVT::Flag);
1192 Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands);
1193 Flag = Chain.getValue(1);
1194
1195 // FIXME: Copies out of registers here, setValue(CI).
1196
1197 DAG.setRoot(Chain);
1198}
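// A tiny standalone sketch of the use/def encoding that the AsmNodeOperands
// comment above describes. Only the bit positions come from that comment; the
// enum and helper names here are hypothetical, for illustration only.
enum AsmOperandFlagBitsSketch {
  ASMOP_USE_SKETCH = 1 << 0,   // bit #0 set: the operand is a use of the reg
  ASMOP_DEF_SKETCH = 1 << 1    // bit #1 set: the operand is a def of the reg
};
static inline bool IsAsmOperandDefSketch(unsigned Flags) {
  return (Flags & ASMOP_DEF_SKETCH) != 0;
}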
1199
1200
Chris Lattner7a60d912005-01-07 07:47:53 +00001201void SelectionDAGLowering::visitMalloc(MallocInst &I) {
1202 SDOperand Src = getValue(I.getOperand(0));
1203
1204 MVT::ValueType IntPtr = TLI.getPointerTy();
Chris Lattnereccb73d2005-01-22 23:04:37 +00001205
1206 if (IntPtr < Src.getValueType())
1207 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
1208 else if (IntPtr > Src.getValueType())
1209 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
Chris Lattner7a60d912005-01-07 07:47:53 +00001210
1211 // Scale the source by the type size.
1212 uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
1213 Src = DAG.getNode(ISD::MUL, Src.getValueType(),
1214 Src, getIntPtrConstant(ElementSize));
1215
1216 std::vector<std::pair<SDOperand, const Type*> > Args;
1217 Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));
Chris Lattner1f45cd72005-01-08 19:26:18 +00001218
1219 std::pair<SDOperand,SDOperand> Result =
Chris Lattner2e77db62005-05-13 18:50:42 +00001220 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
Chris Lattner1f45cd72005-01-08 19:26:18 +00001221 DAG.getExternalSymbol("malloc", IntPtr),
1222 Args, DAG);
1223 setValue(&I, Result.first); // Pointers always fit in registers
1224 DAG.setRoot(Result.second);
Chris Lattner7a60d912005-01-07 07:47:53 +00001225}
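// A standalone illustrative sketch of the arithmetic visitMalloc emits above:
// the element count is truncated or zero-extended to the pointer width and
// then scaled by the element size before being handed to the "malloc" call.
// The 32-bit pointer width and 12-byte element size are assumed values chosen
// purely for illustration.
static unsigned long long MallocByteCountSketch(unsigned long long NumElts) {
  const unsigned PointerBits = 32;           // assumed pointer width
  const unsigned long long EltSize = 12;     // assumed TD.getTypeSize(EltTy)

  // Model of the ISD::TRUNCATE / ISD::ZERO_EXTEND to the pointer type.
  unsigned long long Count = NumElts & (~0ULL >> (64 - PointerBits));

  // Model of the ISD::MUL by the element size; this is the argument that is
  // passed to the lowered "malloc" call.
  return Count * EltSize;
}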
1226
1227void SelectionDAGLowering::visitFree(FreeInst &I) {
1228 std::vector<std::pair<SDOperand, const Type*> > Args;
1229 Args.push_back(std::make_pair(getValue(I.getOperand(0)),
1230 TLI.getTargetData().getIntPtrType()));
1231 MVT::ValueType IntPtr = TLI.getPointerTy();
Chris Lattner1f45cd72005-01-08 19:26:18 +00001232 std::pair<SDOperand,SDOperand> Result =
Chris Lattner2e77db62005-05-13 18:50:42 +00001233 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
Chris Lattner1f45cd72005-01-08 19:26:18 +00001234 DAG.getExternalSymbol("free", IntPtr), Args, DAG);
1235 DAG.setRoot(Result.second);
Chris Lattner7a60d912005-01-07 07:47:53 +00001236}
1237
Chris Lattner13d7c252005-08-26 20:54:47 +00001238// InsertAtEndOfBasicBlock - This method should be implemented by targets that
1239// mark instructions with the 'usesCustomDAGSchedInserter' flag. These
1240// instructions are special in various ways, which require special support to
1241// insert. The specified MachineInstr is created but not inserted into any
1242// basic blocks, and the scheduler passes ownership of it to this method.
1243MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
1244 MachineBasicBlock *MBB) {
1245 std::cerr << "If a target marks an instruction with "
1246 "'usesCustomDAGSchedInserter', it must implement "
1247 "TargetLowering::InsertAtEndOfBasicBlock!\n";
1248 abort();
1249 return 0;
1250}
1251
Nate Begeman78afac22005-10-18 23:23:37 +00001252SDOperand TargetLowering::LowerReturnTo(SDOperand Chain, SDOperand Op,
1253 SelectionDAG &DAG) {
1254 return DAG.getNode(ISD::RET, MVT::Other, Chain, Op);
1255}
1256
Chris Lattner58cfd792005-01-09 00:00:49 +00001257void SelectionDAGLowering::visitVAStart(CallInst &I) {
Nate Begemane74795c2006-01-25 18:21:52 +00001258 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
1259 getValue(I.getOperand(1)),
1260 DAG.getSrcValue(I.getOperand(1))));
Chris Lattner58cfd792005-01-09 00:00:49 +00001261}
1262
1263void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
Nate Begemane74795c2006-01-25 18:21:52 +00001264 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
1265 getValue(I.getOperand(0)),
1266 DAG.getSrcValue(I.getOperand(0)));
1267 setValue(&I, V);
1268 DAG.setRoot(V.getValue(1));
Chris Lattner7a60d912005-01-07 07:47:53 +00001269}
1270
1271void SelectionDAGLowering::visitVAEnd(CallInst &I) {
Nate Begemane74795c2006-01-25 18:21:52 +00001272 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
1273 getValue(I.getOperand(1)),
1274 DAG.getSrcValue(I.getOperand(1))));
Chris Lattner7a60d912005-01-07 07:47:53 +00001275}
1276
1277void SelectionDAGLowering::visitVACopy(CallInst &I) {
Nate Begemane74795c2006-01-25 18:21:52 +00001278 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
1279 getValue(I.getOperand(1)),
1280 getValue(I.getOperand(2)),
1281 DAG.getSrcValue(I.getOperand(1)),
1282 DAG.getSrcValue(I.getOperand(2))));
Chris Lattner7a60d912005-01-07 07:47:53 +00001283}
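// A plain C++ sketch of the varargs idioms that the four visitors above
// correspond to, shown only to make the intrinsic-to-node mapping concrete:
// va_start -> ISD::VASTART, va_arg -> the VAARG node, va_end -> ISD::VAEND,
// and va_copy -> ISD::VACOPY. The function itself is illustrative.
#include <cstdarg>   // needed only for this sketch
static int SumVarArgsSketch(int Count, ...) {
  va_list AP;
  va_start(AP, Count);                 // lowered by visitVAStart
  int Sum = 0;
  for (int i = 0; i != Count; ++i)
    Sum += va_arg(AP, int);            // lowered by visitVAArg
  va_end(AP);                          // lowered by visitVAEnd
  return Sum;
}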
1284
Chris Lattner58cfd792005-01-09 00:00:49 +00001285// It is always conservatively correct for llvm.returnaddress and
1286// llvm.frameaddress to return 0.
1287std::pair<SDOperand, SDOperand>
1288TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
1289 unsigned Depth, SelectionDAG &DAG) {
1290 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
Chris Lattner7a60d912005-01-07 07:47:53 +00001291}
1292
Chris Lattner29dcc712005-05-14 05:50:48 +00001293SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
Chris Lattner897cd7d2005-01-16 07:28:41 +00001294 assert(0 && "LowerOperation not implemented for this target!");
1295 abort();
Misha Brukman73e929f2005-02-17 21:39:27 +00001296 return SDOperand();
Chris Lattner897cd7d2005-01-16 07:28:41 +00001297}
1298
Chris Lattner58cfd792005-01-09 00:00:49 +00001299void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
1300 unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
1301 std::pair<SDOperand,SDOperand> Result =
Chris Lattner4108bb02005-01-17 19:43:36 +00001302 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
Chris Lattner58cfd792005-01-09 00:00:49 +00001303 setValue(&I, Result.first);
1304 DAG.setRoot(Result.second);
Chris Lattner7a60d912005-01-07 07:47:53 +00001305}
1306
Chris Lattner875def92005-01-11 05:56:49 +00001307void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
Reid Spencer3fd1b4c2005-11-30 05:21:10 +00001308#if 0
1309 // If the size of the cpy/move/set is constant (known)
1310 if (ConstantUInt* op3 = dyn_cast<ConstantUInt>(I.getOperand(3))) {
1311 uint64_t size = op3->getValue();
1312 switch (Op) {
1313 case ISD::MEMSET:
1314 if (size <= TLI.getMaxStoresPerMemSet()) {
1315 if (ConstantUInt* op4 = dyn_cast<ConstantUInt>(I.getOperand(4))) {
1316 uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
1317 uint64_t align = op4->getValue();
1318 while (size > align) {
1319 size -=align;
1320 }
1321 Value *SrcV = I.getOperand(0);
1322 SDOperand Src = getValue(SrcV);
1323 SDOperand Ptr = getValue(I.getOperand(1));
1324 DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
1325 DAG.getSrcValue(I.getOperand(1))));
1326 }
1327 break;
1328 }
1329 break; // don't do this optimization, use a normal memset
1330 case ISD::MEMMOVE:
1331 case ISD::MEMCPY:
1332 break; // FIXME: not implemented yet
1333 }
1334 }
1335#endif
1336
1337 // Non-optimized version
Chris Lattner875def92005-01-11 05:56:49 +00001338 std::vector<SDOperand> Ops;
Chris Lattner4108bb02005-01-17 19:43:36 +00001339 Ops.push_back(getRoot());
Chris Lattner875def92005-01-11 05:56:49 +00001340 Ops.push_back(getValue(I.getOperand(1)));
1341 Ops.push_back(getValue(I.getOperand(2)));
1342 Ops.push_back(getValue(I.getOperand(3)));
1343 Ops.push_back(getValue(I.getOperand(4)));
1344 DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
Chris Lattner7a60d912005-01-07 07:47:53 +00001345}
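// A standalone sketch of the optimization the disabled block above is aiming
// at: when the length of a memset is a small compile-time constant and the
// destination is suitably aligned, the libcall can be replaced by an ordinary
// store. The 4-byte length, word-sized store, and alignment are assumptions
// made for illustration.
static void MemsetAsSingleStoreSketch(unsigned *AlignedDst, unsigned char Val) {
  unsigned Word = Val * 0x01010101u;   // replicate the byte into every lane
  *AlignedDst = Word;                  // one ISD::STORE instead of a libcall
}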
1346
Chris Lattner875def92005-01-11 05:56:49 +00001347//===----------------------------------------------------------------------===//
1348// SelectionDAGISel code
1349//===----------------------------------------------------------------------===//
Chris Lattner7a60d912005-01-07 07:47:53 +00001350
1351unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
1352 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
1353}
1354
Chris Lattnerc9950c12005-08-17 06:37:43 +00001355void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
Chris Lattner1a908c82005-08-18 17:35:14 +00001356 // FIXME: we only modify the CFG to split critical edges. This
1357 // updates dom and loop info.
Chris Lattnerc9950c12005-08-17 06:37:43 +00001358}
Chris Lattner7a60d912005-01-07 07:47:53 +00001359
Chris Lattner35397782005-12-05 07:10:48 +00001360
1361/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
1362/// casting to the type of GEPI.
1363static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
1364 Value *Ptr, Value *PtrOffset) {
1365 if (V) return V; // Already computed.
1366
1367 BasicBlock::iterator InsertPt;
1368 if (BB == GEPI->getParent()) {
1369 // If inserting into the GEP's block, insert right after the GEP.
1370 InsertPt = GEPI;
1371 ++InsertPt;
1372 } else {
1373 // Otherwise, insert at the top of BB, after any PHI nodes
1374 InsertPt = BB->begin();
1375 while (isa<PHINode>(InsertPt)) ++InsertPt;
1376 }
1377
Chris Lattnerbe73d6e2005-12-08 08:00:12 +00001378 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
1379 // BB so that there is only one value live across basic blocks (the cast
1380 // operand).
1381 if (CastInst *CI = dyn_cast<CastInst>(Ptr))
1382 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
1383 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
1384
Chris Lattner35397782005-12-05 07:10:48 +00001385 // Add the offset, cast it to the right type.
1386 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
1387 Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
1388 return V = Ptr;
1389}
1390
1391
1392/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
1393/// selection, we want to be a bit careful about some things. In particular, if
1394/// we have a GEP instruction that is used in a different block than it is
1395/// defined, the addressing expression of the GEP cannot be folded into loads or
1396/// stores that use it. In this case, decompose the GEP and move constant
1397/// indices into blocks that use it.
1398static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
1399 const TargetData &TD) {
Chris Lattner35397782005-12-05 07:10:48 +00001400 // If this GEP is only used inside the block it is defined in, there is no
1401 // need to rewrite it.
1402 bool isUsedOutsideDefBB = false;
1403 BasicBlock *DefBB = GEPI->getParent();
1404 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
1405 UI != E; ++UI) {
1406 if (cast<Instruction>(*UI)->getParent() != DefBB) {
1407 isUsedOutsideDefBB = true;
1408 break;
1409 }
1410 }
1411 if (!isUsedOutsideDefBB) return;
1412
1413 // If this GEP has no non-zero constant indices, there is nothing we can do;
1414 // ignore it.
1415 bool hasConstantIndex = false;
1416 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
1417 E = GEPI->op_end(); OI != E; ++OI) {
1418 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
1419 if (CI->getRawValue()) {
1420 hasConstantIndex = true;
1421 break;
1422 }
1423 }
Chris Lattnerf1a54c02005-12-11 09:05:13 +00001424 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
1425 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return;
Chris Lattner35397782005-12-05 07:10:48 +00001426
1427 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
1428 // constant offset (which we now know is non-zero) and deal with it later.
1429 uint64_t ConstantOffset = 0;
1430 const Type *UIntPtrTy = TD.getIntPtrType();
1431 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
1432 const Type *Ty = GEPI->getOperand(0)->getType();
1433
1434 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
1435 E = GEPI->op_end(); OI != E; ++OI) {
1436 Value *Idx = *OI;
1437 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
1438 unsigned Field = cast<ConstantUInt>(Idx)->getValue();
1439 if (Field)
1440 ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
1441 Ty = StTy->getElementType(Field);
1442 } else {
1443 Ty = cast<SequentialType>(Ty)->getElementType();
1444
1445 // Handle constant subscripts.
1446 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
1447 if (CI->getRawValue() == 0) continue;
1448
1449 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
1450 ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
1451 else
1452 ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
1453 continue;
1454 }
1455
1456 // Ptr = Ptr + Idx * ElementSize;
1457
1458 // Cast Idx to UIntPtrTy if needed.
1459 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
1460
1461 uint64_t ElementSize = TD.getTypeSize(Ty);
1462 // Mask off bits that should not be set.
1463 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
1464 Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
1465
1466 // Multiply by the element size and add to the base.
1467 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
1468 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
1469 }
1470 }
1471
1472 // Make sure that the offset fits in uintptr_t.
1473 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
1474 Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);
1475
1476 // Okay, we have now emitted all of the variable index parts to the BB that
1477 // the GEP is defined in. Loop over all of the using instructions, inserting
1478 // an "add Ptr, ConstantOffset" into each block that uses it and update the
Chris Lattnerbe73d6e2005-12-08 08:00:12 +00001479 // instruction to use the newly computed value, making GEPI dead. When the
1480 // user is a load or store instruction address, we emit the add into the user
1481 // block, otherwise we use a canonical version right next to the gep (these
1482 // won't be foldable as addresses, so we might as well share the computation).
1483
Chris Lattner35397782005-12-05 07:10:48 +00001484 std::map<BasicBlock*,Value*> InsertedExprs;
1485 while (!GEPI->use_empty()) {
1486 Instruction *User = cast<Instruction>(GEPI->use_back());
Chris Lattnerbe73d6e2005-12-08 08:00:12 +00001487
1488 // If this use is not foldable into the addressing mode, use a version
1489 // emitted in the GEP block.
1490 Value *NewVal;
1491 if (!isa<LoadInst>(User) &&
1492 (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
1493 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
1494 Ptr, PtrOffset);
1495 } else {
1496 // Otherwise, insert the code in the User's block so it can be folded into
1497 // any users in that block.
1498 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
Chris Lattner35397782005-12-05 07:10:48 +00001499 User->getParent(), GEPI,
1500 Ptr, PtrOffset);
Chris Lattner35397782005-12-05 07:10:48 +00001501 }
Chris Lattnerbe73d6e2005-12-08 08:00:12 +00001502 User->replaceUsesOfWith(GEPI, NewVal);
1503 }
Chris Lattner35397782005-12-05 07:10:48 +00001504
1505 // Finally, the GEP is dead, remove it.
1506 GEPI->eraseFromParent();
1507}
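// A worked arithmetic sketch of the decomposition OptimizeGEPExpression
// performs. The layout numbers below are assumptions, not real TargetData
// values: the variable part (index times element size) is emitted once, next
// to the GEP, while the accumulated constant offset is re-added next to each
// use that lives in another block.
static unsigned long long GEPAddressSketch(unsigned long long BasePtr,
                                           unsigned long long Index) {
  const unsigned long long EltSize = 24;     // assumed TD.getTypeSize(ElemTy)
  const unsigned long long ConstOffset = 8;  // assumed sum of constant indices

  // Emitted in the defining block: Ptr = Base + Index * EltSize.
  unsigned long long VariablePart = BasePtr + Index * EltSize;

  // Emitted next to each out-of-block use: add Ptr, ConstantOffset.
  return VariablePart + ConstOffset;
}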
1508
Chris Lattner7a60d912005-01-07 07:47:53 +00001509bool SelectionDAGISel::runOnFunction(Function &Fn) {
1510 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
1511 RegMap = MF.getSSARegMap();
1512 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
1513
Chris Lattner35397782005-12-05 07:10:48 +00001514 // First, split all critical edges for PHI nodes with incoming values that are
1515 // constants; this way the load of the constant into a vreg will not be placed
1516 // into MBBs that are used some other way.
1517 //
1518 // In this pass we also look for GEP instructions that are used across basic
1519 // blocks and rewrite them to improve basic-block-at-a-time selection.
1520 //
Chris Lattner1a908c82005-08-18 17:35:14 +00001521 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
1522 PHINode *PN;
Chris Lattner35397782005-12-05 07:10:48 +00001523 BasicBlock::iterator BBI;
1524 for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
Chris Lattner1a908c82005-08-18 17:35:14 +00001525 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1526 if (isa<Constant>(PN->getIncomingValue(i)))
1527 SplitCriticalEdge(PN->getIncomingBlock(i), BB);
Chris Lattner35397782005-12-05 07:10:48 +00001528
1529 for (BasicBlock::iterator E = BB->end(); BBI != E; )
1530 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
1531 OptimizeGEPExpression(GEPI, TLI.getTargetData());
Chris Lattner1a908c82005-08-18 17:35:14 +00001532 }
Chris Lattnercd6f0f42005-11-09 19:44:01 +00001533
Chris Lattner7a60d912005-01-07 07:47:53 +00001534 FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
1535
1536 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
1537 SelectBasicBlock(I, MF, FuncInfo);
Misha Brukman835702a2005-04-21 22:36:52 +00001538
Chris Lattner7a60d912005-01-07 07:47:53 +00001539 return true;
1540}
1541
1542
Chris Lattner718b5c22005-01-13 17:59:43 +00001543SDOperand SelectionDAGISel::
1544CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
Chris Lattner613f79f2005-01-11 22:03:46 +00001545 SDOperand Op = SDL.getValue(V);
Chris Lattnere727af02005-01-13 20:50:02 +00001546 assert((Op.getOpcode() != ISD::CopyFromReg ||
Chris Lattner33182322005-08-16 21:55:35 +00001547 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
Chris Lattnere727af02005-01-13 20:50:02 +00001548 "Copy from a reg to the same reg!");
Chris Lattner33182322005-08-16 21:55:35 +00001549
1550 // If this type is not legal, we must make sure to not create an invalid
1551 // register use.
1552 MVT::ValueType SrcVT = Op.getValueType();
1553 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
1554 SelectionDAG &DAG = SDL.DAG;
1555 if (SrcVT == DestVT) {
1556 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
1557 } else if (SrcVT < DestVT) {
1558 // The src value is promoted to the register.
Chris Lattnerba28c272005-08-17 06:06:25 +00001559 if (MVT::isFloatingPoint(SrcVT))
1560 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
1561 else
Chris Lattnera66403d2005-09-02 00:19:37 +00001562 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
Chris Lattner33182322005-08-16 21:55:35 +00001563 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
1564 } else {
1565 // The src value is expanded into multiple registers.
1566 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
1567 Op, DAG.getConstant(0, MVT::i32));
1568 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
1569 Op, DAG.getConstant(1, MVT::i32));
1570 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
1571 return DAG.getCopyToReg(Op, Reg+1, Hi);
1572 }
Chris Lattner7a60d912005-01-07 07:47:53 +00001573}
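// A minimal sketch of the "expand" case above on an assumed 32-bit target: a
// 64-bit value is split into low and high 32-bit halves, which end up in the
// consecutive virtual registers Reg and Reg+1. Names are illustrative.
static void SplitIntoHalvesSketch(unsigned long long V,
                                  unsigned &LoHalf, unsigned &HiHalf) {
  LoHalf = (unsigned)(V & 0xFFFFFFFFULL);  // EXTRACT_ELEMENT ..., 0 -> Reg
  HiHalf = (unsigned)(V >> 32);            // EXTRACT_ELEMENT ..., 1 -> Reg+1
}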
1574
Chris Lattner16f64df2005-01-17 17:15:02 +00001575void SelectionDAGISel::
1576LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
1577 std::vector<SDOperand> &UnorderedChains) {
1578 // If this is the entry block, emit arguments.
1579 Function &F = *BB->getParent();
Chris Lattnere3c2cf42005-01-17 17:55:19 +00001580 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
Chris Lattner6871b232005-10-30 19:42:35 +00001581 SDOperand OldRoot = SDL.DAG.getRoot();
1582 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
Chris Lattner16f64df2005-01-17 17:15:02 +00001583
Chris Lattner6871b232005-10-30 19:42:35 +00001584 unsigned a = 0;
1585 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
1586 AI != E; ++AI, ++a)
1587 if (!AI->use_empty()) {
1588 SDL.setValue(AI, Args[a]);
Chris Lattnerd4382f02005-09-13 19:30:54 +00001589
Chris Lattner6871b232005-10-30 19:42:35 +00001590 // If this argument is live outside of the entry block, insert a copy from
1591 // wherever we got it to the vreg that other BB's will reference it as.
1592 if (FuncInfo.ValueMap.count(AI)) {
1593 SDOperand Copy =
1594 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
1595 UnorderedChains.push_back(Copy);
1596 }
Chris Lattnere3c2cf42005-01-17 17:55:19 +00001597 }
Chris Lattner6871b232005-10-30 19:42:35 +00001598
1599 // Next, if the function has live ins that need to be copied into vregs,
1600 // emit the copies now, into the top of the block.
1601 MachineFunction &MF = SDL.DAG.getMachineFunction();
1602 if (MF.livein_begin() != MF.livein_end()) {
1603 SSARegMap *RegMap = MF.getSSARegMap();
1604 const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo();
1605 for (MachineFunction::livein_iterator LI = MF.livein_begin(),
1606 E = MF.livein_end(); LI != E; ++LI)
1607 if (LI->second)
1608 MRI.copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second,
1609 LI->first, RegMap->getRegClass(LI->second));
Chris Lattner16f64df2005-01-17 17:15:02 +00001610 }
Chris Lattner6871b232005-10-30 19:42:35 +00001611
1612 // Finally, if the target has anything special to do, allow it to do so.
1613 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
Chris Lattner16f64df2005-01-17 17:15:02 +00001614}
1615
1616
Chris Lattner7a60d912005-01-07 07:47:53 +00001617void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
1618 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
1619 FunctionLoweringInfo &FuncInfo) {
1620 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
Chris Lattner718b5c22005-01-13 17:59:43 +00001621
1622 std::vector<SDOperand> UnorderedChains;
Misha Brukman835702a2005-04-21 22:36:52 +00001623
Chris Lattner6871b232005-10-30 19:42:35 +00001624 // Lower any arguments needed in this block if this is the entry block.
1625 if (LLVMBB == &LLVMBB->getParent()->front())
1626 LowerArguments(LLVMBB, SDL, UnorderedChains);
Chris Lattner7a60d912005-01-07 07:47:53 +00001627
1628 BB = FuncInfo.MBBMap[LLVMBB];
1629 SDL.setCurrentBasicBlock(BB);
1630
1631 // Lower all of the non-terminator instructions.
1632 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
1633 I != E; ++I)
1634 SDL.visit(*I);
1635
1636 // Ensure that all instructions which are used outside of their defining
1637 // blocks are available as virtual registers.
1638 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
Chris Lattner613f79f2005-01-11 22:03:46 +00001639 if (!I->use_empty() && !isa<PHINode>(I)) {
Chris Lattnera2c5d912005-01-09 01:16:24 +00001640 std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
Chris Lattner7a60d912005-01-07 07:47:53 +00001641 if (VMI != FuncInfo.ValueMap.end())
Chris Lattner718b5c22005-01-13 17:59:43 +00001642 UnorderedChains.push_back(
1643 CopyValueToVirtualRegister(SDL, I, VMI->second));
Chris Lattner7a60d912005-01-07 07:47:53 +00001644 }
1645
1646 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
1647 // ensure constants are generated when needed. Remember the virtual registers
1648 // that need to be added to the Machine PHI nodes as input. We cannot just
1649 // directly add them, because expansion might result in multiple MBB's for one
1650 // BB. As such, the start of the BB might correspond to a different MBB than
1651 // the end.
Misha Brukman835702a2005-04-21 22:36:52 +00001652 //
Chris Lattner7a60d912005-01-07 07:47:53 +00001653
1654 // Emit constants only once even if used by multiple PHI nodes.
1655 std::map<Constant*, unsigned> ConstantsOut;
1656
1657 // Check successor nodes' PHI nodes that expect a constant to be available from
1658 // this block.
1659 TerminatorInst *TI = LLVMBB->getTerminator();
1660 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
1661 BasicBlock *SuccBB = TI->getSuccessor(succ);
1662 MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
1663 PHINode *PN;
1664
1665 // At this point we know that there is a 1-1 correspondence between LLVM PHI
1666 // nodes and Machine PHI nodes, but the incoming operands have not been
1667 // emitted yet.
1668 for (BasicBlock::iterator I = SuccBB->begin();
Chris Lattner8ea875f2005-01-07 21:34:19 +00001669 (PN = dyn_cast<PHINode>(I)); ++I)
1670 if (!PN->use_empty()) {
1671 unsigned Reg;
1672 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
1673 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
1674 unsigned &RegOut = ConstantsOut[C];
1675 if (RegOut == 0) {
1676 RegOut = FuncInfo.CreateRegForValue(C);
Chris Lattner718b5c22005-01-13 17:59:43 +00001677 UnorderedChains.push_back(
1678 CopyValueToVirtualRegister(SDL, C, RegOut));
Chris Lattner8ea875f2005-01-07 21:34:19 +00001679 }
1680 Reg = RegOut;
1681 } else {
1682 Reg = FuncInfo.ValueMap[PHIOp];
Chris Lattnera2c5d912005-01-09 01:16:24 +00001683 if (Reg == 0) {
Misha Brukman835702a2005-04-21 22:36:52 +00001684 assert(isa<AllocaInst>(PHIOp) &&
Chris Lattnera2c5d912005-01-09 01:16:24 +00001685 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
1686 "Didn't codegen value into a register!??");
1687 Reg = FuncInfo.CreateRegForValue(PHIOp);
Chris Lattner718b5c22005-01-13 17:59:43 +00001688 UnorderedChains.push_back(
1689 CopyValueToVirtualRegister(SDL, PHIOp, Reg));
Chris Lattnera2c5d912005-01-09 01:16:24 +00001690 }
Chris Lattner7a60d912005-01-07 07:47:53 +00001691 }
Misha Brukman835702a2005-04-21 22:36:52 +00001692
Chris Lattner8ea875f2005-01-07 21:34:19 +00001693 // Remember that this register needs to be added to the machine PHI node as
1694 // the input for this MBB.
1695 unsigned NumElements =
1696 TLI.getNumElements(TLI.getValueType(PN->getType()));
1697 for (unsigned i = 0, e = NumElements; i != e; ++i)
1698 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
Chris Lattner7a60d912005-01-07 07:47:53 +00001699 }
Chris Lattner7a60d912005-01-07 07:47:53 +00001700 }
1701 ConstantsOut.clear();
1702
Chris Lattner718b5c22005-01-13 17:59:43 +00001703 // Turn all of the unordered chains into one factored node.
Chris Lattner24516842005-01-13 19:53:14 +00001704 if (!UnorderedChains.empty()) {
Chris Lattnerb7cad902005-11-09 05:03:03 +00001705 SDOperand Root = SDL.getRoot();
1706 if (Root.getOpcode() != ISD::EntryToken) {
1707 unsigned i = 0, e = UnorderedChains.size();
1708 for (; i != e; ++i) {
1709 assert(UnorderedChains[i].Val->getNumOperands() > 1);
1710 if (UnorderedChains[i].Val->getOperand(0) == Root)
1711 break; // Don't add the root if we already indirectly depend on it.
1712 }
1713
1714 if (i == e)
1715 UnorderedChains.push_back(Root);
1716 }
Chris Lattner718b5c22005-01-13 17:59:43 +00001717 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
1718 }
1719
Chris Lattner7a60d912005-01-07 07:47:53 +00001720 // Lower the terminator after the copies are emitted.
1721 SDL.visit(*LLVMBB->getTerminator());
Chris Lattner4108bb02005-01-17 19:43:36 +00001722
1723 // Make sure the root of the DAG is up-to-date.
1724 DAG.setRoot(SDL.getRoot());
Chris Lattner7a60d912005-01-07 07:47:53 +00001725}
1726
1727void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
1728 FunctionLoweringInfo &FuncInfo) {
Jim Laskey219d5592006-01-04 22:28:25 +00001729 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
Chris Lattner7a60d912005-01-07 07:47:53 +00001730 CurDAG = &DAG;
1731 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
1732
1733 // First step, lower LLVM code to some DAG. This DAG may use operations and
1734 // types that are not supported by the target.
1735 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
1736
Chris Lattnerbcfebeb2005-10-10 16:47:10 +00001737 // Run the DAG combiner in pre-legalize mode.
1738 DAG.Combine(false);
Nate Begeman007c6502005-09-07 00:15:36 +00001739
Chris Lattner7a60d912005-01-07 07:47:53 +00001740 DEBUG(std::cerr << "Lowered selection DAG:\n");
1741 DEBUG(DAG.dump());
1742
1743 // Second step, hack on the DAG until it only uses operations and types that
1744 // the target supports.
Chris Lattnerffcb0ae2005-01-23 04:36:26 +00001745 DAG.Legalize();
Chris Lattner7a60d912005-01-07 07:47:53 +00001746
1747 DEBUG(std::cerr << "Legalized selection DAG:\n");
1748 DEBUG(DAG.dump());
1749
Chris Lattnerbcfebeb2005-10-10 16:47:10 +00001750 // Run the DAG combiner in post-legalize mode.
1751 DAG.Combine(true);
Nate Begeman007c6502005-09-07 00:15:36 +00001752
Evan Cheng739a6a42006-01-21 02:32:06 +00001753 if (ViewISelDAGs) DAG.viewGraph();
Chris Lattner6bd8fd02005-10-05 06:09:10 +00001754
Chris Lattner5ca31d92005-03-30 01:10:47 +00001755 // Third, instruction select all of the operations to machine code, adding the
1756 // code to the MachineBasicBlock.
Chris Lattner7a60d912005-01-07 07:47:53 +00001757 InstructionSelectBasicBlock(DAG);
1758
Chris Lattner7a60d912005-01-07 07:47:53 +00001759 DEBUG(std::cerr << "Selected machine code:\n");
1760 DEBUG(BB->dump());
1761
Chris Lattner5ca31d92005-03-30 01:10:47 +00001762 // Next, now that we know which MBB the LLVM BB expanded to last, update
Chris Lattner7a60d912005-01-07 07:47:53 +00001763 // PHI nodes in successors.
1764 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
1765 MachineInstr *PHI = PHINodesToUpdate[i].first;
1766 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
1767 "This is not a machine PHI node that we are updating!");
1768 PHI->addRegOperand(PHINodesToUpdate[i].second);
1769 PHI->addMachineBasicBlockOperand(BB);
1770 }
Chris Lattner5ca31d92005-03-30 01:10:47 +00001771
1772 // Finally, add the CFG edges from the last selected MBB to the successor
1773 // MBBs.
1774 TerminatorInst *TI = LLVMBB->getTerminator();
1775 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
1776 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[TI->getSuccessor(i)];
1777 BB->addSuccessor(Succ0MBB);
1778 }
Chris Lattner7a60d912005-01-07 07:47:53 +00001779}
Evan Cheng739a6a42006-01-21 02:32:06 +00001780
1781//===----------------------------------------------------------------------===//
1782/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
1783/// target node in the graph.
1784void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
1785 if (ViewSchedDAGs) DAG.viewGraph();
Evan Chengc1e1d972006-01-23 07:01:07 +00001786 ScheduleDAG *SL = NULL;
1787
1788 switch (ISHeuristic) {
1789 default: assert(0 && "Unrecognized scheduling heuristic");
Evan Chenga6eff8a2006-01-25 09:12:57 +00001790 case defaultScheduling:
1791 if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
1792 SL = createSimpleDAGScheduler(noScheduling, DAG, BB);
1793 else /* TargetLowering::SchedulingForRegPressure */
1794 SL = createBURRListDAGScheduler(DAG, BB);
1795 break;
Evan Chengc1e1d972006-01-23 07:01:07 +00001796 case noScheduling:
1797 case simpleScheduling:
1798 case simpleNoItinScheduling:
1799 SL = createSimpleDAGScheduler(ISHeuristic, DAG, BB);
1800 break;
Evan Cheng31272342006-01-23 08:26:10 +00001801 case listSchedulingBURR:
1802 SL = createBURRListDAGScheduler(DAG, BB);
Evan Chengc1e1d972006-01-23 07:01:07 +00001803 }
Chris Lattnere23928c2006-01-21 19:12:11 +00001804 BB = SL->Run();
Evan Cheng739a6a42006-01-21 02:32:06 +00001805}