//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include <map>
#include <iostream>
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
          cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
          cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0;
static const bool ViewSchedDAGs = 0;
#endif

namespace {
  cl::opt<SchedHeuristics>
  ISHeuristic(
    "sched",
    cl::desc("Choose scheduling style"),
    cl::init(defaultScheduling),
    cl::values(
      clEnumValN(defaultScheduling, "default",
                 "Target preferred scheduling style"),
      clEnumValN(noScheduling, "none",
                 "No scheduling: breadth first sequencing"),
      clEnumValN(simpleScheduling, "simple",
                 "Simple two pass scheduling: minimize critical path "
                 "and maximize processor utilization"),
      clEnumValN(simpleNoItinScheduling, "simple-noitin",
                 "Simple two pass scheduling: Same as simple "
                 "except using generic latency"),
      clEnumValN(listSchedulingBURR, "list-burr",
                 "Bottom up register reduction list scheduling"),
      clEnumValEnd));
} // namespace


namespace llvm {
  //===--------------------------------------------------------------------===//
  /// FunctionLoweringInfo - This contains information that is global to a
  /// function that is used when lowering a region of the function.
  class FunctionLoweringInfo {
  public:
    TargetLowering &TLI;
    Function &Fn;
    MachineFunction &MF;
    SSARegMap *RegMap;

    FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);

    /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
    std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

    /// ValueMap - Since we emit code for the function a basic block at a time,
    /// we must remember which virtual registers hold the values for
    /// cross-basic-block values.
    std::map<const Value*, unsigned> ValueMap;

    /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
    /// the entry block.  This allows the allocas to be efficiently referenced
    /// anywhere in the function.
    std::map<const AllocaInst*, int> StaticAllocaMap;

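    /// MakeReg - Create a new virtual register in the register class used for
    /// values of the specified type.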
    unsigned MakeReg(MVT::ValueType VT) {
      return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
    }

    unsigned CreateRegForValue(const Value *V) {
      MVT::ValueType VT = TLI.getValueType(V->getType());
      // The common case is that we will only create one register for this
      // value.  If we have that case, create and return the virtual register.
      unsigned NV = TLI.getNumElements(VT);
      if (NV == 1) {
        // If we are promoting this value, pick the next largest supported type.
        return MakeReg(TLI.getTypeToTransformTo(VT));
      }

      // If this value is represented with multiple target registers, make sure
      // to create enough consecutive registers of the right (smaller) type.
      unsigned NT = VT-1;  // Find the type to use.
      while (TLI.getNumElements((MVT::ValueType)NT) != 1)
        --NT;

      unsigned R = MakeReg((MVT::ValueType)NT);
      for (unsigned i = 1; i != NV; ++i)
        MakeReg((MVT::ValueType)NT);
      return R;
    }

    unsigned InitializeRegForValue(const Value *V) {
      unsigned &R = ValueMap[V];
      assert(R == 0 && "Already initialized this value register!");
      return R = CreateRegForValue(V);
    }
  };
}

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry)
      return false;  // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
  : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the value,
        // and if the size of the value is particularly small (<= 8 bytes),
        // round up to the size of the value for potentially better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData.  It serves primarily to 8-byte align doubles for X86.
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg &&"PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}



//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately.  We bunch
  /// them up and then emit token factor nodes when possible.  This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  std::vector<SDOperand> PendingLoads;

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData &TD;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  ///
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }


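  // getIntPtrConstant - Build a DAG constant of the target's pointer width.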
  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V) {
    SDOperand &N = NodeMap[V];
    if (N.Val) return N;

    const Type *VTy = V->getType();
    MVT::ValueType VT = TLI.getValueType(VTy);
    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
        visit(CE->getOpcode(), *CE);
        assert(N.Val && "visit didn't populate the ValueMap!");
        return N;
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
        return N = DAG.getGlobalAddress(GV, VT);
      } else if (isa<ConstantPointerNull>(C)) {
        return N = DAG.getConstant(0, TLI.getPointerTy());
      } else if (isa<UndefValue>(C)) {
        return N = DAG.getNode(ISD::UNDEF, VT);
      } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
        return N = DAG.getConstantFP(CFP->getValue(), VT);
      } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
        unsigned NumElements = PTy->getNumElements();
        MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
        MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

        // Now that we know the number and type of the elements, push a
        // Constant or ConstantFP node onto the ops list for each element of
        // the packed constant.
        std::vector<SDOperand> Ops;
        if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
          if (MVT::isFloatingPoint(PVT)) {
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantFP *El = cast<ConstantFP>(CP->getOperand(i));
              Ops.push_back(DAG.getConstantFP(El->getValue(), PVT));
            }
          } else {
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantIntegral *El =
                cast<ConstantIntegral>(CP->getOperand(i));
              Ops.push_back(DAG.getConstant(El->getRawValue(), PVT));
            }
          }
        } else {
          assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
          SDOperand Op;
          if (MVT::isFloatingPoint(PVT))
            Op = DAG.getConstantFP(0, PVT);
          else
            Op = DAG.getConstant(0, PVT);
          Ops.assign(NumElements, Op);
        }

        // Handle the case where we have a 1-element vector, in which
        // case we want to immediately turn it into a scalar constant.
        if (Ops.size() == 1) {
          return N = Ops[0];
        } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
          return N = DAG.getNode(ISD::ConstantVec, TVT, Ops);
        } else {
          // If the packed type isn't legal, then create a ConstantVec node with
          // generic Vector type instead.
          return N = DAG.getNode(ISD::ConstantVec, MVT::Vector, Ops);
        }
      } else {
        // Canonicalize all constant ints to be unsigned.
        return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
      }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      std::map<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
    }

    std::map<const Value*, unsigned>::const_iterator VMI =
      FuncInfo.ValueMap.find(V);
    assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

    unsigned InReg = VMI->second;

    // If this type is not legal, make it so now.
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
    if (DestVT < VT) {
      // Source must be expanded.  This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
    } else {
      if (DestVT > VT) { // Promotion case
        if (MVT::isFloatingPoint(VT))
          N = DAG.getNode(ISD::FP_ROUND, VT, N);
        else
          N = DAG.getNode(ISD::TRUNCATE, VT, N);
      }
    }

    return N;
  }

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // These all get lowered before this pass.
  void visitExtractElement(ExtractElementInst &I) { assert(0 && "TODO"); }
  void visitInsertElement(InsertElementInst &I) { assert(0 && "TODO"); }
  void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  //
  void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
  }
  void visitDiv(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV, 0);
  }
  void visitRem(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
  }
  void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, 0); }
  void visitOr (User &I) { visitBinary(I, ISD::OR,  0, 0); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, 0); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitShr(User &I) {
    visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);
  //

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm

void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }
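  // Operand #0 of the RET node is the chain; the (possibly promoted) return
  // values follow it.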
  std::vector<SDOperand> NewValues;
  NewValues.push_back(getRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDOperand RetOp = getValue(I.getOperand(i));

    // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
    // than sign/zero.
    if (MVT::isInteger(RetOp.getValueType()) &&
        RetOp.getValueType() < MVT::i64) {
      MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;

      if (I.getOperand(i)->getType()->isSigned())
        RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
      else
        RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
    }
    NewValues.push_back(RetOp);
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, NewValues));
}

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
  } else {
    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

    SDOperand Cond = getValue(I.getCondition());
    if (Succ1MBB == NextBlock) {
      // If the condition is false, fall through.  This means we should branch
      // if the condition is true to Succ #0.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
    } else if (Succ0MBB == NextBlock) {
      // If the condition is true, fall through.  This means we should branch if
      // the condition is false to Succ #1.  Invert the condition first.
      SDOperand True = DAG.getConstant(1, Cond.getValueType());
      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ1MBB)));
    } else {
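      // Neither successor is the fall-through block, so emit an explicit
      // two-way conditional branch.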
      std::vector<SDOperand> Ops;
      Ops.push_back(getRoot());
      Ops.push_back(Cond);
      Ops.push_back(DAG.getBasicBlock(Succ0MBB));
      Ops.push_back(DAG.getBasicBlock(Succ1MBB));
      DAG.setRoot(DAG.getNode(ISD::BRCONDTWOWAY, MVT::Other, Ops));
    }
  }
}

void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
  }
  visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
}

void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
                                       unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (Ty->isIntegral()) {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  } else if (Ty->isFloatingPoint()) {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  } else {
    const PackedType *PTy = cast<PackedType>(Ty);
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them.  Similarly, if the
    // abstract vector is going to turn into one that the target natively
    // supports, generate that type now so that Legalize doesn't have to deal
    // with that either.  These steps ensure that Legalize only has to handle
    // vector types in its Expand case.
    unsigned Opc = MVT::isFloatingPoint(PVT) ? FPOp : IntOp;
    if (NumElements == 1) {
      setValue(&I, DAG.getNode(Opc, PVT, Op1, Op2));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      setValue(&I, DAG.getNode(Opc, TVT, Op1, Op2));
    } else {
      SDOperand Num = DAG.getConstant(NumElements, MVT::i32);
      SDOperand Typ = DAG.getValueType(PVT);
      setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
    }
  }
}

void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

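  // Extend the shift amount to the type the target expects for shift counts.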
  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitSetCC(User &I, ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
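  // Use the unsigned condition code when the operands are unsigned.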
  ISD::CondCode Opcode = SignedOpcode;
  if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}

void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond     = getValue(I.getOperand(0));
  SDOperand TrueVal  = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                           TrueVal, FalseVal));
}

void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
  MVT::ValueType DestTy = TLI.getValueType(I.getType());

  if (N.getValueType() == DestTy) {
    setValue(&I, N);  // noop cast.
  } else if (DestTy == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
    SDOperand Zero = isInteger(SrcTy) ? DAG.getConstant(0, N.getValueType()) :
                                        DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcTy)) {
    if (isInteger(DestTy)) {  // Int -> Int cast
      if (DestTy < SrcTy)     // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
    } else {                  // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
    }
  } else {
    assert(isFloatingPoint(SrcTy) && "Unknown value type!");
    if (isFloatingPoint(DestTy)) {  // FP -> FP cast
      if (DestTy < SrcTy)           // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
    } else {                        // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));
    }
  }
}

void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();
  const Type *UIntPtrTy = TD.getIntPtrType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getRawValue() == 0) continue;

        uint64_t Offs;
        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
          Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
        else
          Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeSize(Ty);
      SDOperand IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      if (IdxN.getValueType() < N.getValueType()) {
        if (Idx->getType()->isSigned())
          IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
        else
          IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
      } else if (IdxN.getValueType() > N.getValueType())
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately.  This is a very common case.
      if (isPowerOf2_64(ElementSize)) {
        unsigned Amt = Log2_64(ElementSize);
        IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                           DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
        continue;
      }

      SDOperand Scale = getIntPtrConstant(ElementSize);
      IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
    }
  }
  setValue(&I, N);
}

void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
  unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
                            I.getAlignment());

  SDOperand AllocSize = getValue(I.getArraySize());
  MVT::ValueType IntPtr = TLI.getPointerTy();
  if (IntPtr < AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr > AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to the
  // stack alignment, ignore it and round the size of the allocation up to the
  // stack alignment size.  If the size is greater than the stack alignment, we
  // note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
  }

  std::vector<MVT::ValueType> VTs;
  VTs.push_back(AllocSize.getValueType());
  VTs.push_back(MVT::Other);
  std::vector<SDOperand> Ops;
  Ops.push_back(getRoot());
  Ops.push_back(AllocSize);
  Ops.push_back(getIntPtrConstant(Align));
  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}

/// getStringValue - Turn an LLVM constant pointer that eventually points to a
/// global into a string value.  Return an empty string if we can't do it.
///
static std::string getStringValue(Value *V, unsigned Offset = 0) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    if (GV->hasInitializer() && isa<ConstantArray>(GV->getInitializer())) {
      ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
      if (Init->isString()) {
        std::string Result = Init->getAsString();
        if (Offset < Result.size()) {
          // If we are pointing INTO the string, erase the beginning...
          Result.erase(Result.begin(), Result.begin()+Offset);

          // Take off the null terminator, and any string fragments after it.
          std::string::size_type NullPos = Result.find_first_of((char)0);
          if (NullPos != std::string::npos)
            Result.erase(Result.begin()+NullPos, Result.end());
          return Result;
        }
      }
    }
  } else if (Constant *C = dyn_cast<Constant>(V)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return getStringValue(GV, Offset);
    else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        // Turn a gep into the specified offset.
        if (CE->getNumOperands() == 3 &&
            cast<Constant>(CE->getOperand(1))->isNullValue() &&
            isa<ConstantInt>(CE->getOperand(2))) {
          return getStringValue(CE->getOperand(0),
                   Offset+cast<ConstantInt>(CE->getOperand(2))->getRawValue());
        }
      }
    }
  }
  return "";
}

void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));

  SDOperand Root;
  if (I.isVolatile())
    Root = getRoot();
  else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  const Type *Ty = I.getType();
  SDOperand L;

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them.
    if (NumElements == 1) {
      L = DAG.getLoad(PVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      L = DAG.getLoad(TVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
    } else {
      L = DAG.getVecLoad(NumElements, PVT, Root, Ptr,
                         DAG.getSrcValue(I.getOperand(0)));
    }
  } else {
    L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr,
                    DAG.getSrcValue(I.getOperand(0)));
  }
  setValue(&I, L);

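  // A volatile load updates the chain immediately; other loads are collected
  // in PendingLoads and token-factored together later by getRoot().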
  if (I.isVolatile())
    DAG.setRoot(L.getValue(1));
  else
    PendingLoads.push_back(L.getValue(1));
}


void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
                          DAG.getSrcValue(I.getOperand(1))));
}

/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
/// we want to emit this as a call to a named external function, return the
/// name; otherwise lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  switch (Intrinsic) {
  case Intrinsic::vastart: visitVAStart(I); return 0;
  case Intrinsic::vaend:   visitVAEnd(I); return 0;
  case Intrinsic::vacopy:  visitVACopy(I); return 0;
  case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
  case Intrinsic::frameaddress:  visitFrameReturnAddress(I, true); return 0;
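  // setjmp/longjmp become library calls.  Adding the boolean to the string
  // literal skips the leading '_' when the target does not use the
  // underscore-prefixed names.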
  case Intrinsic::setjmp:
    return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
    break;
  case Intrinsic::longjmp:
    return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
    break;
  case Intrinsic::memcpy:  visitMemIntrinsic(I, ISD::MEMCPY); return 0;
  case Intrinsic::memset:  visitMemIntrinsic(I, ISD::MEMSET); return 0;
  case Intrinsic::memmove: visitMemIntrinsic(I, ISD::MEMMOVE); return 0;

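  // Port and memory-mapped I/O reads produce both a value and a new chain, so
  // they are built as two-result nodes.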
  case Intrinsic::readport:
  case Intrinsic::readio: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(TLI.getValueType(I.getType()));
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    Ops.push_back(getValue(I.getOperand(1)));
    SDOperand Tmp = DAG.getNode(Intrinsic == Intrinsic::readport ?
                                ISD::READPORT : ISD::READIO, VTs, Ops);

    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::writeport:
  case Intrinsic::writeio:
    DAG.setRoot(DAG.getNode(Intrinsic == Intrinsic::writeport ?
                            ISD::WRITEPORT : ISD::WRITEIO, MVT::Other,
                            getRoot(), getValue(I.getOperand(1)),
                            getValue(I.getOperand(2))));
    return 0;

  case Intrinsic::dbg_stoppoint: {
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_debugger_stop";

    std::string fname = "<unknown>";
    std::vector<SDOperand> Ops;

    // Input Chain
    Ops.push_back(getRoot());

    // line number
    Ops.push_back(getValue(I.getOperand(2)));

    // column
    Ops.push_back(getValue(I.getOperand(3)));

    // filename/working dir
    // Pull the filename out of the compilation unit.
    const GlobalVariable *cunit = dyn_cast<GlobalVariable>(I.getOperand(4));
    if (cunit && cunit->hasInitializer()) {
      if (ConstantStruct *CS =
            dyn_cast<ConstantStruct>(cunit->getInitializer())) {
        if (CS->getNumOperands() > 0) {
          Ops.push_back(DAG.getString(getStringValue(CS->getOperand(3))));
          Ops.push_back(DAG.getString(getStringValue(CS->getOperand(4))));
        }
      }
    }

    if (Ops.size() == 5) // Found filename/workingdir.
      DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
    setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  }
  case Intrinsic::dbg_region_start:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_region_start";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  case Intrinsic::dbg_region_end:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_region_end";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  case Intrinsic::dbg_func_start:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_subprogram";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;
  case Intrinsic::dbg_declare:
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
    return 0;

  case Intrinsic::isunordered_f32:
  case Intrinsic::isunordered_f64:
    setValue(&I, DAG.getSetCC(MVT::i1, getValue(I.getOperand(1)),
                              getValue(I.getOperand(2)), ISD::SETUO));
    return 0;

  case Intrinsic::sqrt_f32:
  case Intrinsic::sqrt_f64:
    setValue(&I, DAG.getNode(ISD::FSQRT,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::pcmarker: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::readcyclecounter: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(MVT::i64);
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::bswap_i16:
  case Intrinsic::bswap_i32:
  case Intrinsic::bswap_i64:
    setValue(&I, DAG.getNode(ISD::BSWAP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::cttz_i8:
  case Intrinsic::cttz_i16:
  case Intrinsic::cttz_i32:
  case Intrinsic::cttz_i64:
    setValue(&I, DAG.getNode(ISD::CTTZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::ctlz_i8:
  case Intrinsic::ctlz_i16:
  case Intrinsic::ctlz_i32:
  case Intrinsic::ctlz_i64:
    setValue(&I, DAG.getNode(ISD::CTLZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::ctpop_i8:
  case Intrinsic::ctpop_i16:
  case Intrinsic::ctpop_i32:
  case Intrinsic::ctpop_i64:
    setValue(&I, DAG.getNode(ISD::CTPOP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::stacksave: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(TLI.getPointerTy());
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::stackrestore: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::prefetch:
    // FIXME: Currently discarding prefetches.
    return 0;
  default:
    std::cerr << I;
    assert(0 && "This intrinsic is not implemented yet!");
    return 0;
  }
}


void SelectionDAGLowering::visitCall(CallInst &I) {
  const char *RenameFn = 0;
  if (Function *F = I.getCalledFunction()) {
    if (F->isExternal())
      if (unsigned IID = F->getIntrinsicID()) {
        RenameFn = visitIntrinsicCall(I, IID);
        if (!RenameFn)
          return;
      } else {    // Not an LLVM intrinsic.
        const std::string &Name = F->getName();
        if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType()) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
            return;
          }
        } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType() &&
              TLI.isOperationLegal(ISD::FSIN,
                                TLI.getValueType(I.getOperand(1)->getType()))) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
            return;
          }
        } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType() &&
              TLI.isOperationLegal(ISD::FCOS,
                                TLI.getValueType(I.getOperand(1)->getType()))) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
            return;
          }
        }
      }
  } else if (isa<InlineAsm>(I.getOperand(0))) {
    visitInlineAsm(I);
    return;
  }

  SDOperand Callee;
  if (!RenameFn)
    Callee = getValue(I.getOperand(0));
  else
    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
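  // Gather the call operands as (value, LLVM type) pairs for LowerCallTo.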
Chris Lattner7a60d912005-01-07 07:47:53 +00001127 std::vector<std::pair<SDOperand, const Type*> > Args;
Chris Lattnercd6f0f42005-11-09 19:44:01 +00001128 Args.reserve(I.getNumOperands());
Chris Lattner7a60d912005-01-07 07:47:53 +00001129 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1130 Value *Arg = I.getOperand(i);
1131 SDOperand ArgNode = getValue(Arg);
1132 Args.push_back(std::make_pair(ArgNode, Arg->getType()));
1133 }
Misha Brukman835702a2005-04-21 22:36:52 +00001134
Nate Begemanf6565252005-03-26 01:29:23 +00001135 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
1136 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
Misha Brukman835702a2005-04-21 22:36:52 +00001137
Chris Lattner1f45cd72005-01-08 19:26:18 +00001138 std::pair<SDOperand,SDOperand> Result =
Chris Lattner111778e2005-05-12 19:56:57 +00001139 TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
Chris Lattner2e77db62005-05-13 18:50:42 +00001140 I.isTailCall(), Callee, Args, DAG);
Chris Lattner7a60d912005-01-07 07:47:53 +00001141 if (I.getType() != Type::VoidTy)
Chris Lattner1f45cd72005-01-08 19:26:18 +00001142 setValue(&I, Result.first);
1143 DAG.setRoot(Result.second);
Chris Lattner7a60d912005-01-07 07:47:53 +00001144}
1145
Chris Lattner476e67b2006-01-26 22:24:51 +00001146/// visitInlineAsm - Handle a call to an InlineAsm object.
1147///
1148void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
1149 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
1150
1151 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
1152 MVT::Other);
1153
1154 // Note, we treat inline asms both with and without side-effects as the same.
1155 // If an inline asm doesn't have side effects and doesn't access memory, we
 1156   // could choose to not chain it.
1157 bool hasSideEffects = IA->hasSideEffects();
1158
Chris Lattner3a5ed552006-02-01 01:28:23 +00001159 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
Chris Lattner476e67b2006-01-26 22:24:51 +00001160
 1161   /// AsmNodeOperands - Operands of the final INLINEASM node: operand 0 is the
 1162   /// input chain, operand 1 is the asm string, and the rest come in pairs of a
 1163   /// register and a flag word (bit #0 set if it is a use, bit #1 if it is a def).
1164 std::vector<SDOperand> AsmNodeOperands;
1165 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
1166 AsmNodeOperands.push_back(AsmStr);
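    // For example, an asm with one output and one input ends up with operands
    // roughly like { chain, asmstr, outreg, 2 /*ISDEF*/, inreg, 1 /*ISUSE*/ }.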
1167
1168 SDOperand Chain = getRoot();
1169 SDOperand Flag;
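    // Flag is threaded through the register copies and the INLINEASM node so
    // that they stay glued together when the DAG is scheduled.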
1170
Chris Lattner2e56e892006-01-31 02:03:41 +00001171 // Loop over all of the inputs, copying the operand values into the
1172 // appropriate registers and processing the output regs.
1173 unsigned RetValReg = 0;
1174 std::vector<std::pair<unsigned, Value*> > IndirectStoresToEmit;
1175 unsigned OpNum = 1;
1176 bool FoundOutputConstraint = false;
Chris Lattner3a5ed552006-02-01 01:28:23 +00001177 //std::set<unsigned> OutputRegs;
1178 //std::set<unsigned> InputRegs;
1179
Chris Lattner2e56e892006-01-31 02:03:41 +00001180 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
Chris Lattner3a5ed552006-02-01 01:28:23 +00001181 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
1182 std::string &ConstraintCode = Constraints[i].Codes[0];
1183 switch (Constraints[i].Type) {
1184 case InlineAsm::isOutput: {
1185 bool isEarlyClobber = Constraints[i].isEarlyClobber;
1186
Chris Lattner2e56e892006-01-31 02:03:41 +00001187       // Determine the register to copy this output out of after the asm.
1188 std::vector<unsigned> Regs =
Chris Lattner3a5ed552006-02-01 01:28:23 +00001189 TLI.getRegForInlineAsmConstraint(ConstraintCode);
Chris Lattner2e56e892006-01-31 02:03:41 +00001190 assert(Regs.size() == 1 && "Only handle simple regs right now!");
Chris Lattner3a5ed552006-02-01 01:28:23 +00001191 unsigned DestReg = Regs[0];
1192
1193 const Type *OpTy;
1194 if (!Constraints[i].isIndirectOutput) {
1195 assert(!FoundOutputConstraint &&
1196 "Cannot have multiple output constraints yet!");
1197 FoundOutputConstraint = true;
1198 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
1199
1200 RetValReg = DestReg;
1201 OpTy = I.getType();
1202 } else {
1203 IndirectStoresToEmit.push_back(std::make_pair(DestReg,
1204 I.getOperand(OpNum)));
1205 OpTy = I.getOperand(OpNum)->getType();
1206 OpTy = cast<PointerType>(OpTy)->getElementType();
1207 OpNum++; // Consumes a call operand.
1208 }
Chris Lattner2e56e892006-01-31 02:03:41 +00001209
1210 // Add information to the INLINEASM node to know that this register is
1211 // set.
Chris Lattner3a5ed552006-02-01 01:28:23 +00001212 AsmNodeOperands.push_back(DAG.getRegister(DestReg,
1213 TLI.getValueType(OpTy)));
Chris Lattner2e56e892006-01-31 02:03:41 +00001214 AsmNodeOperands.push_back(DAG.getConstant(2, MVT::i32)); // ISDEF
Chris Lattner2e56e892006-01-31 02:03:41 +00001215
Chris Lattner2e56e892006-01-31 02:03:41 +00001216 break;
1217 }
1218 case InlineAsm::isInput: {
Chris Lattner3a5ed552006-02-01 01:28:23 +00001219       Value *Operand = I.getOperand(OpNum);
 1220       const Type *OpTy = Operand->getType();
 1221       OpNum++;   // Consumes a call operand.
Chris Lattner2e56e892006-01-31 02:03:41 +00001222 // Copy the input into the appropriate register.
1223 std::vector<unsigned> Regs =
Chris Lattner3a5ed552006-02-01 01:28:23 +00001224 TLI.getRegForInlineAsmConstraint(ConstraintCode);
Chris Lattner2e56e892006-01-31 02:03:41 +00001225 assert(Regs.size() == 1 && "Only handle simple regs right now!");
Chris Lattner3a5ed552006-02-01 01:28:23 +00001226 unsigned SrcReg = Regs[0];
1227 Chain = DAG.getCopyToReg(Chain, SrcReg, getValue(Operand), Flag);
Chris Lattner2e56e892006-01-31 02:03:41 +00001228 Flag = Chain.getValue(1);
1229
1230 // Add information to the INLINEASM node to know that this register is
1231 // read.
Chris Lattner3a5ed552006-02-01 01:28:23 +00001232 AsmNodeOperands.push_back(DAG.getRegister(SrcReg,TLI.getValueType(OpTy)));
Chris Lattner2e56e892006-01-31 02:03:41 +00001233 AsmNodeOperands.push_back(DAG.getConstant(1, MVT::i32)); // ISUSE
1234 break;
1235 }
1236 case InlineAsm::isClobber:
1237 // Nothing to do.
1238 break;
1239 }
1240 }
Chris Lattner476e67b2006-01-26 22:24:51 +00001241
1242 // Finish up input operands.
1243 AsmNodeOperands[0] = Chain;
1244 if (Flag.Val) AsmNodeOperands.push_back(Flag);
1245
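    // Build the INLINEASM node itself; it produces a chain (MVT::Other) and a
    // flag (MVT::Flag) that the copies out of physregs below hang off of.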
1246 std::vector<MVT::ValueType> VTs;
1247 VTs.push_back(MVT::Other);
1248 VTs.push_back(MVT::Flag);
1249 Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands);
1250 Flag = Chain.getValue(1);
1251
Chris Lattner2e56e892006-01-31 02:03:41 +00001252 // If this asm returns a register value, copy the result from that register
1253 // and set it as the value of the call.
1254 if (RetValReg) {
1255 SDOperand Val = DAG.getCopyFromReg(Chain, RetValReg,
1256 TLI.getValueType(I.getType()), Flag);
1257 Chain = Val.getValue(1);
1258 Flag = Val.getValue(2);
1259 setValue(&I, Val);
1260 }
Chris Lattner476e67b2006-01-26 22:24:51 +00001261
Chris Lattner2e56e892006-01-31 02:03:41 +00001262 std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
1263
1264 // Process indirect outputs, first output all of the flagged copies out of
1265 // physregs.
1266 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
1267 Value *Ptr = IndirectStoresToEmit[i].second;
1268 const Type *Ty = cast<PointerType>(Ptr->getType())->getElementType();
1269 SDOperand Val = DAG.getCopyFromReg(Chain, IndirectStoresToEmit[i].first,
1270 TLI.getValueType(Ty), Flag);
1271 Chain = Val.getValue(1);
1272 Flag = Val.getValue(2);
1273 StoresToEmit.push_back(std::make_pair(Val, Ptr));
1274 OpNum++; // Consumes a call operand.
1275 }
1276
1277 // Emit the non-flagged stores from the physregs.
1278 std::vector<SDOperand> OutChains;
1279 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
1280 OutChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1281 StoresToEmit[i].first,
1282 getValue(StoresToEmit[i].second),
1283 DAG.getSrcValue(StoresToEmit[i].second)));
1284 if (!OutChains.empty())
1285 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
Chris Lattner476e67b2006-01-26 22:24:51 +00001286 DAG.setRoot(Chain);
1287}
1288
1289
Chris Lattner7a60d912005-01-07 07:47:53 +00001290void SelectionDAGLowering::visitMalloc(MallocInst &I) {
1291 SDOperand Src = getValue(I.getOperand(0));
1292
1293 MVT::ValueType IntPtr = TLI.getPointerTy();
Chris Lattnereccb73d2005-01-22 23:04:37 +00001294
1295 if (IntPtr < Src.getValueType())
1296 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
1297 else if (IntPtr > Src.getValueType())
1298 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
Chris Lattner7a60d912005-01-07 07:47:53 +00001299
1300 // Scale the source by the type size.
1301 uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
1302 Src = DAG.getNode(ISD::MUL, Src.getValueType(),
1303 Src, getIntPtrConstant(ElementSize));
1304
1305 std::vector<std::pair<SDOperand, const Type*> > Args;
1306 Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));
Chris Lattner1f45cd72005-01-08 19:26:18 +00001307
1308 std::pair<SDOperand,SDOperand> Result =
Chris Lattner2e77db62005-05-13 18:50:42 +00001309 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
Chris Lattner1f45cd72005-01-08 19:26:18 +00001310 DAG.getExternalSymbol("malloc", IntPtr),
1311 Args, DAG);
1312 setValue(&I, Result.first); // Pointers always fit in registers
1313 DAG.setRoot(Result.second);
Chris Lattner7a60d912005-01-07 07:47:53 +00001314}
1315
1316void SelectionDAGLowering::visitFree(FreeInst &I) {
1317 std::vector<std::pair<SDOperand, const Type*> > Args;
1318 Args.push_back(std::make_pair(getValue(I.getOperand(0)),
1319 TLI.getTargetData().getIntPtrType()));
1320 MVT::ValueType IntPtr = TLI.getPointerTy();
Chris Lattner1f45cd72005-01-08 19:26:18 +00001321 std::pair<SDOperand,SDOperand> Result =
Chris Lattner2e77db62005-05-13 18:50:42 +00001322 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
Chris Lattner1f45cd72005-01-08 19:26:18 +00001323 DAG.getExternalSymbol("free", IntPtr), Args, DAG);
1324 DAG.setRoot(Result.second);
Chris Lattner7a60d912005-01-07 07:47:53 +00001325}
1326
Chris Lattner13d7c252005-08-26 20:54:47 +00001327// InsertAtEndOfBasicBlock - This method should be implemented by targets that
1328// mark instructions with the 'usesCustomDAGSchedInserter' flag. These
1329// instructions are special in various ways, which require special support to
1330// insert. The specified MachineInstr is created but not inserted into any
1331// basic blocks, and the scheduler passes ownership of it to this method.
1332MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
1333 MachineBasicBlock *MBB) {
1334 std::cerr << "If a target marks an instruction with "
1335 "'usesCustomDAGSchedInserter', it must implement "
1336 "TargetLowering::InsertAtEndOfBasicBlock!\n";
1337 abort();
1338 return 0;
1339}
1340
Chris Lattner58cfd792005-01-09 00:00:49 +00001341void SelectionDAGLowering::visitVAStart(CallInst &I) {
Nate Begemane74795c2006-01-25 18:21:52 +00001342 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
1343 getValue(I.getOperand(1)),
1344 DAG.getSrcValue(I.getOperand(1))));
Chris Lattner58cfd792005-01-09 00:00:49 +00001345}
1346
1347void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
Nate Begemane74795c2006-01-25 18:21:52 +00001348 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
1349 getValue(I.getOperand(0)),
1350 DAG.getSrcValue(I.getOperand(0)));
1351 setValue(&I, V);
1352 DAG.setRoot(V.getValue(1));
Chris Lattner7a60d912005-01-07 07:47:53 +00001353}
1354
1355void SelectionDAGLowering::visitVAEnd(CallInst &I) {
Nate Begemane74795c2006-01-25 18:21:52 +00001356 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
1357 getValue(I.getOperand(1)),
1358 DAG.getSrcValue(I.getOperand(1))));
Chris Lattner7a60d912005-01-07 07:47:53 +00001359}
1360
1361void SelectionDAGLowering::visitVACopy(CallInst &I) {
Nate Begemane74795c2006-01-25 18:21:52 +00001362 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
1363 getValue(I.getOperand(1)),
1364 getValue(I.getOperand(2)),
1365 DAG.getSrcValue(I.getOperand(1)),
1366 DAG.getSrcValue(I.getOperand(2))));
Chris Lattner7a60d912005-01-07 07:47:53 +00001367}
1368
Chris Lattner58cfd792005-01-09 00:00:49 +00001369// It is always conservatively correct for llvm.returnaddress and
1370// llvm.frameaddress to return 0.
1371std::pair<SDOperand, SDOperand>
1372TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
1373 unsigned Depth, SelectionDAG &DAG) {
1374 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
Chris Lattner7a60d912005-01-07 07:47:53 +00001375}
1376
Chris Lattner29dcc712005-05-14 05:50:48 +00001377SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
Chris Lattner897cd7d2005-01-16 07:28:41 +00001378 assert(0 && "LowerOperation not implemented for this target!");
1379 abort();
Misha Brukman73e929f2005-02-17 21:39:27 +00001380 return SDOperand();
Chris Lattner897cd7d2005-01-16 07:28:41 +00001381}
1382
Nate Begeman595ec732006-01-28 03:14:31 +00001383SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
1384 SelectionDAG &DAG) {
1385 assert(0 && "CustomPromoteOperation not implemented for this target!");
1386 abort();
1387 return SDOperand();
1388}
1389
Chris Lattner58cfd792005-01-09 00:00:49 +00001390void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
1391 unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
1392 std::pair<SDOperand,SDOperand> Result =
Chris Lattner4108bb02005-01-17 19:43:36 +00001393 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
Chris Lattner58cfd792005-01-09 00:00:49 +00001394 setValue(&I, Result.first);
1395 DAG.setRoot(Result.second);
Chris Lattner7a60d912005-01-07 07:47:53 +00001396}
1397
Chris Lattner875def92005-01-11 05:56:49 +00001398void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
Reid Spencer3fd1b4c2005-11-30 05:21:10 +00001399#if 0
1400 // If the size of the cpy/move/set is constant (known)
1401 if (ConstantUInt* op3 = dyn_cast<ConstantUInt>(I.getOperand(3))) {
1402 uint64_t size = op3->getValue();
1403 switch (Op) {
1404 case ISD::MEMSET:
1405 if (size <= TLI.getMaxStoresPerMemSet()) {
1406 if (ConstantUInt* op4 = dyn_cast<ConstantUInt>(I.getOperand(4))) {
1407 uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
1408 uint64_t align = op4.getValue();
1409 while (size > align) {
1410 size -=align;
1411 }
1412 Value *SrcV = I.getOperand(0);
1413 SDOperand Src = getValue(SrcV);
1414 SDOperand Ptr = getValue(I.getOperand(1));
1415 DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
1416 DAG.getSrcValue(I.getOperand(1))));
1417 }
1418 break;
1419 }
1420 break; // don't do this optimization, use a normal memset
1421 case ISD::MEMMOVE:
1422 case ISD::MEMCPY:
1423 break; // FIXME: not implemented yet
1424 }
1425 }
1426#endif
1427
1428 // Non-optimized version
Chris Lattner875def92005-01-11 05:56:49 +00001429 std::vector<SDOperand> Ops;
Chris Lattner4108bb02005-01-17 19:43:36 +00001430 Ops.push_back(getRoot());
Chris Lattner875def92005-01-11 05:56:49 +00001431 Ops.push_back(getValue(I.getOperand(1)));
1432 Ops.push_back(getValue(I.getOperand(2)));
1433 Ops.push_back(getValue(I.getOperand(3)));
1434 Ops.push_back(getValue(I.getOperand(4)));
1435 DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
Chris Lattner7a60d912005-01-07 07:47:53 +00001436}
1437
Chris Lattner875def92005-01-11 05:56:49 +00001438//===----------------------------------------------------------------------===//
1439// SelectionDAGISel code
1440//===----------------------------------------------------------------------===//
Chris Lattner7a60d912005-01-07 07:47:53 +00001441
1442unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
1443 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
1444}
1445
Chris Lattnerc9950c12005-08-17 06:37:43 +00001446void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
Chris Lattner1a908c82005-08-18 17:35:14 +00001447 // FIXME: we only modify the CFG to split critical edges. This
 1448   // invalidates dom and loop info, which ought to be updated here.
Chris Lattnerc9950c12005-08-17 06:37:43 +00001449}
Chris Lattner7a60d912005-01-07 07:47:53 +00001450
Chris Lattner35397782005-12-05 07:10:48 +00001451
1452/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
1453/// casting to the type of GEPI.
1454static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
1455 Value *Ptr, Value *PtrOffset) {
1456 if (V) return V; // Already computed.
1457
1458 BasicBlock::iterator InsertPt;
1459 if (BB == GEPI->getParent()) {
 1460     // If inserting into the GEP's block, insert right after the GEP.
1461 InsertPt = GEPI;
1462 ++InsertPt;
1463 } else {
1464 // Otherwise, insert at the top of BB, after any PHI nodes
1465 InsertPt = BB->begin();
1466 while (isa<PHINode>(InsertPt)) ++InsertPt;
1467 }
1468
Chris Lattnerbe73d6e2005-12-08 08:00:12 +00001469 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
1470 // BB so that there is only one value live across basic blocks (the cast
1471 // operand).
1472 if (CastInst *CI = dyn_cast<CastInst>(Ptr))
1473 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
1474 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
1475
Chris Lattner35397782005-12-05 07:10:48 +00001476 // Add the offset, cast it to the right type.
1477 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
1478 Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
1479 return V = Ptr;
1480}
1481
1482
1483/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
1484/// selection, we want to be a bit careful about some things. In particular, if
1485/// we have a GEP instruction that is used in a different block than it is
1486/// defined, the addressing expression of the GEP cannot be folded into loads or
1487/// stores that use it. In this case, decompose the GEP and move constant
1488/// indices into blocks that use it.
1489static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
1490 const TargetData &TD) {
Chris Lattner35397782005-12-05 07:10:48 +00001491 // If this GEP is only used inside the block it is defined in, there is no
1492 // need to rewrite it.
1493 bool isUsedOutsideDefBB = false;
1494 BasicBlock *DefBB = GEPI->getParent();
1495 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
1496 UI != E; ++UI) {
1497 if (cast<Instruction>(*UI)->getParent() != DefBB) {
1498 isUsedOutsideDefBB = true;
1499 break;
1500 }
1501 }
1502 if (!isUsedOutsideDefBB) return;
1503
1504 // If this GEP has no non-zero constant indices, there is nothing we can do,
1505 // ignore it.
1506 bool hasConstantIndex = false;
1507 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
1508 E = GEPI->op_end(); OI != E; ++OI) {
1509 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
1510 if (CI->getRawValue()) {
1511 hasConstantIndex = true;
1512 break;
1513 }
1514 }
Chris Lattnerf1a54c02005-12-11 09:05:13 +00001515 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
1516 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return;
Chris Lattner35397782005-12-05 07:10:48 +00001517
1518 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
1519 // constant offset (which we now know is non-zero) and deal with it later.
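    // Conceptually, the GEP becomes cast(Ptr) + sum(VariableIdx * ElementSize),
    // emitted here, plus a ConstantOffset add that is sunk into the using blocks
    // below.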
1520 uint64_t ConstantOffset = 0;
1521 const Type *UIntPtrTy = TD.getIntPtrType();
1522 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
1523 const Type *Ty = GEPI->getOperand(0)->getType();
1524
1525 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
1526 E = GEPI->op_end(); OI != E; ++OI) {
1527 Value *Idx = *OI;
1528 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
1529 unsigned Field = cast<ConstantUInt>(Idx)->getValue();
1530 if (Field)
1531 ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
1532 Ty = StTy->getElementType(Field);
1533 } else {
1534 Ty = cast<SequentialType>(Ty)->getElementType();
1535
1536 // Handle constant subscripts.
1537 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
1538 if (CI->getRawValue() == 0) continue;
1539
1540 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
1541 ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
1542 else
1543 ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
1544 continue;
1545 }
1546
1547 // Ptr = Ptr + Idx * ElementSize;
1548
1549 // Cast Idx to UIntPtrTy if needed.
1550 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
1551
1552 uint64_t ElementSize = TD.getTypeSize(Ty);
1553 // Mask off bits that should not be set.
1554 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
1555 Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
1556
1557 // Multiply by the element size and add to the base.
1558 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
1559 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
1560 }
1561 }
1562
1563 // Make sure that the offset fits in uintptr_t.
1564 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
1565 Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);
1566
1567 // Okay, we have now emitted all of the variable index parts to the BB that
1568 // the GEP is defined in. Loop over all of the using instructions, inserting
 1569   // an "add Ptr, ConstantOffset" into each block that uses it and updating the
Chris Lattnerbe73d6e2005-12-08 08:00:12 +00001570 // instruction to use the newly computed value, making GEPI dead. When the
1571 // user is a load or store instruction address, we emit the add into the user
1572 // block, otherwise we use a canonical version right next to the gep (these
1573 // won't be foldable as addresses, so we might as well share the computation).
1574
Chris Lattner35397782005-12-05 07:10:48 +00001575 std::map<BasicBlock*,Value*> InsertedExprs;
1576 while (!GEPI->use_empty()) {
1577 Instruction *User = cast<Instruction>(GEPI->use_back());
Chris Lattnerbe73d6e2005-12-08 08:00:12 +00001578
1579 // If this use is not foldable into the addressing mode, use a version
1580 // emitted in the GEP block.
1581 Value *NewVal;
1582 if (!isa<LoadInst>(User) &&
1583 (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
1584 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
1585 Ptr, PtrOffset);
1586 } else {
1587 // Otherwise, insert the code in the User's block so it can be folded into
1588 // any users in that block.
1589 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
Chris Lattner35397782005-12-05 07:10:48 +00001590 User->getParent(), GEPI,
1591 Ptr, PtrOffset);
Chris Lattner35397782005-12-05 07:10:48 +00001592 }
Chris Lattnerbe73d6e2005-12-08 08:00:12 +00001593 User->replaceUsesOfWith(GEPI, NewVal);
1594 }
Chris Lattner35397782005-12-05 07:10:48 +00001595
1596 // Finally, the GEP is dead, remove it.
1597 GEPI->eraseFromParent();
1598}
1599
Chris Lattner7a60d912005-01-07 07:47:53 +00001600bool SelectionDAGISel::runOnFunction(Function &Fn) {
1601 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
1602 RegMap = MF.getSSARegMap();
1603 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
1604
Chris Lattner35397782005-12-05 07:10:48 +00001605 // First, split all critical edges for PHI nodes with incoming values that are
 1606   // constants, so that the load of the constant into a vreg will not be placed
1607 // into MBBs that are used some other way.
1608 //
1609 // In this pass we also look for GEP instructions that are used across basic
 1610   // blocks and rewrite them to improve basic-block-at-a-time selection.
1611 //
Chris Lattner1a908c82005-08-18 17:35:14 +00001612 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
1613 PHINode *PN;
Chris Lattner35397782005-12-05 07:10:48 +00001614 BasicBlock::iterator BBI;
1615 for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
Chris Lattner1a908c82005-08-18 17:35:14 +00001616 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1617 if (isa<Constant>(PN->getIncomingValue(i)))
1618 SplitCriticalEdge(PN->getIncomingBlock(i), BB);
Chris Lattner35397782005-12-05 07:10:48 +00001619
1620 for (BasicBlock::iterator E = BB->end(); BBI != E; )
1621 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
1622 OptimizeGEPExpression(GEPI, TLI.getTargetData());
Chris Lattner1a908c82005-08-18 17:35:14 +00001623 }
Chris Lattnercd6f0f42005-11-09 19:44:01 +00001624
Chris Lattner7a60d912005-01-07 07:47:53 +00001625 FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
1626
1627 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
1628 SelectBasicBlock(I, MF, FuncInfo);
Misha Brukman835702a2005-04-21 22:36:52 +00001629
Chris Lattner7a60d912005-01-07 07:47:53 +00001630 return true;
1631}
1632
1633
Chris Lattner718b5c22005-01-13 17:59:43 +00001634SDOperand SelectionDAGISel::
1635CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
Chris Lattner613f79f2005-01-11 22:03:46 +00001636 SDOperand Op = SDL.getValue(V);
Chris Lattnere727af02005-01-13 20:50:02 +00001637 assert((Op.getOpcode() != ISD::CopyFromReg ||
Chris Lattner33182322005-08-16 21:55:35 +00001638 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
Chris Lattnere727af02005-01-13 20:50:02 +00001639 "Copy from a reg to the same reg!");
Chris Lattner33182322005-08-16 21:55:35 +00001640
1641 // If this type is not legal, we must make sure to not create an invalid
1642 // register use.
1643 MVT::ValueType SrcVT = Op.getValueType();
1644 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
1645 SelectionDAG &DAG = SDL.DAG;
1646 if (SrcVT == DestVT) {
1647 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
1648 } else if (SrcVT < DestVT) {
1649 // The src value is promoted to the register.
Chris Lattnerba28c272005-08-17 06:06:25 +00001650 if (MVT::isFloatingPoint(SrcVT))
1651 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
1652 else
Chris Lattnera66403d2005-09-02 00:19:37 +00001653 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
Chris Lattner33182322005-08-16 21:55:35 +00001654 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
1655 } else {
1656 // The src value is expanded into multiple registers.
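      // Note that this assumes expansion into exactly two parts, landing in Reg
      // and Reg+1.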
1657 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
1658 Op, DAG.getConstant(0, MVT::i32));
1659 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
1660 Op, DAG.getConstant(1, MVT::i32));
1661 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
1662 return DAG.getCopyToReg(Op, Reg+1, Hi);
1663 }
Chris Lattner7a60d912005-01-07 07:47:53 +00001664}
1665
Chris Lattner16f64df2005-01-17 17:15:02 +00001666void SelectionDAGISel::
1667LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
1668 std::vector<SDOperand> &UnorderedChains) {
1669 // If this is the entry block, emit arguments.
1670 Function &F = *BB->getParent();
Chris Lattnere3c2cf42005-01-17 17:55:19 +00001671 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
Chris Lattner6871b232005-10-30 19:42:35 +00001672 SDOperand OldRoot = SDL.DAG.getRoot();
1673 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
Chris Lattner16f64df2005-01-17 17:15:02 +00001674
Chris Lattner6871b232005-10-30 19:42:35 +00001675 unsigned a = 0;
1676 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
1677 AI != E; ++AI, ++a)
1678 if (!AI->use_empty()) {
1679 SDL.setValue(AI, Args[a]);
Chris Lattnerd4382f02005-09-13 19:30:54 +00001680
Chris Lattner6871b232005-10-30 19:42:35 +00001681 // If this argument is live outside of the entry block, insert a copy from
 1682       // wherever we got it to the vreg that other BBs will reference it as.
1683 if (FuncInfo.ValueMap.count(AI)) {
1684 SDOperand Copy =
1685 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
1686 UnorderedChains.push_back(Copy);
1687 }
Chris Lattnere3c2cf42005-01-17 17:55:19 +00001688 }
Chris Lattner6871b232005-10-30 19:42:35 +00001689
1690 // Next, if the function has live ins that need to be copied into vregs,
1691 // emit the copies now, into the top of the block.
1692 MachineFunction &MF = SDL.DAG.getMachineFunction();
1693 if (MF.livein_begin() != MF.livein_end()) {
1694 SSARegMap *RegMap = MF.getSSARegMap();
1695 const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo();
1696 for (MachineFunction::livein_iterator LI = MF.livein_begin(),
1697 E = MF.livein_end(); LI != E; ++LI)
1698 if (LI->second)
1699 MRI.copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second,
1700 LI->first, RegMap->getRegClass(LI->second));
Chris Lattner16f64df2005-01-17 17:15:02 +00001701 }
Chris Lattner6871b232005-10-30 19:42:35 +00001702
1703 // Finally, if the target has anything special to do, allow it to do so.
1704 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
Chris Lattner16f64df2005-01-17 17:15:02 +00001705}
1706
1707
Chris Lattner7a60d912005-01-07 07:47:53 +00001708void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
1709 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
1710 FunctionLoweringInfo &FuncInfo) {
1711 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
Chris Lattner718b5c22005-01-13 17:59:43 +00001712
1713 std::vector<SDOperand> UnorderedChains;
Misha Brukman835702a2005-04-21 22:36:52 +00001714
Chris Lattner6871b232005-10-30 19:42:35 +00001715 // Lower any arguments needed in this block if this is the entry block.
1716 if (LLVMBB == &LLVMBB->getParent()->front())
1717 LowerArguments(LLVMBB, SDL, UnorderedChains);
Chris Lattner7a60d912005-01-07 07:47:53 +00001718
1719 BB = FuncInfo.MBBMap[LLVMBB];
1720 SDL.setCurrentBasicBlock(BB);
1721
1722 // Lower all of the non-terminator instructions.
1723 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
1724 I != E; ++I)
1725 SDL.visit(*I);
1726
1727 // Ensure that all instructions which are used outside of their defining
1728 // blocks are available as virtual registers.
1729 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
Chris Lattner613f79f2005-01-11 22:03:46 +00001730 if (!I->use_empty() && !isa<PHINode>(I)) {
Chris Lattnera2c5d912005-01-09 01:16:24 +00001731 std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
Chris Lattner7a60d912005-01-07 07:47:53 +00001732 if (VMI != FuncInfo.ValueMap.end())
Chris Lattner718b5c22005-01-13 17:59:43 +00001733 UnorderedChains.push_back(
1734 CopyValueToVirtualRegister(SDL, I, VMI->second));
Chris Lattner7a60d912005-01-07 07:47:53 +00001735 }
1736
1737 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
1738 // ensure constants are generated when needed. Remember the virtual registers
1739 // that need to be added to the Machine PHI nodes as input. We cannot just
1740 // directly add them, because expansion might result in multiple MBB's for one
1741 // BB. As such, the start of the BB might correspond to a different MBB than
1742 // the end.
Misha Brukman835702a2005-04-21 22:36:52 +00001743 //
Chris Lattner7a60d912005-01-07 07:47:53 +00001744
1745 // Emit constants only once even if used by multiple PHI nodes.
1746 std::map<Constant*, unsigned> ConstantsOut;
1747
 1748   // Check successor nodes' PHI nodes that expect a constant to be available from
1749 // this block.
1750 TerminatorInst *TI = LLVMBB->getTerminator();
1751 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
1752 BasicBlock *SuccBB = TI->getSuccessor(succ);
1753 MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
1754 PHINode *PN;
1755
1756 // At this point we know that there is a 1-1 correspondence between LLVM PHI
1757 // nodes and Machine PHI nodes, but the incoming operands have not been
1758 // emitted yet.
1759 for (BasicBlock::iterator I = SuccBB->begin();
Chris Lattner8ea875f2005-01-07 21:34:19 +00001760 (PN = dyn_cast<PHINode>(I)); ++I)
1761 if (!PN->use_empty()) {
1762 unsigned Reg;
1763 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
1764 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
1765 unsigned &RegOut = ConstantsOut[C];
1766 if (RegOut == 0) {
1767 RegOut = FuncInfo.CreateRegForValue(C);
Chris Lattner718b5c22005-01-13 17:59:43 +00001768 UnorderedChains.push_back(
1769 CopyValueToVirtualRegister(SDL, C, RegOut));
Chris Lattner8ea875f2005-01-07 21:34:19 +00001770 }
1771 Reg = RegOut;
1772 } else {
1773 Reg = FuncInfo.ValueMap[PHIOp];
Chris Lattnera2c5d912005-01-09 01:16:24 +00001774 if (Reg == 0) {
Misha Brukman835702a2005-04-21 22:36:52 +00001775 assert(isa<AllocaInst>(PHIOp) &&
Chris Lattnera2c5d912005-01-09 01:16:24 +00001776 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
1777 "Didn't codegen value into a register!??");
1778 Reg = FuncInfo.CreateRegForValue(PHIOp);
Chris Lattner718b5c22005-01-13 17:59:43 +00001779 UnorderedChains.push_back(
1780 CopyValueToVirtualRegister(SDL, PHIOp, Reg));
Chris Lattnera2c5d912005-01-09 01:16:24 +00001781 }
Chris Lattner7a60d912005-01-07 07:47:53 +00001782 }
Misha Brukman835702a2005-04-21 22:36:52 +00001783
Chris Lattner8ea875f2005-01-07 21:34:19 +00001784         // Remember that this register needs to be added to the machine PHI node as
1785 // the input for this MBB.
1786 unsigned NumElements =
1787 TLI.getNumElements(TLI.getValueType(PN->getType()));
1788 for (unsigned i = 0, e = NumElements; i != e; ++i)
1789 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
Chris Lattner7a60d912005-01-07 07:47:53 +00001790 }
Chris Lattner7a60d912005-01-07 07:47:53 +00001791 }
1792 ConstantsOut.clear();
1793
Chris Lattner718b5c22005-01-13 17:59:43 +00001794 // Turn all of the unordered chains into one factored node.
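    // A TokenFactor takes multiple chains and produces a single chain without
    // imposing any ordering among its inputs.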
Chris Lattner24516842005-01-13 19:53:14 +00001795 if (!UnorderedChains.empty()) {
Chris Lattnerb7cad902005-11-09 05:03:03 +00001796 SDOperand Root = SDL.getRoot();
1797 if (Root.getOpcode() != ISD::EntryToken) {
1798 unsigned i = 0, e = UnorderedChains.size();
1799 for (; i != e; ++i) {
1800 assert(UnorderedChains[i].Val->getNumOperands() > 1);
1801 if (UnorderedChains[i].Val->getOperand(0) == Root)
1802 break; // Don't add the root if we already indirectly depend on it.
1803 }
1804
1805 if (i == e)
1806 UnorderedChains.push_back(Root);
1807 }
Chris Lattner718b5c22005-01-13 17:59:43 +00001808 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
1809 }
1810
Chris Lattner7a60d912005-01-07 07:47:53 +00001811 // Lower the terminator after the copies are emitted.
1812 SDL.visit(*LLVMBB->getTerminator());
Chris Lattner4108bb02005-01-17 19:43:36 +00001813
1814 // Make sure the root of the DAG is up-to-date.
1815 DAG.setRoot(SDL.getRoot());
Chris Lattner7a60d912005-01-07 07:47:53 +00001816}
1817
1818void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
1819 FunctionLoweringInfo &FuncInfo) {
Jim Laskey219d5592006-01-04 22:28:25 +00001820 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
Chris Lattner7a60d912005-01-07 07:47:53 +00001821 CurDAG = &DAG;
1822 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
1823
1824 // First step, lower LLVM code to some DAG. This DAG may use operations and
1825 // types that are not supported by the target.
1826 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
1827
Chris Lattnerbcfebeb2005-10-10 16:47:10 +00001828 // Run the DAG combiner in pre-legalize mode.
1829 DAG.Combine(false);
Nate Begeman007c6502005-09-07 00:15:36 +00001830
Chris Lattner7a60d912005-01-07 07:47:53 +00001831 DEBUG(std::cerr << "Lowered selection DAG:\n");
1832 DEBUG(DAG.dump());
1833
1834 // Second step, hack on the DAG until it only uses operations and types that
1835 // the target supports.
Chris Lattnerffcb0ae2005-01-23 04:36:26 +00001836 DAG.Legalize();
Chris Lattner7a60d912005-01-07 07:47:53 +00001837
1838 DEBUG(std::cerr << "Legalized selection DAG:\n");
1839 DEBUG(DAG.dump());
1840
Chris Lattnerbcfebeb2005-10-10 16:47:10 +00001841 // Run the DAG combiner in post-legalize mode.
1842 DAG.Combine(true);
Nate Begeman007c6502005-09-07 00:15:36 +00001843
Evan Cheng739a6a42006-01-21 02:32:06 +00001844 if (ViewISelDAGs) DAG.viewGraph();
Chris Lattner6bd8fd02005-10-05 06:09:10 +00001845
Chris Lattner5ca31d92005-03-30 01:10:47 +00001846 // Third, instruction select all of the operations to machine code, adding the
1847 // code to the MachineBasicBlock.
Chris Lattner7a60d912005-01-07 07:47:53 +00001848 InstructionSelectBasicBlock(DAG);
1849
Chris Lattner7a60d912005-01-07 07:47:53 +00001850 DEBUG(std::cerr << "Selected machine code:\n");
1851 DEBUG(BB->dump());
1852
Chris Lattner5ca31d92005-03-30 01:10:47 +00001853   // Next, now that we know which MBB the LLVM BB expanded into last, update
Chris Lattner7a60d912005-01-07 07:47:53 +00001854 // PHI nodes in successors.
1855 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
1856 MachineInstr *PHI = PHINodesToUpdate[i].first;
1857 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
1858 "This is not a machine PHI node that we are updating!");
1859 PHI->addRegOperand(PHINodesToUpdate[i].second);
1860 PHI->addMachineBasicBlockOperand(BB);
1861 }
Chris Lattner5ca31d92005-03-30 01:10:47 +00001862
1863 // Finally, add the CFG edges from the last selected MBB to the successor
1864 // MBBs.
1865 TerminatorInst *TI = LLVMBB->getTerminator();
1866 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
1867 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[TI->getSuccessor(i)];
1868 BB->addSuccessor(Succ0MBB);
1869 }
Chris Lattner7a60d912005-01-07 07:47:53 +00001870}
Evan Cheng739a6a42006-01-21 02:32:06 +00001871
1872//===----------------------------------------------------------------------===//
1873/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
1874/// target node in the graph.
1875void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
1876 if (ViewSchedDAGs) DAG.viewGraph();
Evan Chengc1e1d972006-01-23 07:01:07 +00001877 ScheduleDAG *SL = NULL;
1878
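    // Pick a scheduler based on ISHeuristic; defaultScheduling defers to the
    // target's stated scheduling preference.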
1879 switch (ISHeuristic) {
1880 default: assert(0 && "Unrecognized scheduling heuristic");
Evan Chenga6eff8a2006-01-25 09:12:57 +00001881 case defaultScheduling:
1882 if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
1883 SL = createSimpleDAGScheduler(noScheduling, DAG, BB);
1884 else /* TargetLowering::SchedulingForRegPressure */
1885 SL = createBURRListDAGScheduler(DAG, BB);
1886 break;
Evan Chengc1e1d972006-01-23 07:01:07 +00001887 case noScheduling:
1888 case simpleScheduling:
1889 case simpleNoItinScheduling:
1890 SL = createSimpleDAGScheduler(ISHeuristic, DAG, BB);
1891 break;
Evan Cheng31272342006-01-23 08:26:10 +00001892 case listSchedulingBURR:
1893 SL = createBURRListDAGScheduler(DAG, BB);
Evan Chengc1e1d972006-01-23 07:01:07 +00001894 }
Chris Lattnere23928c2006-01-21 19:12:11 +00001895 BB = SL->Run();
Evan Cheng739a6a42006-01-21 02:32:06 +00001896}