//===-- InstSelectSimple.cpp - A simple instruction selector for SparcV8 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple peephole instruction selector for the V8 target.
//
//===----------------------------------------------------------------------===//

#include "SparcV8.h"
#include "SparcV8InstrInfo.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicLowering.h"
#include "llvm/Pass.h"
#include "llvm/Constants.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/CFG.h"
using namespace llvm;

namespace {
  struct V8ISel : public FunctionPass, public InstVisitor<V8ISel> {
    TargetMachine &TM;
    MachineFunction *F;                 // The function we are compiling into
    MachineBasicBlock *BB;              // The current MBB we are compiling

    std::map<Value*, unsigned> RegMap;  // Mapping between Val's and SSA Regs

    // MBBMap - Mapping between LLVM BB -> Machine BB
    std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;

    V8ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}

    /// runOnFunction - Top level implementation of instruction selection for
    /// the entire function.
    ///
    bool runOnFunction(Function &Fn);

    virtual const char *getPassName() const {
      return "SparcV8 Simple Instruction Selection";
    }

    /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
    /// constant expression GEP support.
    ///
    void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                          Value *Src, User::op_iterator IdxBegin,
                          User::op_iterator IdxEnd, unsigned TargetReg);

    /// visitBasicBlock - This method is called when we are visiting a new basic
    /// block.  This simply creates a new MachineBasicBlock to emit code into
    /// and adds it to the current MachineFunction.  Subsequent visit* for
    /// instructions will be invoked for all instructions in the basic block.
    ///
    void visitBasicBlock(BasicBlock &LLVM_BB) {
      BB = MBBMap[&LLVM_BB];
    }

    void visitBinaryOperator(Instruction &I);
    void visitShiftInstruction(Instruction &I) { visitBinaryOperator(I); }
    void visitSetCondInst(Instruction &I);
    void visitCallInst(CallInst &I);
    void visitReturnInst(ReturnInst &I);
    void visitBranchInst(BranchInst &I);
    void visitCastInst(CastInst &I);
    void visitLoadInst(LoadInst &I);
    void visitStoreInst(StoreInst &I);
    void visitPHINode(PHINode &I) {}    // PHI nodes handled by second pass
    void visitGetElementPtrInst(GetElementPtrInst &I);

    void visitInstruction(Instruction &I) {
      std::cerr << "Unhandled instruction: " << I;
      abort();
    }

    /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
    /// function, lowering any calls to unknown intrinsic functions into the
    /// equivalent LLVM code.
    void LowerUnknownIntrinsicFunctionCalls(Function &F);
    void visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI);

    void LoadArgumentsToVirtualRegs(Function *F);

    /// copyConstantToRegister - Output the instructions required to put the
    /// specified constant into the specified register.
    ///
    void copyConstantToRegister(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator IP,
                                Constant *C, unsigned R);

    /// makeAnotherReg - This method returns the next register number we haven't
    /// yet used.
    ///
    /// Long values are handled somewhat specially.  They are always allocated
    /// as pairs of 32 bit integer values.  The register number returned is the
    /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits
    /// of the long value.
    ///
    unsigned makeAnotherReg(const Type *Ty) {
      assert(dynamic_cast<const SparcV8RegisterInfo*>(TM.getRegisterInfo()) &&
             "Current target doesn't have SparcV8 reg info??");
      const SparcV8RegisterInfo *MRI =
        static_cast<const SparcV8RegisterInfo*>(TM.getRegisterInfo());
      if (Ty == Type::LongTy || Ty == Type::ULongTy) {
        const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
        // Create the lower part
        F->getSSARegMap()->createVirtualRegister(RC);
        // Create the upper part.
        return F->getSSARegMap()->createVirtualRegister(RC)-1;
      }

      // Add the mapping of regnumber => reg class to MachineFunction
      const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
      return F->getSSARegMap()->createVirtualRegister(RC);
    }

    unsigned getReg(Value &V) { return getReg (&V); } // allow refs.
    unsigned getReg(Value *V) {
      // Just append to the end of the current bb.
      MachineBasicBlock::iterator It = BB->end();
      return getReg(V, BB, It);
    }
    unsigned getReg(Value *V, MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator IPt) {
      unsigned &Reg = RegMap[V];
      if (Reg == 0) {
        Reg = makeAnotherReg(V->getType());
        RegMap[V] = Reg;
      }
      // If this operand is a constant, emit the code to copy the constant into
      // the register here...
      //
      if (Constant *C = dyn_cast<Constant>(V)) {
        copyConstantToRegister(MBB, IPt, C, Reg);
        RegMap.erase(V);  // Assign a new name to this constant if ref'd again
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
        // Move the address of the global into the register
        unsigned TmpReg = makeAnotherReg(V->getType());
        BuildMI (*MBB, IPt, V8::SETHIi, 1, TmpReg).addGlobalAddress (GV);
        BuildMI (*MBB, IPt, V8::ORri, 2, Reg).addReg (TmpReg)
          .addGlobalAddress (GV);
        RegMap.erase(V);  // Assign a new name to this address if ref'd again
      }

      return Reg;
    }
  };
}

FunctionPass *llvm::createSparcV8SimpleInstructionSelector(TargetMachine &TM) {
  return new V8ISel(TM);
}

enum TypeClass {
  cByte, cShort, cInt, cLong, cFloat, cDouble
};

static TypeClass getClass (const Type *T) {
  switch (T->getPrimitiveID ()) {
  case Type::UByteTyID:  case Type::SByteTyID:  return cByte;
  case Type::UShortTyID: case Type::ShortTyID:  return cShort;
  case Type::PointerTyID:
  case Type::UIntTyID:   case Type::IntTyID:    return cInt;
  case Type::ULongTyID:  case Type::LongTyID:   return cLong;
  case Type::FloatTyID:  return cFloat;
  case Type::DoubleTyID: return cDouble;
  default:
    assert (0 && "Type of unknown class passed to getClass?");
    return cByte;
  }
}

static TypeClass getClassB(const Type *T) {
  if (T == Type::BoolTy) return cByte;
  return getClass(T);
}

/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
void V8ISel::copyConstantToRegister(MachineBasicBlock *MBB,
                                    MachineBasicBlock::iterator IP,
                                    Constant *C, unsigned R) {
  if (C->getType()->isIntegral ()) {
    uint64_t Val;
    if (C->getType() == Type::BoolTy) {
      Val = (C == ConstantBool::True);
    } else {
      ConstantInt *CI = dyn_cast<ConstantInt> (C);
      Val = CI->getRawValue ();
    }
    switch (getClassB (C->getType ())) {
    case cByte:
      BuildMI (*MBB, IP, V8::ORri, 2, R).addReg (V8::G0).addImm((uint8_t)Val);
      return;
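    // Wider constants are built with SETHI, which places its 22-bit immediate
    // into bits 31..10 of the destination, followed by an OR to fill in the
    // low 10 bits.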
    case cShort: {
      unsigned TmpReg = makeAnotherReg (C->getType ());
      BuildMI (*MBB, IP, V8::SETHIi, 1, TmpReg)
        .addImm (((uint16_t) Val) >> 10);
      BuildMI (*MBB, IP, V8::ORri, 2, R).addReg (TmpReg)
        .addImm (((uint16_t) Val) & 0x03ff);
      return;
    }
    case cInt: {
      unsigned TmpReg = makeAnotherReg (C->getType ());
      BuildMI (*MBB, IP, V8::SETHIi, 1, TmpReg).addImm(((uint32_t)Val) >> 10);
      BuildMI (*MBB, IP, V8::ORri, 2, R).addReg (TmpReg)
        .addImm (((uint32_t) Val) & 0x03ff);
      return;
    }
    case cLong: {
      unsigned TmpReg = makeAnotherReg (Type::UIntTy);
      uint32_t topHalf = (uint32_t) (Val >> 32);
      uint32_t bottomHalf = (uint32_t)Val;
#if 0 // FIXME: This does not appear to be correct; it assigns SSA reg R twice.
      BuildMI (*MBB, IP, V8::SETHIi, 1, TmpReg).addImm (topHalf >> 10);
      BuildMI (*MBB, IP, V8::ORri, 2, R).addReg (TmpReg)
        .addImm (topHalf & 0x03ff);
      BuildMI (*MBB, IP, V8::SETHIi, 1, TmpReg).addImm (bottomHalf >> 10);
      BuildMI (*MBB, IP, V8::ORri, 2, R).addReg (TmpReg)
        .addImm (bottomHalf & 0x03ff);
#else
      std::cerr << "Offending constant: " << *C << "\n";
      assert (0 && "Can't copy this kind of constant into register yet");
#endif
      return;
    }
    default:
      std::cerr << "Offending constant: " << *C << "\n";
      assert (0 && "Can't copy this kind of constant into register yet");
      return;
    }
  }

  std::cerr << "Offending constant: " << *C << "\n";
  assert (0 && "Can't copy this kind of constant into register yet");
}

void V8ISel::LoadArgumentsToVirtualRegs (Function *F) {
  unsigned ArgOffset = 0;
  static const unsigned IncomingArgRegs[] = { V8::I0, V8::I1, V8::I2,
    V8::I3, V8::I4, V8::I5 };
  assert (F->asize () < 7
          && "Can't handle loading excess call args off the stack yet");

  for (Function::aiterator I = F->abegin(), E = F->aend(); I != E; ++I) {
    unsigned Reg = getReg(*I);
    switch (getClassB(I->getType())) {
    case cByte:
    case cShort:
    case cInt:
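      // An OR with %g0 (which always reads as zero) is the standard SPARC
      // register-to-register move.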
      BuildMI(BB, V8::ORrr, 2, Reg).addReg (V8::G0)
        .addReg (IncomingArgRegs[ArgOffset]);
      break;
    default:
      assert (0 && "Only <=32-bit, integral arguments currently handled");
      return;
    }
    ++ArgOffset;
  }
}

bool V8ISel::runOnFunction(Function &Fn) {
  // First pass over the function, lower any unknown intrinsic functions
  // with the IntrinsicLowering class.
  LowerUnknownIntrinsicFunctionCalls(Fn);

  F = &MachineFunction::construct(&Fn, TM);

  // Create all of the machine basic blocks for the function...
  for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
    F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));

  BB = &F->front();

  // Set up a frame object for the return address.  This is used by the
  // llvm.returnaddress & llvm.frameaddress intrinsics.
  //ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4);

  // Copy incoming arguments off of the stack and out of fixed registers.
  LoadArgumentsToVirtualRegs(&Fn);

  // Instruction select everything except PHI nodes
  visit(Fn);

  // Select the PHI nodes
  //SelectPHINodes();

  RegMap.clear();
  MBBMap.clear();
  F = 0;
  // We always build a machine code representation for the function
  return true;
}

void V8ISel::visitCastInst(CastInst &I) {
  unsigned SrcReg = getReg (I.getOperand (0));
  unsigned DestReg = getReg (I);
  const Type *oldTy = I.getOperand (0)->getType ();
  const Type *newTy = I.getType ();
  unsigned oldTyClass = getClassB (oldTy);
  unsigned newTyClass = getClassB (newTy);

  if (oldTyClass < cLong && newTyClass < cLong && oldTyClass >= newTyClass) {
    // Emit a reg->reg copy to do an equal-size or narrowing cast, and do
    // sign/zero extension (necessary if we change signedness).
    unsigned TempReg1 = makeAnotherReg (newTy);
    unsigned TempReg2 = makeAnotherReg (newTy);
    BuildMI (BB, V8::ORrr, 2, TempReg1).addReg (V8::G0).addReg (SrcReg);
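    // Shifting left by (32 - 8 * sizeof(newTy)) bits and then shifting back
    // right truncates the value to the new width and re-extends it to 32 bits.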
    unsigned shiftWidth = 32 - (8 * TM.getTargetData ().getTypeSize (newTy));
    BuildMI (BB, V8::SLLri, 2, TempReg2).addZImm (shiftWidth).addReg (TempReg1);
    if (newTy->isSigned ()) { // sign-extend with SRA
      BuildMI(BB, V8::SRAri, 2, DestReg).addZImm (shiftWidth).addReg (TempReg2);
    } else { // zero-extend with SRL
      BuildMI(BB, V8::SRLri, 2, DestReg).addZImm (shiftWidth).addReg (TempReg2);
    }
  } else {
    std::cerr << "Casts w/ long, fp, double, or widening still unsupported: "
              << I;
    abort ();
  }
}

void V8ISel::visitLoadInst(LoadInst &I) {
  unsigned DestReg = getReg (I);
  unsigned PtrReg = getReg (I.getOperand (0));
  switch (getClassB (I.getType ())) {
  case cByte:
    if (I.getType ()->isSigned ())
      BuildMI (BB, V8::LDSBmr, 1, DestReg).addReg (PtrReg).addSImm(0);
    else
      BuildMI (BB, V8::LDUBmr, 1, DestReg).addReg (PtrReg).addSImm(0);
    return;
  case cShort:
    if (I.getType ()->isSigned ())
      BuildMI (BB, V8::LDSHmr, 1, DestReg).addReg (PtrReg).addSImm(0);
    else
      BuildMI (BB, V8::LDUHmr, 1, DestReg).addReg (PtrReg).addSImm(0);
    return;
  case cInt:
    BuildMI (BB, V8::LDmr, 1, DestReg).addReg (PtrReg).addSImm(0);
    return;
  case cLong:
    BuildMI (BB, V8::LDDmr, 1, DestReg).addReg (PtrReg).addSImm(0);
    return;
  default:
    std::cerr << "Load instruction not handled: " << I;
    abort ();
    return;
  }
}

void V8ISel::visitStoreInst(StoreInst &I) {
  Value *SrcVal = I.getOperand (0);
  unsigned SrcReg = getReg (SrcVal);
  unsigned PtrReg = getReg (I.getOperand (1));
  switch (getClassB (SrcVal->getType ())) {
  case cByte:
    BuildMI (BB, V8::STBrm, 1, SrcReg).addReg (PtrReg).addSImm(0);
    return;
  case cShort:
    BuildMI (BB, V8::STHrm, 1, SrcReg).addReg (PtrReg).addSImm(0);
    return;
  case cInt:
    BuildMI (BB, V8::STrm, 1, SrcReg).addReg (PtrReg).addSImm(0);
    return;
  case cLong:
    BuildMI (BB, V8::STDrm, 1, SrcReg).addReg (PtrReg).addSImm(0);
    return;
  default:
    std::cerr << "Store instruction not handled: " << I;
    abort ();
    return;
  }
}

void V8ISel::visitCallInst(CallInst &I) {
  assert (I.getNumOperands () < 8
          && "Can't handle pushing excess call args on the stack yet");
  static const unsigned OutgoingArgRegs[] = { V8::O0, V8::O1, V8::O2, V8::O3,
    V8::O4, V8::O5 };
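  // Copy the actual arguments into the fixed outgoing argument registers
  // (%o0..%o5).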
  for (unsigned i = 1; i < 7; ++i)
    if (i < I.getNumOperands ()) {
      unsigned ArgReg = getReg (I.getOperand (i));
      // Schlep it over into the outgoing arg register
      BuildMI (BB, V8::ORrr, 2, OutgoingArgRegs[i - 1]).addReg (V8::G0)
        .addReg (ArgReg);
    }

  unsigned DestReg = getReg (I);
  BuildMI (BB, V8::CALL, 1).addPCDisp (I.getOperand (0));
  if (I.getType ()->getPrimitiveID () == Type::VoidTyID)
    return;
  // Deal w/ return value
  switch (getClass (I.getType ())) {
  case cByte:
  case cShort:
  case cInt:
    // Schlep it over into the destination register
    BuildMI (BB, V8::ORrr, 2, DestReg).addReg(V8::G0).addReg(V8::O0);
    break;
  default:
    std::cerr << "Return type of call instruction not handled: " << I;
    abort ();
  }
}

void V8ISel::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands () == 1) {
    unsigned RetValReg = getReg (I.getOperand (0));
    switch (getClass (I.getOperand (0)->getType ())) {
    case cByte:
    case cShort:
    case cInt:
      // Schlep it over into i0 (where it will become o0 after restore).
      BuildMI (BB, V8::ORrr, 2, V8::I0).addReg(V8::G0).addReg(RetValReg);
      break;
    default:
      std::cerr << "Return instruction of this type not handled: " << I;
      abort ();
    }
  }

  // Just emit a 'retl' instruction to return.
  BuildMI(BB, V8::RETL, 0);
  return;
}

static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
  Function::iterator I = BB; ++I;  // Get iterator to next block
  return I != BB->getParent()->end() ? &*I : 0;
}

/// visitBranchInst - Handles conditional and unconditional branches.
///
void V8ISel::visitBranchInst(BranchInst &I) {
  // Update machine-CFG edges
  BB->addSuccessor (MBBMap[I.getSuccessor(0)]);
  if (I.isConditional())
    BB->addSuccessor (MBBMap[I.getSuccessor(1)]);

  BasicBlock *NextBB = getBlockAfter(I.getParent());  // BB after current one

  BasicBlock *takenSucc = I.getSuccessor (0);
  if (!I.isConditional()) {  // Unconditional branch?
    if (I.getSuccessor(0) != NextBB)
      BuildMI (BB, V8::BA, 1).addPCDisp (takenSucc);
    return;
  }

  unsigned CondReg = getReg (I.getCondition ());
  BasicBlock *notTakenSucc = I.getSuccessor (1);
  // Set Z condition code if CondReg was false
  BuildMI (BB, V8::CMPri, 2).addSImm (0).addReg (CondReg);
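  // Branches to the block that immediately follows (NextBB) can be omitted;
  // execution simply falls through.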
  if (notTakenSucc == NextBB) {
    if (takenSucc != NextBB)
      BuildMI (BB, V8::BNE, 1).addPCDisp (takenSucc);
  } else {
    BuildMI (BB, V8::BE, 1).addPCDisp (notTakenSucc);
    if (takenSucc != NextBB)
      BuildMI (BB, V8::BA, 1).addPCDisp (takenSucc);
  }
}

/// emitGEPOperation - Common code shared between visitGetElementPtrInst and
/// constant expression GEP support.
///
void V8ISel::emitGEPOperation (MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator IP,
                               Value *Src, User::op_iterator IdxBegin,
                               User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData ();
  const Type *Ty = Src->getType ();
  unsigned basePtrReg = getReg (Src);

  // GEPs have zero or more indices; we must perform a struct access
  // or array access for each one.
  for (GetElementPtrInst::op_iterator oi = IdxBegin, oe = IdxEnd; oi != oe;
       ++oi) {
    Value *idx = *oi;
    unsigned nextBasePtrReg = makeAnotherReg (Type::UIntTy);
    if (const StructType *StTy = dyn_cast<StructType> (Ty)) {
      // It's a struct access.  idx is the index into the structure,
      // which names the field.  Use the TargetData structure to
      // pick out what the layout of the structure is in memory.
      // Use the (constant) structure index's value to find the
      // right byte offset from the StructLayout class's list of
      // structure member offsets.
      unsigned fieldIndex = cast<ConstantUInt> (idx)->getValue ();
      unsigned memberOffset =
        TD.getStructLayout (StTy)->MemberOffsets[fieldIndex];
      // Emit an ADD to add memberOffset to the basePtr.
      BuildMI (*MBB, IP, V8::ADDri, 2,
               nextBasePtrReg).addReg (basePtrReg).addZImm (memberOffset);
      // The next type is the member of the structure selected by the
      // index.
      Ty = StTy->getElementType (fieldIndex);
    } else if (const SequentialType *SqTy = dyn_cast<SequentialType> (Ty)) {
      // It's an array or pointer access: [ArraySize x ElementType].
      // We want to add basePtrReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (Not coincidentally, the next
      // type is the type of the elements in the array).
      Ty = SqTy->getElementType ();
      unsigned elementSize = TD.getTypeSize (Ty);
      unsigned idxReg = getReg (idx, MBB, IP);
      unsigned OffsetReg = makeAnotherReg (Type::IntTy);
      unsigned elementSizeReg = makeAnotherReg (Type::UIntTy);
      BuildMI (*MBB, IP, V8::ORri, 2,
               elementSizeReg).addZImm (elementSize).addReg (V8::G0);
      // Emit a SMUL to multiply the register holding the index by
      // elementSize, putting the result in OffsetReg.
      BuildMI (*MBB, IP, V8::SMULrr, 2,
               OffsetReg).addReg (elementSizeReg).addReg (idxReg);
      // Emit an ADD to add OffsetReg to the basePtr.
      BuildMI (*MBB, IP, V8::ADDrr, 2,
               nextBasePtrReg).addReg (basePtrReg).addReg (OffsetReg);
    }
    basePtrReg = nextBasePtrReg;
  }
  // After we have processed all the indices, the result is left in
  // basePtrReg.  Move it to the register where we were expected to
  // put the answer.
  BuildMI (BB, V8::ORrr, 1, TargetReg).addReg (V8::G0).addReg (basePtrReg);
}

void V8ISel::visitGetElementPtrInst (GetElementPtrInst &I) {
  unsigned outputReg = getReg (I);
  emitGEPOperation (BB, BB->end (), I.getOperand (0),
                    I.op_begin ()+1, I.op_end (), outputReg);
}

void V8ISel::visitBinaryOperator (Instruction &I) {
  unsigned DestReg = getReg (I);
  unsigned Op0Reg = getReg (I.getOperand (0));
  unsigned Op1Reg = getReg (I.getOperand (1));

  unsigned ResultReg = DestReg;
  if (getClassB(I.getType()) != cInt)
    ResultReg = makeAnotherReg (I.getType ());
  unsigned OpCase = ~0;

  // FIXME: support long, ulong, fp.
  switch (I.getOpcode ()) {
  case Instruction::Add: OpCase = 0; break;
  case Instruction::Sub: OpCase = 1; break;
  case Instruction::Mul: OpCase = 2; break;
  case Instruction::And: OpCase = 3; break;
  case Instruction::Or:  OpCase = 4; break;
  case Instruction::Xor: OpCase = 5; break;
  case Instruction::Shl: OpCase = 6; break;
  case Instruction::Shr: OpCase = 7+I.getType()->isSigned(); break;

  case Instruction::Div:
  case Instruction::Rem: {
    unsigned Dest = ResultReg;
    if (I.getOpcode() == Instruction::Rem)
      Dest = makeAnotherReg(I.getType());

    // FIXME: this is probably only right for 32 bit operands.
    if (I.getType ()->isSigned()) {
      unsigned Tmp = makeAnotherReg (I.getType ());
      // Sign extend into the Y register
      BuildMI (BB, V8::SRAri, 2, Tmp).addReg (Op0Reg).addZImm (31);
      BuildMI (BB, V8::WRrr, 2, V8::Y).addReg (Tmp).addReg (V8::G0);
      BuildMI (BB, V8::SDIVrr, 2, Dest).addReg (Op0Reg).addReg (Op1Reg);
    } else {
      // Zero extend into the Y register, i.e., just set it to zero
      BuildMI (BB, V8::WRrr, 2, V8::Y).addReg (V8::G0).addReg (V8::G0);
      BuildMI (BB, V8::UDIVrr, 2, Dest).addReg (Op0Reg).addReg (Op1Reg);
    }

    if (I.getOpcode() == Instruction::Rem) {
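      // The remainder is Op0 - (Op0 / Op1) * Op1; Dest holds the quotient here.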
      unsigned Tmp = makeAnotherReg (I.getType ());
      BuildMI (BB, V8::SMULrr, 2, Tmp).addReg(Dest).addReg(Op1Reg);
      BuildMI (BB, V8::SUBrr, 2, ResultReg).addReg(Op0Reg).addReg(Tmp);
    }
    break;
  }
  default:
    visitInstruction (I);
    return;
  }

  if (OpCase != ~0U) {
    static const unsigned Opcodes[] = {
      V8::ADDrr, V8::SUBrr, V8::SMULrr, V8::ANDrr, V8::ORrr, V8::XORrr,
      V8::SLLrr, V8::SRLrr, V8::SRArr
    };
    BuildMI (BB, Opcodes[OpCase], 2, ResultReg).addReg (Op0Reg).addReg (Op1Reg);
  }

  switch (getClass (I.getType ())) {
  case cByte:
    if (I.getType ()->isSigned ()) { // add byte
      BuildMI (BB, V8::ANDri, 2, DestReg).addReg (ResultReg).addZImm (0xff);
    } else { // add ubyte
      unsigned TmpReg = makeAnotherReg (I.getType ());
      BuildMI (BB, V8::SLLri, 2, TmpReg).addReg (ResultReg).addZImm (24);
      BuildMI (BB, V8::SRAri, 2, DestReg).addReg (TmpReg).addZImm (24);
    }
    break;
  case cShort:
    if (I.getType ()->isSigned ()) { // add short
      unsigned TmpReg = makeAnotherReg (I.getType ());
      BuildMI (BB, V8::SLLri, 2, TmpReg).addReg (ResultReg).addZImm (16);
      BuildMI (BB, V8::SRAri, 2, DestReg).addReg (TmpReg).addZImm (16);
    } else { // add ushort
      unsigned TmpReg = makeAnotherReg (I.getType ());
      BuildMI (BB, V8::SLLri, 2, TmpReg).addReg (ResultReg).addZImm (16);
      BuildMI (BB, V8::SRLri, 2, DestReg).addReg (TmpReg).addZImm (16);
    }
    break;
  case cInt:
    // Nothing to do here.
    break;
  default:
    visitInstruction (I);
    return;
  }
}

void V8ISel::visitSetCondInst(Instruction &I) {
  unsigned Op0Reg = getReg (I.getOperand (0));
  unsigned Op1Reg = getReg (I.getOperand (1));
  unsigned DestReg = getReg (I);

  // Compare the two values.
  BuildMI(BB, V8::SUBCCrr, 2, V8::G0).addReg(Op0Reg).addReg(Op1Reg);

  // Put 0 into a register.
  //unsigned ZeroReg = makeAnotherReg(Type::IntTy);
  //BuildMI(BB, V8::ORri, 2, ZeroReg).addReg(V8::G0).addReg(V8::G0);

  unsigned Opcode;
  switch (I.getOpcode()) {
  default: assert(0 && "Unknown setcc instruction!");
  case Instruction::SetEQ:
  case Instruction::SetNE:
  case Instruction::SetLT:
  case Instruction::SetGT:
  case Instruction::SetLE:
  case Instruction::SetGE:
    ;
  }

  // FIXME: We need either conditional moves like the V9 has (e.g. movge), or we
  // need to be able to turn a single LLVM basic block into multiple machine
  // code basic blocks.  For now, it probably makes sense to emit Sparc V9
  // instructions until the code generator is upgraded.  Note that this should
  // only happen when the setcc cannot be folded into the branch, but this needs
  // to be handled correctly!

  visitInstruction(I);
}

/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
void V8ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
      if (CallInst *CI = dyn_cast<CallInst>(I++))
        if (Function *F = CI->getCalledFunction())
          switch (F->getIntrinsicID()) {
          case Intrinsic::not_intrinsic: break;
          default:
            // All other intrinsic calls we must lower.
            Instruction *Before = CI->getPrev();
            TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
            if (Before) {        // Move iterator to instruction after call
              I = Before; ++I;
            } else {
              I = BB->begin();
            }
          }
}

void V8ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
  unsigned TmpReg1, TmpReg2;
  switch (ID) {
  default: assert(0 && "Intrinsic not supported!");
  }
}