blob: f5fee0b5f42a8981ee7bf367f49eabc98a468d99 [file] [log] [blame]
Eugene Zelenko76bf48d2017-06-26 22:44:03 +00001//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
Quentin Colombet105cf2b2016-01-20 20:58:56 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9/// \file
10/// This file implements the IRTranslator class.
11//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000014#include "llvm/ADT/STLExtras.h"
Ahmed Bougachaeceabdd2017-02-23 23:57:28 +000015#include "llvm/ADT/ScopeExit.h"
Tim Northoverb6636fd2017-01-17 22:13:50 +000016#include "llvm/ADT/SmallSet.h"
Quentin Colombetfd9d0a02016-02-11 19:59:41 +000017#include "llvm/ADT/SmallVector.h"
Adam Nemet0965da22017-10-09 23:19:02 +000018#include "llvm/Analysis/OptimizationRemarkEmitter.h"
Tim Northovera9105be2016-11-09 22:39:54 +000019#include "llvm/CodeGen/Analysis.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000020#include "llvm/CodeGen/GlobalISel/CallLowering.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000021#include "llvm/CodeGen/LowLevelType.h"
22#include "llvm/CodeGen/MachineBasicBlock.h"
Tim Northoverbd505462016-07-22 16:59:52 +000023#include "llvm/CodeGen/MachineFrameInfo.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000024#include "llvm/CodeGen/MachineFunction.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000025#include "llvm/CodeGen/MachineInstrBuilder.h"
26#include "llvm/CodeGen/MachineMemOperand.h"
27#include "llvm/CodeGen/MachineOperand.h"
Quentin Colombet17c494b2016-02-11 17:51:31 +000028#include "llvm/CodeGen/MachineRegisterInfo.h"
David Blaikieb3bde2e2017-11-17 01:07:10 +000029#include "llvm/CodeGen/TargetFrameLowering.h"
30#include "llvm/CodeGen/TargetLowering.h"
Quentin Colombet3bb32cc2016-08-26 23:49:05 +000031#include "llvm/CodeGen/TargetPassConfig.h"
David Blaikieb3bde2e2017-11-17 01:07:10 +000032#include "llvm/CodeGen/TargetRegisterInfo.h"
33#include "llvm/CodeGen/TargetSubtargetInfo.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000034#include "llvm/IR/BasicBlock.h"
Quentin Colombet17c494b2016-02-11 17:51:31 +000035#include "llvm/IR/Constant.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000036#include "llvm/IR/Constants.h"
37#include "llvm/IR/DataLayout.h"
Tim Northover09aac4a2017-01-26 23:39:14 +000038#include "llvm/IR/DebugInfo.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000039#include "llvm/IR/DerivedTypes.h"
Quentin Colombet2ecff3b2016-02-10 22:59:27 +000040#include "llvm/IR/Function.h"
Tim Northovera7653b32016-09-12 11:20:22 +000041#include "llvm/IR/GetElementPtrTypeIterator.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000042#include "llvm/IR/InlineAsm.h"
43#include "llvm/IR/InstrTypes.h"
44#include "llvm/IR/Instructions.h"
Tim Northover5fb414d2016-07-29 22:32:36 +000045#include "llvm/IR/IntrinsicInst.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000046#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/Metadata.h"
Quentin Colombet17c494b2016-02-11 17:51:31 +000049#include "llvm/IR/Type.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000050#include "llvm/IR/User.h"
Quentin Colombet17c494b2016-02-11 17:51:31 +000051#include "llvm/IR/Value.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000052#include "llvm/MC/MCContext.h"
53#include "llvm/Pass.h"
54#include "llvm/Support/Casting.h"
55#include "llvm/Support/CodeGen.h"
56#include "llvm/Support/Debug.h"
57#include "llvm/Support/ErrorHandling.h"
58#include "llvm/Support/LowLevelTypeImpl.h"
59#include "llvm/Support/MathExtras.h"
60#include "llvm/Support/raw_ostream.h"
Tim Northover5fb414d2016-07-29 22:32:36 +000061#include "llvm/Target/TargetIntrinsicInfo.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000062#include "llvm/Target/TargetMachine.h"
Eugene Zelenko76bf48d2017-06-26 22:44:03 +000063#include <algorithm>
64#include <cassert>
65#include <cstdint>
66#include <iterator>
67#include <string>
68#include <utility>
69#include <vector>
Quentin Colombet2ecff3b2016-02-10 22:59:27 +000070
71#define DEBUG_TYPE "irtranslator"
72
Quentin Colombet105cf2b2016-01-20 20:58:56 +000073using namespace llvm;
74
// Pass identification: the address of ID uniquely identifies this pass.
char IRTranslator::ID = 0;

// Register the pass and declare its analysis dependency on TargetPassConfig
// (used to query the GlobalISel abort policy and target hooks).
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
Quentin Colombet105cf2b2016-01-20 20:58:56 +000082
Ahmed Bougachaae9dade2017-02-23 21:05:42 +000083static void reportTranslationError(MachineFunction &MF,
84 const TargetPassConfig &TPC,
85 OptimizationRemarkEmitter &ORE,
86 OptimizationRemarkMissed &R) {
87 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
88
89 // Print the function name explicitly if we don't have a debug location (which
90 // makes the diagnostic less useful) or if we're going to emit a raw error.
91 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
92 R << (" (in function: " + MF.getName() + ")").str();
93
94 if (TPC.isGlobalISelAbortEnabled())
95 report_fatal_error(R.getMsg());
96 else
97 ORE.emit(R);
Tim Northover60f23492016-11-08 01:12:17 +000098}
99
IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  // Register this pass (and its dependencies) with the global pass registry.
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}
103
void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  // TargetPassConfig is consulted for the GlobalISel abort policy when
  // reporting translation failures.
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
108
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000109static void computeValueLLTs(const DataLayout &DL, Type &Ty,
110 SmallVectorImpl<LLT> &ValueTys,
111 SmallVectorImpl<uint64_t> *Offsets = nullptr,
112 uint64_t StartingOffset = 0) {
113 // Given a struct type, recursively traverse the elements.
114 if (StructType *STy = dyn_cast<StructType>(&Ty)) {
115 const StructLayout *SL = DL.getStructLayout(STy);
116 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
117 computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
118 StartingOffset + SL->getElementOffset(I));
119 return;
120 }
121 // Given an array type, recursively traverse the elements.
122 if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
123 Type *EltTy = ATy->getElementType();
124 uint64_t EltSize = DL.getTypeAllocSize(EltTy);
125 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
126 computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
127 StartingOffset + i * EltSize);
128 return;
129 }
130 // Interpret void as zero return values.
131 if (Ty.isVoidTy())
132 return;
133 // Base case: we can get an LLT for this LLVM IR type.
134 ValueTys.push_back(getLLTForType(Ty, DL));
135 if (Offsets != nullptr)
136 Offsets->push_back(StartingOffset * 8);
137}
Tim Northover5ed648e2016-08-09 21:28:04 +0000138
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000139IRTranslator::ValueToVRegInfo::VRegListT &
140IRTranslator::allocateVRegs(const Value &Val) {
141 assert(!VMap.contains(Val) && "Value already allocated in VMap");
142 auto *Regs = VMap.getVRegs(Val);
143 auto *Offsets = VMap.getOffsets(Val);
144 SmallVector<LLT, 4> SplitTys;
145 computeValueLLTs(*DL, *Val.getType(), SplitTys,
146 Offsets->empty() ? Offsets : nullptr);
147 for (unsigned i = 0; i < SplitTys.size(); ++i)
148 Regs->push_back(0);
149 return *Regs;
150}
Tim Northover9e35f1e2017-01-25 20:58:22 +0000151
/// Return the list of virtual registers holding \p Val, creating and caching
/// them in VMap on first use. Non-constant values get one fresh generic vreg
/// per split LLT; constants are additionally translated so the registers are
/// defined.
ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  // void values map to an empty register list.
  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  // Split the value into its component LLTs; fill in the offsets as a side
  // effect if they haven't been computed yet.
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  // Non-constants just need one fresh vreg per component.
  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    // Recurse element-by-element; getAggregateElement returns null past the
    // last element, terminating the loop.
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      std::copy(EltRegs.begin(), EltRegs.end(), std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    // Scalar constant: materialize its definition now.
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}
201
Tim Northovercdf23f12016-10-31 18:30:59 +0000202int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
203 if (FrameIndices.find(&AI) != FrameIndices.end())
204 return FrameIndices[&AI];
205
Tim Northovercdf23f12016-10-31 18:30:59 +0000206 unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
207 unsigned Size =
208 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
209
210 // Always allocate at least one byte.
211 Size = std::max(Size, 1u);
212
213 unsigned Alignment = AI.getAlignment();
214 if (!Alignment)
215 Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
216
217 int &FI = FrameIndices[&AI];
Tim Northover50db7f412016-12-07 21:17:47 +0000218 FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
Tim Northovercdf23f12016-10-31 18:30:59 +0000219 return FI;
220}
221
/// Return the alignment (in bytes) to use for the memory operation \p I.
/// Falls back to the ABI alignment of the accessed type when the instruction
/// carries no explicit alignment, and reports a translation error (returning
/// 1) for unhandled instruction kinds.
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    // Unknown memory instruction: record the failure and return a safe,
    // conservative alignment of 1.
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  // Alignment of 0 means "unspecified"; use the ABI default for the type.
  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}
254
Ahmed Bougachaa61c2142017-03-15 18:22:33 +0000255MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
Quentin Colombet53237a92016-03-11 17:27:43 +0000256 MachineBasicBlock *&MBB = BBToMBB[&BB];
Ahmed Bougachaa61c2142017-03-15 18:22:33 +0000257 assert(MBB && "BasicBlock was not encountered before");
Quentin Colombet17c494b2016-02-11 17:51:31 +0000258 return *MBB;
259}
260
Tim Northoverb6636fd2017-01-17 22:13:50 +0000261void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
262 assert(NewPred && "new predecessor must be a real MachineBasicBlock");
263 MachinePreds[Edge].push_back(NewPred);
264}
265
Tim Northoverc53606e2016-12-07 21:29:15 +0000266bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
267 MachineIRBuilder &MIRBuilder) {
Tim Northover0d56e052016-07-29 18:11:21 +0000268 // FIXME: handle signed/unsigned wrapping flags.
269
Quentin Colombet2ecff3b2016-02-10 22:59:27 +0000270 // Get or create a virtual register for each value.
271 // Unless the value is a Constant => loadimm cst?
272 // or inline constant each time?
273 // Creation of a virtual register needs to have a size.
Tim Northover357f1be2016-08-10 23:02:41 +0000274 unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
275 unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
276 unsigned Res = getOrCreateVReg(U);
Tim Northover0f140c72016-09-09 11:46:34 +0000277 MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
Quentin Colombet17c494b2016-02-11 17:51:31 +0000278 return true;
Quentin Colombet105cf2b2016-01-20 20:58:56 +0000279}
280
Volkan Keles20d3c422017-03-07 18:03:28 +0000281bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
282 // -0.0 - X --> G_FNEG
283 if (isa<Constant>(U.getOperand(0)) &&
284 U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
285 MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
286 .addDef(getOrCreateVReg(U))
287 .addUse(getOrCreateVReg(*U.getOperand(1)));
288 return true;
289 }
290 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
291}
292
Tim Northoverc53606e2016-12-07 21:29:15 +0000293bool IRTranslator::translateCompare(const User &U,
294 MachineIRBuilder &MIRBuilder) {
Tim Northoverd5c23bc2016-08-19 20:48:16 +0000295 const CmpInst *CI = dyn_cast<CmpInst>(&U);
296 unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
297 unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
298 unsigned Res = getOrCreateVReg(U);
299 CmpInst::Predicate Pred =
300 CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
301 cast<ConstantExpr>(U).getPredicate());
Tim Northoverd5c23bc2016-08-19 20:48:16 +0000302 if (CmpInst::isIntPredicate(Pred))
Tim Northover0f140c72016-09-09 11:46:34 +0000303 MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
Tim Northover7596bd72017-03-08 18:49:54 +0000304 else if (Pred == CmpInst::FCMP_FALSE)
Ahmed Bougacha2fb80302017-03-15 19:21:11 +0000305 MIRBuilder.buildCopy(
306 Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
307 else if (Pred == CmpInst::FCMP_TRUE)
308 MIRBuilder.buildCopy(
309 Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
Tim Northoverd5c23bc2016-08-19 20:48:16 +0000310 else
Tim Northover0f140c72016-09-09 11:46:34 +0000311 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
Tim Northoverd5c23bc2016-08-19 20:48:16 +0000312
Tim Northoverde3aea0412016-08-17 20:25:25 +0000313 return true;
314}
315
Tim Northoverc53606e2016-12-07 21:29:15 +0000316bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
Tim Northover357f1be2016-08-10 23:02:41 +0000317 const ReturnInst &RI = cast<ReturnInst>(U);
Tim Northover0d56e052016-07-29 18:11:21 +0000318 const Value *Ret = RI.getReturnValue();
Amara Emersond78d65c2017-11-30 20:06:02 +0000319 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
320 Ret = nullptr;
Quentin Colombet74d7d2f2016-02-11 18:53:28 +0000321 // The target may mess up with the insertion point, but
322 // this is not important as a return is the last instruction
323 // of the block anyway.
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000324
325 // FIXME: this interface should simplify when CallLowering gets adapted to
326 // multiple VRegs per Value.
327 unsigned VReg = Ret ? packRegs(*Ret, MIRBuilder) : 0;
328 return CLI->lowerReturn(MIRBuilder, Ret, VReg);
Quentin Colombet74d7d2f2016-02-11 18:53:28 +0000329}
330
Tim Northoverc53606e2016-12-07 21:29:15 +0000331bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
Tim Northover357f1be2016-08-10 23:02:41 +0000332 const BranchInst &BrInst = cast<BranchInst>(U);
Tim Northover69c2ba52016-07-29 17:58:00 +0000333 unsigned Succ = 0;
334 if (!BrInst.isUnconditional()) {
335 // We want a G_BRCOND to the true BB followed by an unconditional branch.
336 unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
337 const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
Ahmed Bougachaa61c2142017-03-15 18:22:33 +0000338 MachineBasicBlock &TrueBB = getMBB(TrueTgt);
Tim Northover0f140c72016-09-09 11:46:34 +0000339 MIRBuilder.buildBrCond(Tst, TrueBB);
Quentin Colombetdd4b1372016-03-11 17:28:03 +0000340 }
Tim Northover69c2ba52016-07-29 17:58:00 +0000341
342 const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
Ahmed Bougachaa61c2142017-03-15 18:22:33 +0000343 MachineBasicBlock &TgtBB = getMBB(BrTgt);
Ahmed Bougachae8e1fa32017-03-21 23:42:50 +0000344 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
345
346 // If the unconditional target is the layout successor, fallthrough.
347 if (!CurBB.isLayoutSuccessor(&TgtBB))
348 MIRBuilder.buildBr(TgtBB);
Tim Northover69c2ba52016-07-29 17:58:00 +0000349
Quentin Colombetdd4b1372016-03-11 17:28:03 +0000350 // Link successors.
Quentin Colombetdd4b1372016-03-11 17:28:03 +0000351 for (const BasicBlock *Succ : BrInst.successors())
Ahmed Bougachaa61c2142017-03-15 18:22:33 +0000352 CurBB.addSuccessor(&getMBB(*Succ));
Quentin Colombetdd4b1372016-03-11 17:28:03 +0000353 return true;
354}
355
/// Translate a switch instruction as a linear chain of compare-and-branch
/// blocks: one icmp-eq + brcond per case, falling through to a fresh block
/// for the next comparison, and ending with a branch to the default
/// destination.
bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    // Compare the switch value against this case's constant.
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    // On match, jump to the case's block; record CurMBB as a machine-level
    // predecessor of the (OrigBB -> TrueBB) IR edge for PHI fixup.
    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    // On mismatch, fall through into a fresh block holding the next compare.
    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    // Subsequent instructions are emitted into the new fallthrough block.
    MIRBuilder.setMBB(*FalseMBB);
  }
  // handle default case
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}
401
Kristof Beyls65a12c02017-01-30 09:13:18 +0000402bool IRTranslator::translateIndirectBr(const User &U,
403 MachineIRBuilder &MIRBuilder) {
404 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
405
406 const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
407 MIRBuilder.buildBrIndirect(Tgt);
408
409 // Link successors.
410 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
411 for (const BasicBlock *Succ : BrInst.successors())
Ahmed Bougachaa61c2142017-03-15 18:22:33 +0000412 CurBB.addSuccessor(&getMBB(*Succ));
Kristof Beyls65a12c02017-01-30 09:13:18 +0000413
414 return true;
415}
416
Tim Northoverc53606e2016-12-07 21:29:15 +0000417bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
Tim Northover357f1be2016-08-10 23:02:41 +0000418 const LoadInst &LI = cast<LoadInst>(U);
Quentin Colombet3bb32cc2016-08-26 23:49:05 +0000419
Tim Northover7152dca2016-10-19 15:55:06 +0000420 auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
421 : MachineMemOperand::MONone;
422 Flags |= MachineMemOperand::MOLoad;
Tim Northoverad2b7172016-07-26 20:23:26 +0000423
Amara Emersond78d65c2017-11-30 20:06:02 +0000424 if (DL->getTypeStoreSize(LI.getType()) == 0)
425 return true;
426
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000427 ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
428 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
429 unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
Daniel Sanders52b4ce72017-03-07 23:20:35 +0000430
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000431 for (unsigned i = 0; i < Regs.size(); ++i) {
432 unsigned Addr = 0;
433 MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);
434
435 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
436 unsigned BaseAlign = getMemOpAlignment(LI);
437 auto MMO = MF->getMachineMemOperand(
438 Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
439 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
440 LI.getSyncScopeID(), LI.getOrdering());
441 MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
442 }
443
Tim Northoverad2b7172016-07-26 20:23:26 +0000444 return true;
445}
446
Tim Northoverc53606e2016-12-07 21:29:15 +0000447bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
Tim Northover357f1be2016-08-10 23:02:41 +0000448 const StoreInst &SI = cast<StoreInst>(U);
Tim Northover7152dca2016-10-19 15:55:06 +0000449 auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
450 : MachineMemOperand::MONone;
451 Flags |= MachineMemOperand::MOStore;
Tim Northoverad2b7172016-07-26 20:23:26 +0000452
Amara Emersond78d65c2017-11-30 20:06:02 +0000453 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
454 return true;
455
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000456 ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
457 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
458 unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
Tim Northoverad2b7172016-07-26 20:23:26 +0000459
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000460 for (unsigned i = 0; i < Vals.size(); ++i) {
461 unsigned Addr = 0;
462 MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);
463
464 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
465 unsigned BaseAlign = getMemOpAlignment(SI);
466 auto MMO = MF->getMachineMemOperand(
467 Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
468 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
469 SI.getSyncScopeID(), SI.getOrdering());
470 MIRBuilder.buildStore(Vals[i], Addr, *MMO);
471 }
Tim Northoverad2b7172016-07-26 20:23:26 +0000472 return true;
473}
474
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000475static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
Tim Northoverb6046222016-08-19 20:09:03 +0000476 const Value *Src = U.getOperand(0);
477 Type *Int32Ty = Type::getInt32Ty(U.getContext());
Volkan Keles6a36c642017-05-19 09:47:02 +0000478
Tim Northover6f80b082016-08-19 17:47:05 +0000479 // getIndexedOffsetInType is designed for GEPs, so the first index is the
480 // usual array element rather than looking into the actual aggregate.
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000481 SmallVector<Value *, 1> Indices;
Tim Northover6f80b082016-08-19 17:47:05 +0000482 Indices.push_back(ConstantInt::get(Int32Ty, 0));
Tim Northoverb6046222016-08-19 20:09:03 +0000483
484 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
485 for (auto Idx : EVI->indices())
486 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000487 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
488 for (auto Idx : IVI->indices())
489 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
Tim Northoverb6046222016-08-19 20:09:03 +0000490 } else {
491 for (unsigned i = 1; i < U.getNumOperands(); ++i)
492 Indices.push_back(U.getOperand(i));
493 }
Tim Northover6f80b082016-08-19 17:47:05 +0000494
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000495 return 8 * static_cast<uint64_t>(
496 DL.getIndexedOffsetInType(Src->getType(), Indices));
497}
Tim Northover6f80b082016-08-19 17:47:05 +0000498
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000499bool IRTranslator::translateExtractValue(const User &U,
500 MachineIRBuilder &MIRBuilder) {
501 const Value *Src = U.getOperand(0);
502 uint64_t Offset = getOffsetFromIndices(U, *DL);
503 ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
504 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
505 unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
506 Offsets.begin();
507 auto &DstRegs = allocateVRegs(U);
508
509 for (unsigned i = 0; i < DstRegs.size(); ++i)
510 DstRegs[i] = SrcRegs[Idx++];
Tim Northover6f80b082016-08-19 17:47:05 +0000511
512 return true;
513}
514
/// Translate insertvalue by aliasing: the destination's vregs are the source
/// aggregate's vregs, except that the components at the inserted offset are
/// replaced by the inserted value's vregs. No instructions are emitted.
bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  // Bit offset within the aggregate where operand 1 is being inserted.
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  // Walk the destination components in offset order: once we reach the insert
  // offset, consume the inserted value's registers; everywhere else (and once
  // they are exhausted) take the corresponding source register.
  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}
534
Tim Northoverc53606e2016-12-07 21:29:15 +0000535bool IRTranslator::translateSelect(const User &U,
536 MachineIRBuilder &MIRBuilder) {
Kristof Beyls7a713502017-04-19 06:38:37 +0000537 unsigned Tst = getOrCreateVReg(*U.getOperand(0));
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000538 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
539 ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
540 ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
541
542 for (unsigned i = 0; i < ResRegs.size(); ++i)
543 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
544
Tim Northover5a28c362016-08-19 20:09:07 +0000545 return true;
546}
547
Tim Northoverc53606e2016-12-07 21:29:15 +0000548bool IRTranslator::translateBitCast(const User &U,
549 MachineIRBuilder &MIRBuilder) {
Ahmed Bougacha5c7924f2017-03-07 20:53:06 +0000550 // If we're bitcasting to the source type, we can reuse the source vreg.
Daniel Sanders52b4ce72017-03-07 23:20:35 +0000551 if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
552 getLLTForType(*U.getType(), *DL)) {
Ahmed Bougacha5c7924f2017-03-07 20:53:06 +0000553 unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000554 auto &Regs = *VMap.getVRegs(U);
Ahmed Bougacha5c7924f2017-03-07 20:53:06 +0000555 // If we already assigned a vreg for this bitcast, we can't change that.
556 // Emit a copy to satisfy the users we already emitted.
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000557 if (!Regs.empty())
558 MIRBuilder.buildCopy(Regs[0], SrcReg);
559 else {
560 Regs.push_back(SrcReg);
561 VMap.getOffsets(U)->push_back(0);
562 }
Tim Northover7c9eba92016-07-25 21:01:29 +0000563 return true;
564 }
Tim Northoverc53606e2016-12-07 21:29:15 +0000565 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
Tim Northover7c9eba92016-07-25 21:01:29 +0000566}
567
Tim Northoverc53606e2016-12-07 21:29:15 +0000568bool IRTranslator::translateCast(unsigned Opcode, const User &U,
569 MachineIRBuilder &MIRBuilder) {
Tim Northover357f1be2016-08-10 23:02:41 +0000570 unsigned Op = getOrCreateVReg(*U.getOperand(0));
571 unsigned Res = getOrCreateVReg(U);
Tim Northover0f140c72016-09-09 11:46:34 +0000572 MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
Tim Northover7c9eba92016-07-25 21:01:29 +0000573 return true;
574}
575
// Translate a getelementptr instruction into explicit pointer arithmetic:
// constant contributions are accumulated into a running byte offset and
// folded into as few G_GEPs as possible; dynamic indices are scaled by the
// element size and added via G_GEP one at a time.
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  // All offsets are computed in the pointer-sized integer type.
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  // Running compile-time-constant byte offset, folded into a single G_GEP
  // whenever a dynamic index forces us to emit one.
  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      // Struct field: the index is required to be constant, so the field
      // offset folds directly into the accumulated constant offset.
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      // Dynamic index: first flush any pending constant offset into the base
      // pointer so the subsequent scaled add applies on top of it.
      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      // GEP indices are sign-extended (or truncated) to the offset width.
      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        unsigned ElementSizeReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  // Flush any remaining constant offset directly into the result vreg.
  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  // No trailing offset: the result is just the last computed base pointer.
  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
651
// Lower a memcpy/memmove/memset intrinsic (\p ID) as a plain libcall to the
// corresponding C routine. Returns false (leaving the call untranslated) for
// cases this simple lowering cannot handle: non-zero address spaces or a
// size argument narrower/wider than the pointer width.
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  // Only address space 0 and pointer-sized lengths map directly onto the C
  // library signatures.
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  // All three intrinsics take exactly three value arguments
  // (dest, src-or-value, size), which become the libcall arguments.
  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  // Pick the libcall name; memcpy/memmove additionally require the source
  // pointer to be in address space 0.
  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if(cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  // Emit the call through the target's call lowering; the intrinsic itself
  // returns void, hence the (0, CI.getType()) result ArgInfo.
  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}
Tim Northovera7653b32016-09-12 11:20:22 +0000688
// Emit a LOAD_STACK_GUARD pseudo defining \p DstReg with the stack-guard
// value. If the target exposes the guard as an IR global, attach a
// load memoperand on it so later passes know what memory is read.
void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  // LOAD_STACK_GUARD is a target-opcode pseudo; its result must live in a
  // concrete pointer register class rather than a generic vreg.
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    // No SelectionDAG-style guard global: leave the pseudo without a
    // memoperand.
    return;

  // The guard value is loaded from a fixed global: mark the access as an
  // invariant, dereferenceable, pointer-sized load.
  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}
710
Tim Northover1e656ec2016-12-08 22:44:00 +0000711bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
712 MachineIRBuilder &MIRBuilder) {
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000713 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
Tim Northover1e656ec2016-12-08 22:44:00 +0000714 auto MIB = MIRBuilder.buildInstr(Op)
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000715 .addDef(ResRegs[0])
716 .addDef(ResRegs[1])
Tim Northover1e656ec2016-12-08 22:44:00 +0000717 .addUse(getOrCreateVReg(*CI.getOperand(0)))
718 .addUse(getOrCreateVReg(*CI.getOperand(1)));
719
720 if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
Ahmed Bougacha2fb80302017-03-15 19:21:11 +0000721 unsigned Zero = getOrCreateVReg(
722 *Constant::getNullValue(Type::getInt1Ty(CI.getContext())));
Tim Northover1e656ec2016-12-08 22:44:00 +0000723 MIB.addUse(Zero);
724 }
725
Tim Northover1e656ec2016-12-08 22:44:00 +0000726 return true;
727}
728
// Translate the intrinsics this pass knows how to handle specially. Returns
// true if the intrinsic was fully handled here; false makes the caller
// (translateCall) fall back to emitting a generic G_INTRINSIC.
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts though.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      // Nothing to describe; silently drop the declare (debug builds log it).
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else
      // Dynamic address: describe it through the vreg holding the pointer.
      MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*Address),
                                     DI.getVariable(), DI.getExpression());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    // G_VASTART writes the target's va_list structure through the pointer
    // argument; the memoperand covers the whole va_list.
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      // Constant value: no vreg needed. (Note: this CI shadows the outer CI.)
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  // The *.with.overflow family shares one helper; only the opcode differs.
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  // Simple FP intrinsics map 1:1 onto generic opcodes.
  case Intrinsic::pow:
    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  case Intrinsic::exp:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::exp2:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log2:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fabs:
    MIRBuilder.buildInstr(TargetOpcode::G_FABS)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma:
    MIRBuilder.buildInstr(TargetOpcode::G_FMA)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    return true;
  case Intrinsic::fmuladd: {
    // fmuladd may fuse into a single G_FMA when the target allows it and
    // says fused is faster; otherwise split into G_FMUL + G_FADD.
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, Dst, Op0, Op1, Op2);
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, Ty, Op0, Op1);
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, Dst, FMul, Op2);
    }
    return true;
  }
  // Memory intrinsics are lowered as libcalls to the C routines.
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    // Materialize the function-local type-info index as a constant.
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    // Load the stack guard and store it to the protector slot as a volatile
    // store so it cannot be optimized away.
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}
912
Tim Northoveraa995c92017-03-09 23:36:26 +0000913bool IRTranslator::translateInlineAsm(const CallInst &CI,
914 MachineIRBuilder &MIRBuilder) {
915 const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
916 if (!IA.getConstraintString().empty())
917 return false;
918
919 unsigned ExtraInfo = 0;
920 if (IA.hasSideEffects())
921 ExtraInfo |= InlineAsm::Extra_HasSideEffects;
922 if (IA.getDialect() == InlineAsm::AD_Intel)
923 ExtraInfo |= InlineAsm::Extra_AsmDialect;
924
925 MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
926 .addExternalSymbol(IA.getAsmString().c_str())
927 .addImm(ExtraInfo);
928
929 return true;
930}
931
Amara Emerson0d6a26d2018-05-16 10:32:02 +0000932unsigned IRTranslator::packRegs(const Value &V,
933 MachineIRBuilder &MIRBuilder) {
934 ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
935 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
936 LLT BigTy = getLLTForType(*V.getType(), *DL);
937
938 if (Regs.size() == 1)
939 return Regs[0];
940
941 unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
942 MIRBuilder.buildUndef(Dst);
943 for (unsigned i = 0; i < Regs.size(); ++i) {
944 unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
945 MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
946 Dst = NewDst;
947 }
948 return Dst;
949}
950
951void IRTranslator::unpackRegs(const Value &V, unsigned Src,
952 MachineIRBuilder &MIRBuilder) {
953 ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
954 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
955
956 for (unsigned i = 0; i < Regs.size(); ++i)
957 MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
958}
959
// Translate a call instruction. Handles, in order: inline asm, plain
// (non-intrinsic) calls through the target's CallLowering, specially-known
// intrinsics, and finally generic G_INTRINSIC emission for the rest.
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  // Resolve the intrinsic ID, consulting the target's intrinsic table when
  // the generic lookup finds nothing.
  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  // Split (aggregate) results are funneled through one wide temporary vreg
  // and unpacked into the per-piece vregs after the call.
  bool IsSplitType = valueIsSplit(CI);
  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    // Ordinary (possibly indirect) call.
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg: CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    // The callee vreg is only materialized (lazily) if the target's call
    // lowering actually needs it, i.e. for indirect calls.
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  // Intrinsics with a dedicated translation take priority.
  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  // Fallback: emit a generic G_INTRINSIC / G_INTRINSIC_W_SIDE_EFFECTS.
  unsigned Res = 0;
  if (!CI.getType()->isVoidTy()) {
    if (IsSplitType)
      Res =
          MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
    else
      Res = getOrCreateVReg(CI);
  }
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  if (IsSplitType)
    unpackRegs(CI, Res, MIRBuilder);

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Info.align));
  }

  return true;
}
1037
// Translate an invoke: emit the call bracketed by EH_LABELs, register the
// invoke range with the MachineFunction, and wire up both the normal and
// exceptional successors.
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  // Results go via one wide temporary vreg (see packRegs/unpackRegs) and are
  // scattered to the per-piece vregs after the call.
  unsigned Res =
      MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg: I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  // Explicit fall-through branch to the normal destination.
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}
1093
// Translate a landingpad: mark the block as an EH pad, emit its label, and
// copy the exception pointer / selector out of the registers the personality
// routine leaves them in.
bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  // Landingpads produce a two-element struct: {exception pointer, selector}.
  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  // The selector arrives in a pointer-typed register; copy it out and cast
  // to the landingpad's second element type.
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}
1152
// Translate an alloca. Static allocas become frame-index references; dynamic
// allocas are lowered to explicit stack-pointer arithmetic (copy SP, subtract
// the size, realign if needed, write SP back).
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isStaticAlloca()) {
    // Easy case: the object already has a frame slot; just take its address.
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  // The element count is widened/narrowed to the pointer-sized integer type
  // before being scaled.
  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  // The element size is negated so that adding AllocSize to SP moves it
  // downwards (the direction the stack grows on the targets handled here).
  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by add SA-1 to the size. This doesn't overflow because we're computing
    // an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  // Commit the new stack pointer and publish the allocated address as the
  // alloca's value.
  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
1220
Tim Northover4a652222017-02-15 23:22:33 +00001221bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1222 // FIXME: We may need more info about the type. Because of how LLT works,
1223 // we're completely discarding the i64/double distinction here (amongst
1224 // others). Fortunately the ABIs I know of where that matters don't use va_arg
1225 // anyway but that's not guaranteed.
1226 MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1227 .addDef(getOrCreateVReg(U))
1228 .addUse(getOrCreateVReg(*U.getOperand(0)))
1229 .addImm(DL->getABITypeAlignment(U.getType()));
1230 return true;
1231}
1232
Volkan Keles04cb08c2017-03-10 19:08:28 +00001233bool IRTranslator::translateInsertElement(const User &U,
1234 MachineIRBuilder &MIRBuilder) {
1235 // If it is a <1 x Ty> vector, use the scalar as it is
1236 // not a legal vector type in LLT.
1237 if (U.getType()->getVectorNumElements() == 1) {
1238 unsigned Elt = getOrCreateVReg(*U.getOperand(1));
Amara Emerson0d6a26d2018-05-16 10:32:02 +00001239 auto &Regs = *VMap.getVRegs(U);
1240 if (Regs.empty()) {
1241 Regs.push_back(Elt);
1242 VMap.getOffsets(U)->push_back(0);
1243 } else {
1244 MIRBuilder.buildCopy(Regs[0], Elt);
1245 }
Volkan Keles04cb08c2017-03-10 19:08:28 +00001246 return true;
1247 }
Amara Emerson0d6a26d2018-05-16 10:32:02 +00001248
Kristof Beyls7a713502017-04-19 06:38:37 +00001249 unsigned Res = getOrCreateVReg(U);
1250 unsigned Val = getOrCreateVReg(*U.getOperand(0));
1251 unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1252 unsigned Idx = getOrCreateVReg(*U.getOperand(2));
1253 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
Volkan Keles04cb08c2017-03-10 19:08:28 +00001254 return true;
1255}
1256
1257bool IRTranslator::translateExtractElement(const User &U,
1258 MachineIRBuilder &MIRBuilder) {
1259 // If it is a <1 x Ty> vector, use the scalar as it is
1260 // not a legal vector type in LLT.
1261 if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1262 unsigned Elt = getOrCreateVReg(*U.getOperand(0));
Amara Emerson0d6a26d2018-05-16 10:32:02 +00001263 auto &Regs = *VMap.getVRegs(U);
1264 if (Regs.empty()) {
1265 Regs.push_back(Elt);
1266 VMap.getOffsets(U)->push_back(0);
1267 } else {
1268 MIRBuilder.buildCopy(Regs[0], Elt);
1269 }
Volkan Keles04cb08c2017-03-10 19:08:28 +00001270 return true;
1271 }
Kristof Beyls7a713502017-04-19 06:38:37 +00001272 unsigned Res = getOrCreateVReg(U);
1273 unsigned Val = getOrCreateVReg(*U.getOperand(0));
1274 unsigned Idx = getOrCreateVReg(*U.getOperand(1));
1275 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
Volkan Keles04cb08c2017-03-10 19:08:28 +00001276 return true;
1277}
1278
Volkan Keles75bdc762017-03-21 08:44:13 +00001279bool IRTranslator::translateShuffleVector(const User &U,
1280 MachineIRBuilder &MIRBuilder) {
1281 MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
1282 .addDef(getOrCreateVReg(U))
1283 .addUse(getOrCreateVReg(*U.getOperand(0)))
1284 .addUse(getOrCreateVReg(*U.getOperand(1)))
1285 .addUse(getOrCreateVReg(*U.getOperand(2)));
1286 return true;
1287}
1288
Tim Northoverc53606e2016-12-07 21:29:15 +00001289bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
Tim Northover357f1be2016-08-10 23:02:41 +00001290 const PHINode &PI = cast<PHINode>(U);
Tim Northover97d0cb32016-08-05 17:16:40 +00001291
Amara Emerson0d6a26d2018-05-16 10:32:02 +00001292 SmallVector<MachineInstr *, 4> Insts;
1293 for (auto Reg : getOrCreateVRegs(PI)) {
1294 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, Reg);
1295 Insts.push_back(MIB.getInstr());
1296 }
1297
1298 PendingPHIs.emplace_back(&PI, std::move(Insts));
Tim Northover97d0cb32016-08-05 17:16:40 +00001299 return true;
1300}
1301
Daniel Sanders94813992018-07-09 19:33:40 +00001302bool IRTranslator::translateAtomicCmpXchg(const User &U,
1303 MachineIRBuilder &MIRBuilder) {
1304 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
1305
1306 if (I.isWeak())
1307 return false;
1308
1309 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1310 : MachineMemOperand::MONone;
1311 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1312
1313 Type *ResType = I.getType();
1314 Type *ValType = ResType->Type::getStructElementType(0);
1315
1316 auto Res = getOrCreateVRegs(I);
1317 unsigned OldValRes = Res[0];
1318 unsigned SuccessRes = Res[1];
1319 unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
1320 unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
1321 unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());
1322
1323 MIRBuilder.buildAtomicCmpXchgWithSuccess(
1324 OldValRes, SuccessRes, Addr, Cmp, NewVal,
1325 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1326 Flags, DL->getTypeStoreSize(ValType),
1327 getMemOpAlignment(I), AAMDNodes(), nullptr,
1328 I.getSyncScopeID(), I.getSuccessOrdering(),
1329 I.getFailureOrdering()));
1330 return true;
1331}
1332
1333bool IRTranslator::translateAtomicRMW(const User &U,
1334 MachineIRBuilder &MIRBuilder) {
1335 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
1336
1337 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1338 : MachineMemOperand::MONone;
1339 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1340
1341 Type *ResType = I.getType();
1342
1343 unsigned Res = getOrCreateVReg(I);
1344 unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
1345 unsigned Val = getOrCreateVReg(*I.getValOperand());
1346
1347 unsigned Opcode = 0;
1348 switch (I.getOperation()) {
1349 default:
1350 llvm_unreachable("Unknown atomicrmw op");
1351 return false;
1352 case AtomicRMWInst::Xchg:
1353 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
1354 break;
1355 case AtomicRMWInst::Add:
1356 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
1357 break;
1358 case AtomicRMWInst::Sub:
1359 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
1360 break;
1361 case AtomicRMWInst::And:
1362 Opcode = TargetOpcode::G_ATOMICRMW_AND;
1363 break;
1364 case AtomicRMWInst::Nand:
1365 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
1366 break;
1367 case AtomicRMWInst::Or:
1368 Opcode = TargetOpcode::G_ATOMICRMW_OR;
1369 break;
1370 case AtomicRMWInst::Xor:
1371 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
1372 break;
1373 case AtomicRMWInst::Max:
1374 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
1375 break;
1376 case AtomicRMWInst::Min:
1377 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
1378 break;
1379 case AtomicRMWInst::UMax:
1380 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
1381 break;
1382 case AtomicRMWInst::UMin:
1383 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
1384 break;
1385 }
1386
1387 MIRBuilder.buildAtomicRMW(
1388 Opcode, Res, Addr, Val,
1389 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1390 Flags, DL->getTypeStoreSize(ResType),
1391 getMemOpAlignment(I), AAMDNodes(), nullptr,
1392 I.getSyncScopeID(), I.getOrdering()));
1393 return true;
1394}
1395
// Fill in the incoming operands of every G_PHI created by translatePHI. This
// runs after the whole function has been translated, so every
// MachineBasicBlock exists and every incoming value has (or can get) vregs.
void IRTranslator::finishPendingPhis() {
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    // One G_PHI instruction per split component of the PHI's value type.
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      // Only emit operands once per IR predecessor block.
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      // A single IR edge may be represented by several machine predecessor
      // blocks (see getMachinePredBBs); add the value for each of them.
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        // Append a (value-reg, pred-MBB) operand pair to each component PHI.
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}
1426
Amara Emerson0d6a26d2018-05-16 10:32:02 +00001427bool IRTranslator::valueIsSplit(const Value &V,
1428 SmallVectorImpl<uint64_t> *Offsets) {
1429 SmallVector<LLT, 4> SplitTys;
1430 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
1431 return SplitTys.size() > 1;
1432}
1433
// Translate a single IR instruction by dispatching on its opcode to the
// matching translate<Opcode>() method. Returns false for any opcode without
// a translator, signalling that this function cannot be handled.
bool IRTranslator::translate(const Instruction &Inst) {
  // Propagate the source-level debug location to everything emitted for Inst.
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch(Inst.getOpcode()) {
  // Instruction.def expands HANDLE_INST into one 'case' per IR opcode.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
1444
// Materialize constant C into virtual register Reg. All materializing code is
// emitted through EntryBuilder, i.e. into the dedicated entry block, so that
// constants dominate every use. Returns false for constants we can't handle.
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // As we are trying to build a constant val of 0 into a pointer,
    // insert a cast to make them correct with respect to types.
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder.buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    // Only the vector form of zeroinitializer is handled here.
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    // Build the vector from its (zero) elements with a G_MERGE_VALUES.
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    // Constant expressions reuse the instruction translators, dispatched on
    // the expression's opcode (cases generated from Instruction.def).
    switch(CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
      case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    // <1 x Ty> vectors are represented by their scalar element.
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else
    return false;

  return true;
}
1505
Tim Northover0d510442016-08-11 16:21:29 +00001506void IRTranslator::finalizeFunction() {
Quentin Colombet2ecff3b2016-02-10 22:59:27 +00001507 // Release the memory used by the different maps we
1508 // needed during the translation.
Tim Northover800638f2016-12-05 23:10:19 +00001509 PendingPHIs.clear();
Amara Emerson0d6a26d2018-05-16 10:32:02 +00001510 VMap.reset();
Tim Northovercdf23f12016-10-31 18:30:59 +00001511 FrameIndices.clear();
Tim Northoverb6636fd2017-01-17 22:13:50 +00001512 MachinePreds.clear();
Aditya Nandakumarbe929932017-05-17 17:41:55 +00001513 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
1514 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
1515 // destroying it twice (in ~IRTranslator() and ~LLVMContext())
1516 EntryBuilder = MachineIRBuilder();
1517 CurBuilder = MachineIRBuilder();
Quentin Colombet105cf2b2016-01-20 20:58:56 +00001518}
1519
// Main driver: translate the IR function backing CurMF into generic MIR.
// Creates one MachineBasicBlock per IR block (plus a temporary entry block
// for argument lowering and constants), translates every instruction, wires
// up PHIs, then merges the temporary entry block into the IR entry block.
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    // NOTE(review): no early return here — presumably reportTranslationError
    // aborts (or records the failure) when GlobalISel abort is enabled;
    // confirm translation is safe to continue otherwise.
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Setup a separate basic-block for the arguments and constants
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder.setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB: F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fallthrough to the IR entry block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg: F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero sized types.
    // One generic vreg per (non-zero-sized) IR argument.
    VRegArgs.push_back(
        MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
  }

  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
    // Target couldn't lower this signature: report and bail out.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Record each argument's vregs in the value map, unpacking split values.
  auto ArgIt = F.arg_begin();
  for (auto &VArg : VRegArgs) {
    // If the argument is an unsplit scalar then don't use unpackRegs to avoid
    // creating redundant copies.
    if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
      auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
      assert(VRegs.empty() && "VRegs already populated?");
      VRegs.push_back(VArg);
    } else {
      unpackRegs(*ArgIt, VArg, EntryBuilder);
    }
    ArgIt++;
  }

  // And translate the function!
  for (const BasicBlock &BB : F) {
    MachineBasicBlock &MBB = getMBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    CurBuilder.setMBB(MBB);

    for (const Instruction &Inst : BB) {
      if (translate(Inst))
        continue;

      // Translation failed: emit a remark (with the instruction text when
      // extra analysis is allowed) and give up on this function.
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 Inst.getDebugLoc(), &BB);
      R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

      if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
        std::string InstStrStorage;
        raw_string_ostream InstStr(InstStrStorage);
        InstStr << Inst;

        R << ": '" << InstStr.str() << "'";
      }

      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  // Now that all blocks are translated, fill in the PHI operands.
  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instruction from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic block!");

  return false;
}