//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

  setOperationAction(ISD::ADDC, XLenVT, Expand);
  setOperationAction(ISD::ADDE, XLenVT, Expand);
  setOperationAction(ISD::SUBC, XLenVT, Expand);
  setOperationAction(ISD::SUBE, XLenVT, Expand);

  setOperationAction(ISD::SREM, XLenVT, Expand);
  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SDIV, XLenVT, Expand);
  setOperationAction(ISD::UREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIV, XLenVT, Expand);

  setOperationAction(ISD::MUL, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::MULHS, XLenVT, Expand);
  setOperationAction(ISD::MULHU, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Expand);

  setOperationAction(ISD::ROTL, XLenVT, Expand);
  setOperationAction(ISD::ROTR, XLenVT, Expand);
  setOperationAction(ISD::BSWAP, XLenVT, Expand);
  setOperationAction(ISD::CTTZ, XLenVT, Expand);
  setOperationAction(ISD::CTLZ, XLenVT, Expand);
  setOperationAction(ISD::CTPOP, XLenVT, Expand);

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments (log2).
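  // A value of 3 requests 2^3 = 8 byte alignment.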
  setMinFunctionAlignment(3);
  setPrefFunctionAlignment(3);

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
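// For example, (setgt lhs, rhs) is rewritten to (setlt rhs, lhs), which maps
// directly onto the BLT instruction.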
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = N->getGlobal();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerGlobalAddress");

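  // In the non-PIC RV32 case the address is materialised as
  // (ADDI (LUI %hi(sym)), %lo(sym)): LUI sets the upper 20 bits of the
  // absolute address and ADDI adds the low 12 bits.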
  SDValue GAHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_HI);
  SDValue GALo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = N->getBlockAddress();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerBlockAddress");

  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerExternalSymbol(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);
  const char *Sym = N->getSymbol();

  // TODO: should also handle gp-relative loads.

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerExternalSymbol");

  SDValue GAHi = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_HI);
  SDValue GALo = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(MI.getOpcode() == RISCV::Select_GPR_Using_CC_GPR &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT instruction, we actually have to insert the triangle
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);
  // Move all remaining instructions to TailMBB.
  TailMBB->splice(TailMBB->begin(), HeadMBB,
                  std::next(MachineBasicBlock::iterator(MI)), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  unsigned LHS = MI.getOperand(1).getReg();
  unsigned RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);

  BuildMI(HeadMBB, DL, TII.get(Opcode))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(4).getReg())
      .addMBB(HeadMBB)
      .addReg(MI.getOperand(5).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return TailMBB;
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
// passed in a pair of registers (fp+fp, int+fp), and both registers are
// available, then pass as two separate arguments. If either the GPRs or FPRs
// are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
// slot (as it is larger than 2*XLEN and the floating point rules don't
// apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
// not based on its size and fields. If it will be returned by reference, the
// frontend must modify the prototype so a pointer with the sret annotation is
// passed as the first argument. This is not necessary for large scalar
// returns.
// * Struct return values and varargs should be coerced to structs containing
// register-size fields in the same situations they would be for fixed
// arguments.
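//
// For example, on RV32 a 16-byte struct is larger than 2*XLEN and so is
// passed indirectly via a pointer with the byval attribute, whereas an i128
// scalar argument is not split by the frontend and is instead handled below
// (see CC_RISCV and the Indirect handling in LowerFormalArguments/LowerCall).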

static const MCPhysReg ArgGPRs[] = {
  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
};

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
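// For example, an i64 argument on RV32 is legalised into two i32 halves: both
// halves may land in GPRs, the first half may take the last free GPR with the
// second half going to the stack, or both halves may be placed on the stack.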
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2) {
  unsigned XLenInBytes = XLen / 8;
  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
    return false;
  }

  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
  }

  return false;
}

// Implements the RISC-V calling convention. Returns true upon failure.
static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
                     CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                     CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
  assert(ValVT == XLenVT && "Unexpected ValVT");
  assert(LocVT == XLenVT && "Unexpected LocVT");

  // Any return value split into more than two values can't be returned
  // directly.
  if (IsRet && ValNo > 1)
    return true;

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
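  // For example, a variadic i64 on RV32 has 8-byte alignment, so it is passed
  // in an aligned register pair such as a2+a3; if the next free register
  // would be an odd one (e.g. a1), that register is skipped.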
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Split arguments might be passed indirectly, so keep track of the pending
  // values.
  if (ArgFlags.isSplit() || !PendingLocs.empty()) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
                               ArgFlags);
  }

  // Allocate to a register if possible, or else a stack slot.
  unsigned Reg = State.AllocateReg(ArgGPRs);
  unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(LocVT == XLenVT && "Expected an XLenVT at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    State.addLoc(
        CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  }
  return false;
}

void RISCVTargetLowering::analyzeInputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
  unsigned NumArgs = Ins.size();
  FunctionType *FType = MF.getFunction().getFunctionType();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;

    Type *ArgTy = nullptr;
    if (IsRet)
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());

    if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
      DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                   << EVT(ArgVT).getEVTString() << '\n');
      llvm_unreachable(nullptr);
    }
  }
}

void RISCVTargetLowering::analyzeOutputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
    CallLoweringInfo *CLI) const {
  unsigned NumArgs = Outs.size();

  for (unsigned i = 0; i != NumArgs; i++) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;

    if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
      DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                   << EVT(ArgVT).getEVTString() << "\n");
      llvm_unreachable(nullptr);
    }
  }
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  EVT LocVT = VA.getLocVT();
  SDValue Val;

  unsigned VReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  RegInfo.addLiveIn(VA.getLocReg(), VReg);
  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);

  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
    return Val;
  }
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT LocVT = VA.getLocVT();
  EVT ValVT = VA.getValVT();
  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), /*Immutable=*/true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val;

  ISD::LoadExtType ExtType;
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
    ExtType = ISD::NON_EXTLOAD;
    break;
  }
  Val = DAG.getExtLoad(
      ExtType, DL, LocVT, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
  return Val;
}

// Transform physical registers into virtual registers.
SDValue RISCVTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    assert(VA.getLocVT() == XLenVT && "Unhandled argument type");
    SDValue ArgValue;
    if (VA.isRegLoc())
      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
    else
      ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // If the original argument was split and passed by reference (e.g. i128
      // on RV32), we need to load all parts of it here (using the same
      // address).
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      unsigned ArgIndex = Ins[i].OrigArgIndex;
      assert(Ins[i].PartOffset == 0);
      while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[i + 1];
        unsigned PartOffset = Ins[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++i;
      }
      continue;
    }
    InVals.push_back(ArgValue);
  }

  if (IsVarArg) {
    ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
    const TargetRegisterClass *RC = &RISCV::GPRRegClass;
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

    // Offset of the first variable argument from stack pointer, and size of
    // the vararg save area. For now, the varargs save area is either zero or
    // large enough to hold a0-a7.
    int VaArgOffset, VarArgsSaveSize;

    // If all registers are allocated, then all varargs must be passed on the
    // stack and we don't need to save any argregs.
    if (ArgRegs.size() == Idx) {
      VaArgOffset = CCInfo.getNextStackOffset();
      VarArgsSaveSize = 0;
    } else {
      VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
      VaArgOffset = -VarArgsSaveSize;
    }

    // Record the frame index of the first variable argument, which is needed
    // by VASTART.
    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
    RVFI->setVarArgsFrameIndex(FI);

    // If saving an odd number of registers then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
                                 true);
      VarArgsSaveSize += XLenInBytes;
    }

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    for (unsigned I = Idx; I < ArgRegs.size();
         ++I, VaArgOffset += XLenInBytes) {
      const unsigned Reg = RegInfo.createVirtualRegister(RC);
      RegInfo.addLiveIn(ArgRegs[I], Reg);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
                                   MachinePointerInfo::getFixedStack(MF, FI));
      cast<StoreSDNode>(Store.getNode())
          ->getMemOperand()
          ->setValue((Value *)nullptr);
      OutChains.push_back(Store);
    }
    RVFI->setVarArgsSaveSize(VarArgsSaveSize);
  }

  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens for vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}

// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
// and output parameter nodes.
SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
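  // Tail calls are not yet handled; every call is lowered as a normal call.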
  CLI.IsTailCall = false;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Create local copies for byval args
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);

    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
                          /*IsVolatile=*/false,
                          /*AlwaysInline=*/false,
                          /*isTailCall=*/false, MachinePointerInfo(),
                          MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Promote the value if needed.
    // For now, only handle fully promoted and indirect arguments.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::Indirect: {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[i].OrigArgIndex;
      assert(Outs[i].PartOffset == 0);
      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[i + 1];
        unsigned PartOffset = Outs[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++i;
      }
      ArgValue = SpillSlot;
      break;
    }
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
      SDValue Address =
          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  if (isa<GlobalAddressSDNode>(Callee)) {
    Callee = lowerGlobalAddress(Callee, DAG);
  } else if (isa<ExternalSymbolSDNode>(Callee)) {
    Callee = lowerExternalSymbol(Callee, DAG);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);

  // Copy all of the result registers out of their specified physreg.
  for (auto &VA : RVLocs) {
    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    assert(VA.getLocInfo() == CCValAssign::Full && "Unknown loc info!");
    InVals.push_back(RetValue);
  }

  return Chain;
}

bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (CC_RISCV(MF.getDataLayout(), i, VT, VT, CCValAssign::Full, ArgFlags,
                 CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
      return false;
  }
  return true;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
                    nullptr);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    assert(VA.getLocInfo() == CCValAssign::Full &&
           "Unexpected CCValAssign::LocInfo");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);

    // Guarantee that all emitted copies are stuck together.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  case RISCVISD::RET_FLAG:
    return "RISCVISD::RET_FLAG";
  case RISCVISD::CALL:
    return "RISCVISD::CALL";
  case RISCVISD::SELECT_CC:
    return "RISCVISD::SELECT_CC";
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
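    // 'r': a general purpose integer register.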
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}