//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

  setOperationAction(ISD::ADDC, XLenVT, Expand);
  setOperationAction(ISD::ADDE, XLenVT, Expand);
  setOperationAction(ISD::SUBC, XLenVT, Expand);
  setOperationAction(ISD::SUBE, XLenVT, Expand);

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Expand);

  setOperationAction(ISD::ROTL, XLenVT, Expand);
  setOperationAction(ISD::ROTR, XLenVT, Expand);
  setOperationAction(ISD::BSWAP, XLenVT, Expand);
  setOperationAction(ISD::CTTZ, XLenVT, Expand);
  setOperationAction(ISD::CTLZ, XLenVT, Expand);
  setOperationAction(ISD::CTPOP, XLenVT, Expand);

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments (log2).
  setMinFunctionAlignment(3);
  setPrefFunctionAlignment(3);

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
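// For example, (setcc a, b, setgt) has no matching branch instruction, so it
// is rewritten as (setcc b, a, setlt), which getBranchOpcodeForIntCondCode
// below can then map onto BLT.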
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  }
}

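// For the non-PIC RV32 case handled below, the symbol address is materialised
// with a %hi/%lo pair, roughly:
//   lui  rd, %hi(sym)
//   addi rd, rd, %lo(sym)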
SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = N->getGlobal();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerGlobalAddress");

  SDValue GAHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_HI);
  SDValue GALo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = N->getBlockAddress();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerBlockAddress");

  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerExternalSymbol(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);
  const char *Sym = N->getSymbol();

  // TODO: should also handle gp-relative loads.

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerExternalSymbol");

  SDValue GAHi = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_HI);
  SDValue GALo = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  unsigned FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
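  // Walk up the frame chain. The loop below assumes the layout produced by the
  // RISC-V frame lowering: the caller's frame pointer is spilled at offset
  // -2*XLEN bytes from the current frame pointer, one slot below the return
  // address that LowerRETURNADDR reads at -XLEN bytes.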
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::LowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(MI.getOpcode() == RISCV::Select_GPR_Using_CC_GPR &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT instruction, we actually have to insert the triangle
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);
  // Move all remaining instructions to TailMBB.
  TailMBB->splice(TailMBB->begin(), HeadMBB,
                  std::next(MachineBasicBlock::iterator(MI)), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  unsigned LHS = MI.getOperand(1).getReg();
  unsigned RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);

  BuildMI(HeadMBB, DL, TII.get(Opcode))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(4).getReg())
      .addMBB(HeadMBB)
      .addReg(MI.getOperand(5).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return TailMBB;
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
// passed in a pair of registers (fp+fp, int+fp), and both registers are
// available, then pass as two separate arguments. If either the GPRs or FPRs
// are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
// slot (as it is larger than 2*XLEN and the floating point rules don't
// apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
// not based on its size and fields. If it will be returned by reference, the
// frontend must modify the prototype so a pointer with the sret annotation is
// passed as the first argument. This is not necessary for large scalar
// returns.
// * Struct return values and varargs should be coerced to structs containing
// register-size fields in the same situations they would be for fixed
// arguments.
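// As an illustration of the rules above on RV32 (XLEN=32): an i128 argument is
// left whole by the frontend, split during legalisation and then passed
// indirectly by CC_RISCV below, while a 16-byte struct (larger than 2*XLEN)
// is passed as a byval pointer.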

static const MCPhysReg ArgGPRs[] = {
  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
};

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
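// This is reached, for example, by an i64 argument on RV32, which arrives here
// as two i32 halves after type legalisation.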
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2) {
  unsigned XLenInBytes = XLen / 8;
  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
    return false;
  }

  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
  }

  return false;
}

// Implements the RISC-V calling convention. Returns true upon failure.
static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
                     CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                     CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
  assert(ValVT == XLenVT && "Unexpected ValVT");
  assert(LocVT == XLenVT && "Unexpected LocVT");

  // Any return value split into more than two values can't be returned
  // directly.
  if (IsRet && ValNo > 1)
    return true;

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
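  // For example, on RV32 a variadic double (8-byte size and alignment) whose
  // next free register would be an odd one (a1, a3, ...) is instead assigned
  // the following even/odd pair, such as a2+a3.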
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Split arguments might be passed indirectly, so keep track of the pending
  // values.
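  // For example, an i64 on RV32 arrives as two pieces and is still passed
  // directly (see below), whereas an i128 arrives as four pieces and is
  // instead passed indirectly via a pointer to a stack temporary.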
  if (ArgFlags.isSplit() || !PendingLocs.empty()) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
                               ArgFlags);
  }

  // Allocate to a register if possible, or else a stack slot.
  unsigned Reg = State.AllocateReg(ArgGPRs);
  unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(LocVT == XLenVT && "Expected an XLenVT at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    State.addLoc(
        CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  }
  return false;
}

void RISCVTargetLowering::analyzeInputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
  unsigned NumArgs = Ins.size();
  FunctionType *FType = MF.getFunction().getFunctionType();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;

    Type *ArgTy = nullptr;
    if (IsRet)
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());

    if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
      DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                   << EVT(ArgVT).getEVTString() << '\n');
      llvm_unreachable(nullptr);
    }
  }
}

void RISCVTargetLowering::analyzeOutputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
    CallLoweringInfo *CLI) const {
  unsigned NumArgs = Outs.size();

  for (unsigned i = 0; i != NumArgs; i++) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;

    if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
      DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                   << EVT(ArgVT).getEVTString() << "\n");
      llvm_unreachable(nullptr);
    }
  }
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  EVT LocVT = VA.getLocVT();
  SDValue Val;

  unsigned VReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  RegInfo.addLiveIn(VA.getLocReg(), VReg);
  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);

  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
    return Val;
  }
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT LocVT = VA.getLocVT();
  EVT ValVT = VA.getValVT();
  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), /*Immutable=*/true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val;

  ISD::LoadExtType ExtType;
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
    ExtType = ISD::NON_EXTLOAD;
    break;
  }
  Val = DAG.getExtLoad(
      ExtType, DL, LocVT, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
  return Val;
}

// Transform physical registers into virtual registers.
SDValue RISCVTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    assert(VA.getLocVT() == XLenVT && "Unhandled argument type");
    SDValue ArgValue;
    if (VA.isRegLoc())
      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
    else
      ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // If the original argument was split and passed by reference (e.g. i128
      // on RV32), we need to load all parts of it here (using the same
      // address).
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      unsigned ArgIndex = Ins[i].OrigArgIndex;
      assert(Ins[i].PartOffset == 0);
      while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[i + 1];
        unsigned PartOffset = Ins[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++i;
      }
      continue;
    }
    InVals.push_back(ArgValue);
  }

  if (IsVarArg) {
    ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
    const TargetRegisterClass *RC = &RISCV::GPRRegClass;
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

    // Offset of the first variable argument from stack pointer, and size of
    // the vararg save area. For now, the varargs save area is either zero or
    // large enough to hold a0-a7.
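    // For example, if a0-a3 already hold fixed arguments, only a4-a7 are
    // saved and the save area is four XLEN-sized slots.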
    int VaArgOffset, VarArgsSaveSize;

    // If all registers are allocated, then all varargs must be passed on the
    // stack and we don't need to save any argregs.
    if (ArgRegs.size() == Idx) {
      VaArgOffset = CCInfo.getNextStackOffset();
      VarArgsSaveSize = 0;
    } else {
      VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
      VaArgOffset = -VarArgsSaveSize;
    }

    // Record the frame index of the first variable argument
    // which is a value necessary to VASTART.
    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
    RVFI->setVarArgsFrameIndex(FI);

    // If saving an odd number of registers, then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
                                 true);
      VarArgsSaveSize += XLenInBytes;
    }

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    for (unsigned I = Idx; I < ArgRegs.size();
         ++I, VaArgOffset += XLenInBytes) {
      const unsigned Reg = RegInfo.createVirtualRegister(RC);
      RegInfo.addLiveIn(ArgRegs[I], Reg);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
                                   MachinePointerInfo::getFixedStack(MF, FI));
      cast<StoreSDNode>(Store.getNode())
          ->getMemOperand()
          ->setValue((Value *)nullptr);
      OutChains.push_back(Store);
    }
    RVFI->setVarArgsSaveSize(VarArgsSaveSize);
  }

  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens for vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}

// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
// and output parameter nodes.
SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CLI.IsTailCall = false;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Create local copies for byval args
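  // (byval arguments are passed by pointer, but the callee may modify its
  // copy, so the caller hands it a fresh stack copy rather than the original
  // object).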
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);

    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
                          /*IsVolatile=*/false,
                          /*AlwaysInline=*/false,
                          /*isTailCall=*/false, MachinePointerInfo(),
                          MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Promote the value if needed.
    // For now, only handle fully promoted and indirect arguments.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::Indirect: {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[i].OrigArgIndex;
      assert(Outs[i].PartOffset == 0);
      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[i + 1];
        unsigned PartOffset = Outs[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++i;
      }
      ArgValue = SpillSlot;
      break;
    }
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
      SDValue Address =
          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  if (isa<GlobalAddressSDNode>(Callee)) {
    Callee = lowerGlobalAddress(Callee, DAG);
  } else if (isa<ExternalSymbolSDNode>(Callee)) {
    Callee = lowerExternalSymbol(Callee, DAG);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);

  // Copy all of the result registers out of their specified physreg.
  for (auto &VA : RVLocs) {
    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    assert(VA.getLocInfo() == CCValAssign::Full && "Unknown loc info!");
    InVals.push_back(RetValue);
  }

  return Chain;
}

bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (CC_RISCV(MF.getDataLayout(), i, VT, VT, CCValAssign::Full, ArgFlags,
                 CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
      return false;
  }
  return true;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
                    nullptr);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    assert(VA.getLocInfo() == CCValAssign::Full &&
           "Unexpected CCValAssign::LocInfo");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);

    // Guarantee that all emitted copies are stuck together.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  case RISCVISD::RET_FLAG:
    return "RISCVISD::RET_FLAG";
  case RISCVISD::CALL:
    return "RISCVISD::CALL";
  case RISCVISD::SELECT_CC:
    return "RISCVISD::SELECT_CC";
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}