//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

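  // X2 holds the stack pointer (sp) in the RISC-V calling convention.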
  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

  setOperationAction(ISD::ADDC, XLenVT, Expand);
  setOperationAction(ISD::ADDE, XLenVT, Expand);
  setOperationAction(ISD::SUBC, XLenVT, Expand);
  setOperationAction(ISD::SUBE, XLenVT, Expand);

  setOperationAction(ISD::SREM, XLenVT, Expand);
  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SDIV, XLenVT, Expand);
  setOperationAction(ISD::UREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIV, XLenVT, Expand);

  setOperationAction(ISD::MUL, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::MULHS, XLenVT, Expand);
  setOperationAction(ISD::MULHU, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Expand);

  setOperationAction(ISD::ROTL, XLenVT, Expand);
  setOperationAction(ISD::ROTR, XLenVT, Expand);
  setOperationAction(ISD::BSWAP, XLenVT, Expand);
  setOperationAction(ISD::CTTZ, XLenVT, Expand);
  setOperationAction(ISD::CTLZ, XLenVT, Expand);
  setOperationAction(ISD::CTPOP, XLenVT, Expand);

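  // Global and block addresses need custom lowering; they are materialised as
  // absolute LUI/ADDI (%hi/%lo) pairs (see lowerGlobalAddress below).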
  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments (log2).
  setMinFunctionAlignment(3);
  setPrefFunctionAlignment(3);

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = N->getGlobal();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerGlobalAddress");

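  // Materialise the absolute address as a LUI/ADDI pair carrying the %hi and
  // %lo relocations (RISCVII::MO_HI and RISCVII::MO_LO).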
  SDValue GAHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_HI);
  SDValue GALo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = N->getBlockAddress();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerBlockAddress");

  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerExternalSymbol(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);
  const char *Sym = N->getSymbol();

  // TODO: should also handle gp-relative loads.

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerExternalSymbol");

  SDValue GAHi = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_HI);
  SDValue GALo = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  unsigned FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
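  // With the frame layout assumed here, the caller's frame pointer is saved
  // at fp - 2*XLEN; follow the chain of saved frame pointers Depth times.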
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::LowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
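    // For a non-zero depth, the return address is assumed to have been
    // spilled just below the corresponding frame pointer, so load it from
    // frame address - XLEN.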
    int Off = -XLenInBytes;
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(MI.getOpcode() == RISCV::Select_GPR_Using_CC_GPR &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT instruction, we actually have to insert the triangle
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);
  // Move all remaining instructions to TailMBB.
  TailMBB->splice(TailMBB->begin(), HeadMBB,
                  std::next(MachineBasicBlock::iterator(MI)), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  unsigned LHS = MI.getOperand(1).getReg();
  unsigned RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);

  BuildMI(HeadMBB, DL, TII.get(Opcode))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(4).getReg())
      .addMBB(HeadMBB)
      .addReg(MI.getOperand(5).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return TailMBB;
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or FPRs
//   are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference, the
//   frontend must modify the prototype so a pointer with the sret annotation is
//   passed as the first argument. This is not necessary for large scalar
//   returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.

static const MCPhysReg ArgGPRs[] = {
  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
};

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2) {
  unsigned XLenInBytes = XLen / 8;
  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
    return false;
  }

  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
  }

  return false;
}

// Implements the RISC-V calling convention. Returns true upon failure.
static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
                     CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                     CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
  assert(ValVT == XLenVT && "Unexpected ValVT");
  assert(LocVT == XLenVT && "Unexpected LocVT");

  // Any return value split into more than two values can't be returned
  // directly.
  if (IsRet && ValNo > 1)
    return true;

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Split arguments might be passed indirectly, so keep track of the pending
  // values.
  if (ArgFlags.isSplit() || !PendingLocs.empty()) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
                               ArgFlags);
  }

  // Allocate to a register if possible, or else a stack slot.
  unsigned Reg = State.AllocateReg(ArgGPRs);
  unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(LocVT == XLenVT && "Expected an XLenVT at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    State.addLoc(
        CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  }
  return false;
}

void RISCVTargetLowering::analyzeInputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
  unsigned NumArgs = Ins.size();
  FunctionType *FType = MF.getFunction().getFunctionType();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;

    Type *ArgTy = nullptr;
    if (IsRet)
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());

    if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
      DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                   << EVT(ArgVT).getEVTString() << '\n');
      llvm_unreachable(nullptr);
    }
  }
}

void RISCVTargetLowering::analyzeOutputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
    CallLoweringInfo *CLI) const {
  unsigned NumArgs = Outs.size();

  for (unsigned i = 0; i != NumArgs; i++) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;

    if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
      DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                   << EVT(ArgVT).getEVTString() << "\n");
      llvm_unreachable(nullptr);
    }
  }
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  EVT LocVT = VA.getLocVT();
  SDValue Val;

  unsigned VReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  RegInfo.addLiveIn(VA.getLocReg(), VReg);
  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);

  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
    return Val;
  }
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT LocVT = VA.getLocVT();
  EVT ValVT = VA.getValVT();
  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
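  // The argument was passed on the stack; create a fixed frame object at its
  // offset from the incoming stack pointer so it can be loaded via a frame
  // index.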
  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), /*Immutable=*/true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val;

  ISD::LoadExtType ExtType;
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
    ExtType = ISD::NON_EXTLOAD;
    break;
  }
  Val = DAG.getExtLoad(
      ExtType, DL, LocVT, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
  return Val;
}

// Transform physical registers into virtual registers.
SDValue RISCVTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    assert(VA.getLocVT() == XLenVT && "Unhandled argument type");
    SDValue ArgValue;
    if (VA.isRegLoc())
      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
    else
      ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // If the original argument was split and passed by reference (e.g. i128
      // on RV32), we need to load all parts of it here (using the same
      // address).
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      unsigned ArgIndex = Ins[i].OrigArgIndex;
      assert(Ins[i].PartOffset == 0);
      while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[i + 1];
        unsigned PartOffset = Ins[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++i;
      }
      continue;
    }
    InVals.push_back(ArgValue);
  }

  if (IsVarArg) {
    ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
    const TargetRegisterClass *RC = &RISCV::GPRRegClass;
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

    // Offset of the first variable argument from stack pointer, and size of
    // the vararg save area. For now, the varargs save area is either zero or
    // large enough to hold a0-a7.
    int VaArgOffset, VarArgsSaveSize;

    // If all registers are allocated, then all varargs must be passed on the
    // stack and we don't need to save any argregs.
    if (ArgRegs.size() == Idx) {
      VaArgOffset = CCInfo.getNextStackOffset();
      VarArgsSaveSize = 0;
    } else {
      VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
      VaArgOffset = -VarArgsSaveSize;
    }

    // Record the frame index of the first variable argument,
    // which is a value needed by VASTART.
    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
    RVFI->setVarArgsFrameIndex(FI);

    // If saving an odd number of registers, then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
                                 true);
      VarArgsSaveSize += XLenInBytes;
    }

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    for (unsigned I = Idx; I < ArgRegs.size();
         ++I, VaArgOffset += XLenInBytes) {
      const unsigned Reg = RegInfo.createVirtualRegister(RC);
      RegInfo.addLiveIn(ArgRegs[I], Reg);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
                                   MachinePointerInfo::getFixedStack(MF, FI));
      cast<StoreSDNode>(Store.getNode())
          ->getMemOperand()
          ->setValue((Value *)nullptr);
      OutChains.push_back(Store);
    }
    RVFI->setVarArgsSaveSize(VarArgsSaveSize);
  }

  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens for vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}

// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
// and output parameter nodes.
SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
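  // Tail call optimisation is not yet supported, so lower every call as a
  // normal call.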
  CLI.IsTailCall = false;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Create local copies for byval args
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);

    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
                          /*IsVolatile=*/false,
                          /*AlwaysInline=*/false,
                          /*isTailCall=*/false, MachinePointerInfo(),
                          MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Promote the value if needed.
    // For now, only handle fully promoted and indirect arguments.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::Indirect: {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[i].OrigArgIndex;
      assert(Outs[i].PartOffset == 0);
      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[i + 1];
        unsigned PartOffset = Outs[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++i;
      }
      ArgValue = SpillSlot;
      break;
    }
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
      SDValue Address =
          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  if (isa<GlobalAddressSDNode>(Callee)) {
    Callee = lowerGlobalAddress(Callee, DAG);
  } else if (isa<ExternalSymbolSDNode>(Callee)) {
    Callee = lowerExternalSymbol(Callee, DAG);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);

  // Copy all of the result registers out of their specified physreg.
  for (auto &VA : RVLocs) {
    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    assert(VA.getLocInfo() == CCValAssign::Full && "Unknown loc info!");
    InVals.push_back(RetValue);
  }

  return Chain;
}

bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (CC_RISCV(MF.getDataLayout(), i, VT, VT, CCValAssign::Full, ArgFlags,
                 CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
      return false;
  }
  return true;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
                    nullptr);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    assert(VA.getLocInfo() == CCValAssign::Full &&
           "Unexpected CCValAssign::LocInfo");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);

    // Guarantee that all emitted copies are stuck together.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  case RISCVISD::RET_FLAG:
    return "RISCVISD::RET_FLAG";
  case RISCVISD::CALL:
    return "RISCVISD::CALL";
  case RISCVISD::SELECT_CC:
    return "RISCVISD::SELECT_CC";
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}