//===-- llvm/lib/Target/AArch64/AArch64CallLowering.cpp - Call lowering ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#ifndef LLVM_BUILD_GLOBAL_ISEL
#error "This shouldn't be built without GISel"
#endif

AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
  : CallLowering(&TLI) {
}

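// Lower an IR return into a RET_ReallyLR. The return value (if any) is copied
// into the physical register chosen by the calling convention, which is then
// added as an implicit use of the return instruction.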
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                      const Value *Val, unsigned VReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = *MF.getFunction();

  MachineInstrBuilder MIB = MIRBuilder.buildInstr(AArch64::RET_ReallyLR);
  assert(MIB.getInstr() && "Unable to build a return instruction?!");

  assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
  if (VReg) {
    MIRBuilder.setInstr(*MIB.getInstr(), /* Before */ true);
    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());

    handleAssignments(
        MIRBuilder, AssignFn, MVT::getVT(Val->getType()), VReg,
        [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
          MIRBuilder.buildCopy(PhysReg, ValReg);
          MIB.addUse(PhysReg, RegState::Implicit);
        });
  }
  return true;
}

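// Run the calling-convention assignment function over each argument type and
// hand every register-assigned value to the AssignValToReg callback. Returns
// false if any value cannot be handled yet (stack-passed arguments and
// sign/zero extension are not implemented).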
bool AArch64CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                            CCAssignFn *AssignFn,
                                            ArrayRef<MVT> ArgTypes,
                                            ArrayRef<unsigned> ArgRegs,
                                            AssignFnTy AssignValToReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = *MF.getFunction();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  unsigned NumArgs = ArgTypes.size();
  auto CurVT = ArgTypes.begin();
  for (unsigned i = 0; i != NumArgs; ++i, ++CurVT) {
    bool Res = AssignFn(i, *CurVT, *CurVT, CCValAssign::Full, ISD::ArgFlagsTy(),
                        CCInfo);
    if (Res)
      return false;
  }
  assert(ArgLocs.size() == ArgTypes.size() &&
         "We have a different number of locations and args?!");
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // FIXME: Support non-register arguments.
    if (!VA.isRegLoc())
      return false;

    switch (VA.getLocInfo()) {
    default:
      // Unknown loc info!
      return false;
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      // We don't care about bitcasts.
      break;
    case CCValAssign::AExt:
      // Existing high bits are fine for anyext (whatever they are).
      break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
      // Zero/sign extend the register.
      // FIXME: Not yet implemented.
      return false;
    }

    // Everything checks out, tell the caller where we've decided this
    // parameter/return value should go.
    AssignValToReg(MIRBuilder, ArgRegs[i], VA.getLocReg());
  }
  return true;
}

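// Lower the incoming formal arguments: each physical argument register is
// marked live-in to the block being built and copied into its virtual
// register.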
bool AArch64CallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
    ArrayRef<unsigned> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = *MF.getFunction();

  SmallVector<MVT, 8> ArgTys;
  for (auto &Arg : Args)
    ArgTys.push_back(MVT::getVT(Arg.getType()));

  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);

  return handleAssignments(
      MIRBuilder, AssignFn, ArgTys, VRegs,
      [](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
        MIRBuilder.getMBB().addLiveIn(PhysReg);
        MIRBuilder.buildCopy(ValReg, PhysReg);
      });
}

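// Lower a call: marshal the arguments into the registers required by the
// calling convention, emit a BL/BLR with the appropriate implicit operands and
// clobber mask, then copy any returned value back out of its physical
// register.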
bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    const CallInst &CI, unsigned CalleeReg,
                                    unsigned ResReg,
                                    ArrayRef<unsigned> ArgRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = *MF.getFunction();

  // The first step is to marshal all the function's parameters into the
  // correct physregs and memory locations. Gather the sequence of argument
  // types that we'll pass to the assigner function.
  SmallVector<MVT, 8> ArgTys;
  for (auto &Arg : CI.arg_operands())
    ArgTys.push_back(MVT::getVT(Arg->getType()));

  // Find out which ABI gets to decide where things go.
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *CallAssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);

  // And finally we can do the actual assignments. For a call we need to keep
  // track of the registers used because they'll be implicit uses of the BL.
  SmallVector<unsigned, 8> PhysRegs;
  handleAssignments(
      MIRBuilder, CallAssignFn, ArgTys, ArgRegs,
      [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
        MIRBuilder.buildCopy(PhysReg, ValReg);
        PhysRegs.push_back(PhysReg);
      });

  // Now we can build the actual call instruction.
  MachineInstrBuilder MIB;
  if (CalleeReg)
    MIB = MIRBuilder.buildInstr(AArch64::BLR).addUse(CalleeReg);
  else
    MIB = MIRBuilder.buildInstr(AArch64::BL)
              .addGlobalAddress(CI.getCalledFunction());

  // Tell the call which registers are clobbered.
  auto TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  for (auto Reg : PhysRegs)
    MIB.addUse(Reg, RegState::Implicit);

  // Finally we can copy the returned value back into its virtual register. In
  // symmetry with the arguments, the physical register must be an implicit-def
  // of the call instruction.
  CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
  if (!CI.getType()->isVoidTy())
    handleAssignments(
        MIRBuilder, RetAssignFn, MVT::getVT(CI.getType()), ResReg,
        [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
          MIRBuilder.buildCopy(ValReg, PhysReg);
          MIB.addDef(PhysReg, RegState::Implicit);
        });

  return true;
}