//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts.
  void preISelLower(MachineInstr &I) const;

  // An early selection function that runs before the selectImpl() call.
  bool earlySelect(MachineInstr &I) const;

  bool earlySelectSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool earlySelectLoad(MachineInstr &I, MachineRegisterInfo &MRI) const;

  /// Eliminate same-sized cross-bank copies into stores before selectImpl().
  void contractCrossBankCopyIntoStore(MachineInstr &I,
                                      MachineRegisterInfo &MRI) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectVectorASHR(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;

  // Helper to generate an equivalent of scalar_to_vector into a new register
  // of class \p DstRC; returns the instruction that defines the new register.
  MachineInstr *emitScalarToVector(unsigned EltSize,
                                   const TargetRegisterClass *DstRC,
                                   Register Scalar,
                                   MachineIRBuilder &MIRBuilder) const;

  /// Emit a lane insert into \p DstReg, or a new vector register if None is
  /// provided.
  ///
  /// The lane inserted into is defined by \p LaneIdx. The vector source
  /// register is given by \p SrcReg. The register containing the element is
  /// given by \p EltReg.
  MachineInstr *emitLaneInsert(Optional<Register> DstReg, Register SrcReg,
                               Register EltReg, unsigned LaneIdx,
                               const RegisterBank &RB,
                               MachineIRBuilder &MIRBuilder) const;
  bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;

  void collectShuffleMaskIndices(MachineInstr &I, MachineRegisterInfo &MRI,
                                 SmallVectorImpl<Optional<int>> &Idxs) const;
  bool selectShuffleVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectConcatVectors(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectSplitVectorUnmerge(MachineInstr &I,
                                MachineRegisterInfo &MRI) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                      MachineRegisterInfo &MRI) const;
  bool selectIntrinsic(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorICmp(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicTrunc(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicRound(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectJumpTable(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectBrJT(MachineInstr &I, MachineRegisterInfo &MRI) const;

  unsigned emitConstantPoolEntry(Constant *CPVal, MachineFunction &MF) const;
  MachineInstr *emitLoadFromConstantPool(Constant *CPVal,
                                         MachineIRBuilder &MIRBuilder) const;

  // Emit a vector concat operation.
  MachineInstr *emitVectorConcat(Optional<Register> Dst, Register Op1,
                                 Register Op2,
                                 MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
                                   MachineOperand &Predicate,
                                   MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitCMN(MachineOperand &LHS, MachineOperand &RHS,
                        MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitTST(const Register &LHS, const Register &RHS,
                        MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitExtractVectorElt(Optional<Register> DstReg,
                                     const RegisterBank &DstRB, LLT ScalarTy,
                                     Register VecReg, unsigned LaneIdx,
                                     MachineIRBuilder &MIRBuilder) const;

  /// Helper function for selecting G_FCONSTANT. If the G_FCONSTANT can be
  /// materialized using a FMOV instruction, then update MI and return it.
  /// Otherwise, do nothing and return a nullptr.
  MachineInstr *emitFMovForFConstant(MachineInstr &MI,
                                     MachineRegisterInfo &MRI) const;

  /// Emit a CSet for a compare.
  MachineInstr *emitCSetForICMP(Register DefReg, unsigned Pred,
                                MachineIRBuilder &MIRBuilder) const;

  // Equivalent to the i32shift_a and friends from AArch64InstrInfo.td.
  // We use these manually instead of using the importer since it doesn't
  // support SDNodeXForm.
  ComplexRendererFns selectShiftA_32(const MachineOperand &Root) const;
  ComplexRendererFns selectShiftB_32(const MachineOperand &Root) const;
  ComplexRendererFns selectShiftA_64(const MachineOperand &Root) const;
  ComplexRendererFns selectShiftB_64(const MachineOperand &Root) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }

  bool isWorthFoldingIntoExtendedReg(MachineInstr &MI,
                                     const MachineRegisterInfo &MRI) const;
  ComplexRendererFns
  selectAddrModeShiftedExtendXReg(MachineOperand &Root,
                                  unsigned SizeInBytes) const;
  ComplexRendererFns selectAddrModeRegisterOffset(MachineOperand &Root) const;
  ComplexRendererFns selectAddrModeXRO(MachineOperand &Root,
                                       unsigned SizeInBytes) const;

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
  void materializeLargeCMVal(MachineInstr &I, const Value *V,
                             unsigned char OpFlags) const;

  // Optimization methods.
  bool tryOptVectorShuffle(MachineInstr &I) const;
  bool tryOptVectorDup(MachineInstr &MI) const;
  bool tryOptSelect(MachineInstr &MI) const;
  MachineInstr *tryFoldIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
                                      MachineOperand &Predicate,
                                      MachineIRBuilder &MIRBuilder) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Given a register bank, and size in bits, return the smallest register class
/// that can represent that combination.
static const TargetRegisterClass *
getMinClassForRegBank(const RegisterBank &RB, unsigned SizeInBits,
                      bool GetAllRegSet = false) {
  unsigned RegBankID = RB.getID();

  if (RegBankID == AArch64::GPRRegBankID) {
    if (SizeInBits <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (SizeInBits == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
  }

  if (RegBankID == AArch64::FPRRegBankID) {
    switch (SizeInBits) {
    default:
      return nullptr;
    case 8:
      return &AArch64::FPR8RegClass;
    case 16:
      return &AArch64::FPR16RegClass;
    case 32:
      return &AArch64::FPR32RegClass;
    case 64:
      return &AArch64::FPR64RegClass;
    case 128:
      return &AArch64::FPR128RegClass;
    }
  }

  return nullptr;
}

/// Returns the correct subregister to use for a given register class.
static bool getSubRegForClass(const TargetRegisterClass *RC,
                              const TargetRegisterInfo &TRI, unsigned &SubReg) {
  switch (TRI.getRegSizeInBits(*RC)) {
  case 8:
    SubReg = AArch64::bsub;
    break;
  case 16:
    SubReg = AArch64::hsub;
    break;
  case 32:
    if (RC == &AArch64::GPR32RegClass)
      SubReg = AArch64::sub_32;
    else
      SubReg = AArch64::ssub;
    break;
  case 64:
    SubReg = AArch64::dsub;
    break;
  default:
    LLVM_DEBUG(
        dbgs() << "Couldn't find appropriate subregister for register class.");
    return false;
  }

  return true;
}

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - its operands are not all on the same register bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}
500
Benjamin Kramer1411ecf2019-01-24 23:39:47 +0000501#ifndef NDEBUG
Jessica Paquette245047d2019-01-24 22:00:41 +0000502/// Helper function that verifies that we have a valid copy at the end of
503/// selectCopy. Verifies that the source and dest have the expected sizes and
504/// then returns true.
505static bool isValidCopy(const MachineInstr &I, const RegisterBank &DstBank,
506 const MachineRegisterInfo &MRI,
507 const TargetRegisterInfo &TRI,
508 const RegisterBankInfo &RBI) {
509 const unsigned DstReg = I.getOperand(0).getReg();
510 const unsigned SrcReg = I.getOperand(1).getReg();
511 const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
512 const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
Amara Emersondb211892018-02-20 05:11:57 +0000513
Jessica Paquette245047d2019-01-24 22:00:41 +0000514 // Make sure the size of the source and dest line up.
515 assert(
516 (DstSize == SrcSize ||
517 // Copies are a mean to setup initial types, the number of
518 // bits may not exactly match.
519 (TargetRegisterInfo::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
520 // Copies are a mean to copy bits around, as long as we are
521 // on the same register class, that's fine. Otherwise, that
522 // means we need some SUBREG_TO_REG or AND & co.
523 (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
524 "Copy with different width?!");
525
526 // Check the size of the destination.
527 assert((DstSize <= 64 || DstBank.getID() == AArch64::FPRRegBankID) &&
528 "GPRs cannot get more than 64-bit width values");
529
530 return true;
531}
Benjamin Kramer1411ecf2019-01-24 23:39:47 +0000532#endif

/// Helper function for selectCopy. Inserts a subregister copy from
/// \p *From to \p *To, linking it up to \p I.
///
/// e.g., given I = "Dst = COPY SrcReg", we'll transform that into
///
/// CopyReg (From class) = COPY SrcReg
/// SubRegCopy (To class) = COPY CopyReg:SubReg
/// Dst = COPY SubRegCopy
static bool selectSubregisterCopy(MachineInstr &I, MachineRegisterInfo &MRI,
                                  const RegisterBankInfo &RBI, unsigned SrcReg,
                                  const TargetRegisterClass *From,
                                  const TargetRegisterClass *To,
                                  unsigned SubReg) {
  MachineIRBuilder MIB(I);
  auto Copy = MIB.buildCopy({From}, {SrcReg});
  auto SubRegCopy = MIB.buildInstr(TargetOpcode::COPY, {To}, {})
                        .addReg(Copy.getReg(0), 0, SubReg);
  MachineOperand &RegOp = I.getOperand(1);
  RegOp.setReg(SubRegCopy.getReg(0));

  // It's possible that the destination register won't be constrained. Make
  // sure that happens.
  if (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()))
    RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);

  return true;
}

/// Helper function to get the source and destination register classes for a
/// copy. Returns a std::pair containing the source register class for the
/// copy, and the destination register class for the copy. If a register class
/// cannot be determined, then it will be nullptr.
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getRegClassesForCopy(MachineInstr &I, const TargetInstrInfo &TII,
                     MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                     const RegisterBankInfo &RBI) {
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
  unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  // Special casing for cross-bank copies of s1s. We can technically represent
  // a 1-bit value with any size of register. The minimum size for a GPR is 32
  // bits. So, we need to put the FPR on 32 bits as well.
  //
  // FIXME: I'm not sure if this case holds true outside of copies. If it does,
  // then we can pull it into the helpers that get the appropriate class for a
  // register bank. Or make a new helper that carries along some constraint
  // information.
  if (SrcRegBank != DstRegBank && (DstSize == 1 && SrcSize == 1))
    SrcSize = DstSize = 32;

  return {getMinClassForRegBank(SrcRegBank, SrcSize, true),
          getMinClassForRegBank(DstRegBank, DstSize, true)};
}

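/// Select a COPY (or COPY-like generic instruction, e.g. a bitcast) \p I.
/// Chooses register classes for the source and destination from their banks
/// and sizes; for cross-bank copies this may insert a subregister copy or a
/// SUBREG_TO_REG to bridge a size mismatch before constraining the
/// destination register.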
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  // Find the correct register classes for the source and destination registers.
  const TargetRegisterClass *SrcRC;
  const TargetRegisterClass *DstRC;
  std::tie(SrcRC, DstRC) = getRegClassesForCopy(I, TII, MRI, TRI, RBI);

  if (!DstRC) {
    LLVM_DEBUG(dbgs() << "Unexpected dest size "
                      << RBI.getSizeInBits(DstReg, MRI, TRI) << '\n');
    return false;
  }

  // A couple of helpers below, for making sure that the copy we produce is
  // valid.

  // Set to true if we insert a SUBREG_TO_REG. If we do this, then we don't want
  // to verify that the src and dst are the same size, since that's handled by
  // the SUBREG_TO_REG.
  bool KnownValid = false;

  // Returns true, or asserts if something we don't expect happens. Instead of
  // returning true, we return isValidCopy() to ensure that we verify the
  // result.
  auto CheckCopy = [&]() {
    // If we have a bitcast or something, we can't have physical registers.
    assert(
        (I.isCopy() ||
         (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()) &&
          !TargetRegisterInfo::isPhysicalRegister(I.getOperand(1).getReg()))) &&
        "No phys reg on generic operator!");
    assert(KnownValid || isValidCopy(I, DstRegBank, MRI, TRI, RBI));
    (void)KnownValid;
    return true;
  };

  // Is this a copy? If so, then we may need to insert a subregister copy, or
  // a SUBREG_TO_REG.
  if (I.isCopy()) {
    // Yes. Check if there's anything to fix up.
    if (!SrcRC) {
      LLVM_DEBUG(dbgs() << "Couldn't determine source register class\n");
      return false;
    }

    // Is this a cross-bank copy?
    if (DstRegBank.getID() != SrcRegBank.getID()) {
      // If we're doing a cross-bank copy on different-sized registers, we need
      // to do a bit more work.
      unsigned SrcSize = TRI.getRegSizeInBits(*SrcRC);
      unsigned DstSize = TRI.getRegSizeInBits(*DstRC);

      if (SrcSize > DstSize) {
        // We're doing a cross-bank copy into a smaller register. We need a
        // subregister copy. First, get a register class that's on the same bank
        // as the destination, but the same size as the source.
        const TargetRegisterClass *SubregRC =
            getMinClassForRegBank(DstRegBank, SrcSize, true);
        assert(SubregRC && "Didn't get a register class for subreg?");

        // Get the appropriate subregister for the destination.
        unsigned SubReg = 0;
        if (!getSubRegForClass(DstRC, TRI, SubReg)) {
          LLVM_DEBUG(dbgs() << "Couldn't determine subregister for copy.\n");
          return false;
        }

        // Now, insert a subregister copy using the new register class.
        selectSubregisterCopy(I, MRI, RBI, SrcReg, SubregRC, DstRC, SubReg);
        return CheckCopy();
      } else if (DstRegBank.getID() == AArch64::GPRRegBankID && DstSize == 32 &&
                 SrcSize == 16) {
        // Special case for FPR16 to GPR32.
        // FIXME: This can probably be generalized like the above case.
        unsigned PromoteReg =
            MRI.createVirtualRegister(&AArch64::FPR32RegClass);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(AArch64::SUBREG_TO_REG), PromoteReg)
            .addImm(0)
            .addUse(SrcReg)
            .addImm(AArch64::hsub);
        MachineOperand &RegOp = I.getOperand(1);
        RegOp.setReg(PromoteReg);

        // Promise that the copy is implicitly validated by the SUBREG_TO_REG.
        KnownValid = true;
      }
    }

    // If the destination is a physical register, then there's nothing to
    // change, so we're done.
    if (TargetRegisterInfo::isPhysicalRegister(DstReg))
      return CheckCopy();
  }

  // No need to constrain SrcReg. It will get constrained when we hit one of
  // its uses or defs. Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return CheckCopy();
}

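/// Select the AArch64 opcode for the scalar FP conversion \p GenericOpc
/// (G_SITOFP, G_UITOFP, G_FPTOSI or G_FPTOUI), given the destination type
/// \p DstTy and the source type \p SrcTy. Only 32- and 64-bit scalars are
/// handled.
/// \returns \p GenericOpc if the combination is unsupported.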
static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
  return GenericOpc;
}

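/// Select the opcode for a basic conditional select on the destination of
/// \p I: CSELWr/FCSELSrrr for 32-bit values, CSELXr/FCSELDrrr for 64-bit
/// values and pointers, picking the FP variant when the destination is not
/// on the GPR bank.
/// \returns 0 if the type is unsupported.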
static unsigned selectSelectOpc(MachineInstr &I, MachineRegisterInfo &MRI,
                                const RegisterBankInfo &RBI) {
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  bool IsFP = (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
               AArch64::GPRRegBankID);
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (Ty == LLT::scalar(32))
    return IsFP ? AArch64::FCSELSrrr : AArch64::CSELWr;
  else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64))
    return IsFP ? AArch64::FCSELDrrr : AArch64::CSELXr;
  return 0;
}

/// Helper function to select the opcode for a G_FCMP.
static unsigned selectFCMPOpc(MachineInstr &I, MachineRegisterInfo &MRI) {
  // If this is a compare against +0.0, then we don't have to explicitly
  // materialize a constant.
  const ConstantFP *FPImm = getConstantFPVRegVal(I.getOperand(3).getReg(), MRI);
  bool ShouldUseImm = FPImm && (FPImm->isZero() && !FPImm->isNegative());
  unsigned OpSize = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
  if (OpSize != 32 && OpSize != 64)
    return 0;
  unsigned CmpOpcTbl[2][2] = {{AArch64::FCMPSrr, AArch64::FCMPDrr},
                              {AArch64::FCMPSri, AArch64::FCMPDri}};
  return CmpOpcTbl[ShouldUseImm][OpSize == 64];
}

/// Returns true if \p P is an unsigned integer comparison predicate.
static bool isUnsignedICMPPred(const CmpInst::Predicate P) {
  switch (P) {
  default:
    return false;
  case CmpInst::ICMP_UGT:
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    return true;
  }
}

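/// Convert an IR integer comparison predicate to the corresponding AArch64
/// condition code.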
static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

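/// Convert an IR floating point comparison predicate to the AArch64 condition
/// code(s) needed to test it. Predicates that cannot be tested with a single
/// condition (FCMP_ONE, FCMP_UEQ) also set \p CondCode2 (e.g. FCMP_ONE is
/// MI or GT); for all others \p CondCode2 is left as AArch64CC::AL.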
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

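/// Select a G_BRCOND \p I whose condition is produced by a G_ICMP (possibly
/// through a G_TRUNC). Equality comparisons against zero on the GPR bank
/// become CBZ/CBNZ (or their 64-bit X variants); anything else falls back to
/// an integer compare followed by a Bcc.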
bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  Register LHS = CCMI->getOperand(2).getReg();
  Register RHS = CCMI->getOperand(3).getReg();
  auto VRegAndVal = getConstantVRegValWithLookThrough(RHS, MRI);
  if (!VRegAndVal)
    std::swap(RHS, LHS);

  VRegAndVal = getConstantVRegValWithLookThrough(RHS, MRI);
  if (!VRegAndVal || VRegAndVal->Value != 0) {
    MachineIRBuilder MIB(I);
    // If we can't select a CBZ then emit a cmp + Bcc.
    if (!emitIntegerCompare(CCMI->getOperand(2), CCMI->getOperand(3),
                            CCMI->getOperand(1), MIB))
      return false;
    const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(
        (CmpInst::Predicate)CCMI->getOperand(1).getPredicate());
    MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC).addMBB(DestMBB);
    I.eraseFromParent();
    return true;
  }

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
      .addUse(LHS)
      .addMBB(DestMBB)
      .constrainAllUses(TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

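/// Select a vector G_SHL by emitting a USHL, which shifts each lane left by
/// the (signed) per-lane amount in the second source register. Only
/// <4 x s32> and <2 x s32> are currently handled.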
bool AArch64InstructionSelector::selectVectorSHL(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_SHL);
  Register DstReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(DstReg);
  Register Src1Reg = I.getOperand(1).getReg();
  Register Src2Reg = I.getOperand(2).getReg();

  if (!Ty.isVector())
    return false;

  unsigned Opc = 0;
  if (Ty == LLT::vector(4, 32)) {
    Opc = AArch64::USHLv4i32;
  } else if (Ty == LLT::vector(2, 32)) {
    Opc = AArch64::USHLv2i32;
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled G_SHL type");
    return false;
  }

  MachineIRBuilder MIB(I);
  auto UShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Src2Reg});
  constrainSelectedInstRegOperands(*UShl, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

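/// Select a vector G_ASHR. AArch64 has no vector shift-right-by-register
/// instruction, so this negates the shift amounts and emits an SSHL instead
/// (see the comment in the body).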
bool AArch64InstructionSelector::selectVectorASHR(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_ASHR);
  Register DstReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(DstReg);
  Register Src1Reg = I.getOperand(1).getReg();
  Register Src2Reg = I.getOperand(2).getReg();

  if (!Ty.isVector())
    return false;

  // There is no vector shift-right-by-register instruction, but the
  // shift-left-by-register instruction takes a signed shift amount, where
  // negative amounts specify a right shift.

  unsigned Opc = 0;
  unsigned NegOpc = 0;
  const TargetRegisterClass *RC = nullptr;
  if (Ty == LLT::vector(4, 32)) {
    Opc = AArch64::SSHLv4i32;
    NegOpc = AArch64::NEGv4i32;
    RC = &AArch64::FPR128RegClass;
  } else if (Ty == LLT::vector(2, 32)) {
    Opc = AArch64::SSHLv2i32;
    NegOpc = AArch64::NEGv2i32;
    RC = &AArch64::FPR64RegClass;
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled G_ASHR type");
    return false;
  }

  MachineIRBuilder MIB(I);
  auto Neg = MIB.buildInstr(NegOpc, {RC}, {Src2Reg});
  constrainSelectedInstRegOperands(*Neg, TII, TRI, RBI);
  auto SShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Neg});
  constrainSelectedInstRegOperands(*SShl, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

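/// Select G_VASTART for Darwin, where va_list is a simple pointer: computes
/// the address of the first variadic stack slot with an ADDXri on the vararg
/// stack frame index, and stores it through the va_list pointer.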
bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  Register ListReg = I.getOperand(0).getReg();

  Register ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

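/// Materialize \p V (a GlobalValue or BlockAddress) into the destination of
/// \p I with a MOVZ followed by three MOVKs, filling in the 64-bit address
/// 16 bits at a time (G0..G3). Per the "LargeCM" in the name, this is the
/// sequence used when the large code model rules out ADRP-based addressing.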
void AArch64InstructionSelector::materializeLargeCMVal(
    MachineInstr &I, const Value *V, unsigned char OpFlags) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(I);

  auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {});
  MovZ->addOperand(MF, I.getOperand(1));
  MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                     AArch64II::MO_NC);
  MovZ->addOperand(MF, MachineOperand::CreateImm(0));
  constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

  auto BuildMovK = [&](Register SrcReg, unsigned char Flags, unsigned Offset,
                       Register ForceDstReg) {
    Register DstReg = ForceDstReg
                          ? ForceDstReg
                          : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
    if (auto *GV = dyn_cast<GlobalValue>(V)) {
      MovI->addOperand(MF, MachineOperand::CreateGA(
                               GV, MovZ->getOperand(1).getOffset(), Flags));
    } else {
      MovI->addOperand(
          MF, MachineOperand::CreateBA(cast<BlockAddress>(V),
                                       MovZ->getOperand(1).getOffset(), Flags));
    }
    MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
    constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
    return DstReg;
  };
  Register DstReg = BuildMovK(MovZ.getReg(0),
                              AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
  DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
  BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
}

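/// Lower \p I in place before any selection attempt. For 32-bit shifts whose
/// s64 shift amount is not a G_CONSTANT, inserts a subregister copy that
/// truncates the amount to 32 bits so the s32 register-register variants can
/// be selected; for G_STORE, tries to fold a same-sized cross-bank copy into
/// the store.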
void AArch64InstructionSelector::preISelLower(MachineInstr &I) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  switch (I.getOpcode()) {
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR: {
    // These shifts are legalized to have 64 bit shift amounts because we want
    // to take advantage of the existing imported selection patterns that
    // assume the immediates are s64s. However, if the shifted type is 32 bits
    // and for some reason we receive input GMIR that has an s64 shift amount
    // that's not a G_CONSTANT, insert a truncate so that we can still select
    // the s32 register-register variant.
    unsigned SrcReg = I.getOperand(1).getReg();
    unsigned ShiftReg = I.getOperand(2).getReg();
    const LLT ShiftTy = MRI.getType(ShiftReg);
    const LLT SrcTy = MRI.getType(SrcReg);
    if (SrcTy.isVector())
      return;
    assert(!ShiftTy.isVector() && "unexpected vector shift ty");
    if (SrcTy.getSizeInBits() != 32 || ShiftTy.getSizeInBits() != 64)
      return;
    auto *AmtMI = MRI.getVRegDef(ShiftReg);
    assert(AmtMI && "could not find a vreg definition for shift amount");
    if (AmtMI->getOpcode() != TargetOpcode::G_CONSTANT) {
      // Insert a subregister copy to implement a 64->32 trunc
      MachineIRBuilder MIB(I);
      auto Trunc = MIB.buildInstr(TargetOpcode::COPY, {SrcTy}, {})
                       .addReg(ShiftReg, 0, AArch64::sub_32);
      MRI.setRegBank(Trunc.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
      I.getOperand(2).setReg(Trunc.getReg(0));
    }
    return;
  }
  case TargetOpcode::G_STORE:
    contractCrossBankCopyIntoStore(I, MRI);
    return;
  default:
    return;
  }
}

bool AArch64InstructionSelector::earlySelectSHL(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  // We try to match the immediate variant of LSL, which is actually an alias
  // for a special case of UBFM. Otherwise, we fall back to the imported
  // selector which will match the register variant.
  assert(I.getOpcode() == TargetOpcode::G_SHL && "unexpected op");
  const auto &MO = I.getOperand(2);
  auto VRegAndVal = getConstantVRegVal(MO.getReg(), MRI);
  if (!VRegAndVal)
    return false;

  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
  if (DstTy.isVector())
    return false;
  bool Is64Bit = DstTy.getSizeInBits() == 64;
  auto Imm1Fn = Is64Bit ? selectShiftA_64(MO) : selectShiftA_32(MO);
  auto Imm2Fn = Is64Bit ? selectShiftB_64(MO) : selectShiftB_32(MO);
  MachineIRBuilder MIB(I);

  if (!Imm1Fn || !Imm2Fn)
    return false;

  auto NewI =
      MIB.buildInstr(Is64Bit ? AArch64::UBFMXri : AArch64::UBFMWri,
                     {I.getOperand(0).getReg()}, {I.getOperand(1).getReg()});

  for (auto &RenderFn : *Imm1Fn)
    RenderFn(NewI);
  for (auto &RenderFn : *Imm2Fn)
    RenderFn(NewI);

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI);
}

void AArch64InstructionSelector::contractCrossBankCopyIntoStore(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert(I.getOpcode() == TargetOpcode::G_STORE && "Expected G_STORE");
  // If we're storing a scalar, it doesn't matter what register bank that
  // scalar is on. All that matters is the size.
  //
  // So, if we see something like this (with a 32-bit scalar as an example):
  //
  // %x:gpr(s32) = ... something ...
  // %y:fpr(s32) = COPY %x:gpr(s32)
  // G_STORE %y:fpr(s32)
  //
  // We can fix this up into something like this:
  //
  // G_STORE %x:gpr(s32)
  //
  // And then continue the selection process normally.
  MachineInstr *Def = getDefIgnoringCopies(I.getOperand(0).getReg(), MRI);
  if (!Def)
    return;
  Register DefDstReg = Def->getOperand(0).getReg();
  LLT DefDstTy = MRI.getType(DefDstReg);
  Register StoreSrcReg = I.getOperand(0).getReg();
  LLT StoreSrcTy = MRI.getType(StoreSrcReg);

  // If we get something strange like a physical register, then we shouldn't
  // go any further.
  if (!DefDstTy.isValid())
    return;

  // Are the source and dst types the same size?
  if (DefDstTy.getSizeInBits() != StoreSrcTy.getSizeInBits())
    return;

  if (RBI.getRegBank(StoreSrcReg, MRI, TRI) ==
      RBI.getRegBank(DefDstReg, MRI, TRI))
    return;

  // We have a cross-bank copy, which is entering a store. Let's fold it.
  I.getOperand(0).setReg(DefDstReg);
}

Jessica Paquette7a1dcc52019-07-18 21:50:11 +00001220bool AArch64InstructionSelector::earlySelectLoad(
1221 MachineInstr &I, MachineRegisterInfo &MRI) const {
1222 // Try to fold in shifts, etc into the addressing mode of a load.
1223 assert(I.getOpcode() == TargetOpcode::G_LOAD && "unexpected op");
1224
1225 // Don't handle atomic loads/stores yet.
1226 auto &MemOp = **I.memoperands_begin();
1227 if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
1228 LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
1229 return false;
1230 }
1231
1232 unsigned MemBytes = MemOp.getSize();
1233
1234 // Only support 64-bit loads for now.
1235 if (MemBytes != 8)
1236 return false;
1237
1238 Register DstReg = I.getOperand(0).getReg();
1239 const LLT DstTy = MRI.getType(DstReg);
1240 // Don't handle vectors.
1241 if (DstTy.isVector())
1242 return false;
1243
1244 unsigned DstSize = DstTy.getSizeInBits();
1245 // TODO: 32-bit destinations.
1246 if (DstSize != 64)
1247 return false;
1248
Jessica Paquette2b404d02019-07-23 16:09:42 +00001249 // Check if we can do any folding from GEPs/shifts etc. into the load.
1250 auto ImmFn = selectAddrModeXRO(I.getOperand(1), MemBytes);
Jessica Paquette7a1dcc52019-07-18 21:50:11 +00001251 if (!ImmFn)
1252 return false;
1253
1254 // We can fold something. Emit the load here.
1255 MachineIRBuilder MIB(I);
1256
1257 // Choose the instruction based on the size of the element being loaded and
1258 // whether or not we're loading into an FPR.
1259 const RegisterBank &RB = *RBI.getRegBank(DstReg, MRI, TRI);
1260 unsigned Opc =
1261 RB.getID() == AArch64::GPRRegBankID ? AArch64::LDRXroX : AArch64::LDRDroX;
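  // LDRXroX/LDRDroX are the register-offset forms (base plus an extended or
  // shifted index register); the render functions returned by
  // selectAddrModeXRO fill in the base, offset register, and extend/shift
  // operands below.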
1262 // Construct the load.
1263 auto LoadMI = MIB.buildInstr(Opc, {DstReg}, {});
1264 for (auto &RenderFn : *ImmFn)
1265 RenderFn(LoadMI);
1266 LoadMI.addMemOperand(*I.memoperands_begin());
1267 I.eraseFromParent();
1268 return constrainSelectedInstRegOperands(*LoadMI, TII, TRI, RBI);
1269}
1270
Amara Emersoncac11512019-07-03 01:49:06 +00001271bool AArch64InstructionSelector::earlySelect(MachineInstr &I) const {
1272 assert(I.getParent() && "Instruction should be in a basic block!");
1273 assert(I.getParent()->getParent() && "Instruction should be in a function!");
1274
1275 MachineBasicBlock &MBB = *I.getParent();
1276 MachineFunction &MF = *MBB.getParent();
1277 MachineRegisterInfo &MRI = MF.getRegInfo();
1278
1279 switch (I.getOpcode()) {
1280 case TargetOpcode::G_SHL:
1281 return earlySelectSHL(I, MRI);
Jessica Paquette7a1dcc52019-07-18 21:50:11 +00001282 case TargetOpcode::G_LOAD:
1283 return earlySelectLoad(I, MRI);
Amara Emersoncac11512019-07-03 01:49:06 +00001284 default:
1285 return false;
1286 }
1287}
1288
Daniel Sandersf76f3152017-11-16 00:46:35 +00001289bool AArch64InstructionSelector::select(MachineInstr &I,
1290 CodeGenCoverage &CoverageInfo) const {
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001291 assert(I.getParent() && "Instruction should be in a basic block!");
1292 assert(I.getParent()->getParent() && "Instruction should be in a function!");
1293
1294 MachineBasicBlock &MBB = *I.getParent();
1295 MachineFunction &MF = *MBB.getParent();
1296 MachineRegisterInfo &MRI = MF.getRegInfo();
1297
Tim Northovercdf23f12016-10-31 18:30:59 +00001298 unsigned Opcode = I.getOpcode();
Aditya Nandakumarefd8a842017-08-23 20:45:48 +00001299 // G_PHI requires the same handling as PHI.
1300 if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
Tim Northovercdf23f12016-10-31 18:30:59 +00001301 // Certain non-generic instructions also need some special handling.
1302
1303 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
1304 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
Tim Northover7d88da62016-11-08 00:34:06 +00001305
Aditya Nandakumarefd8a842017-08-23 20:45:48 +00001306 if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001307 const Register DefReg = I.getOperand(0).getReg();
Tim Northover7d88da62016-11-08 00:34:06 +00001308 const LLT DefTy = MRI.getType(DefReg);
1309
Matt Arsenault732149b2019-07-01 17:02:24 +00001310 const RegClassOrRegBank &RegClassOrBank =
1311 MRI.getRegClassOrRegBank(DefReg);
Tim Northover7d88da62016-11-08 00:34:06 +00001312
Matt Arsenault732149b2019-07-01 17:02:24 +00001313 const TargetRegisterClass *DefRC
1314 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
1315 if (!DefRC) {
1316 if (!DefTy.isValid()) {
1317 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
1318 return false;
1319 }
1320 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
1321 DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
Tim Northover7d88da62016-11-08 00:34:06 +00001322 if (!DefRC) {
Matt Arsenault732149b2019-07-01 17:02:24 +00001323 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
1324 return false;
Tim Northover7d88da62016-11-08 00:34:06 +00001325 }
1326 }
Matt Arsenault732149b2019-07-01 17:02:24 +00001327
Aditya Nandakumarefd8a842017-08-23 20:45:48 +00001328 I.setDesc(TII.get(TargetOpcode::PHI));
Tim Northover7d88da62016-11-08 00:34:06 +00001329
1330 return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
1331 }
1332
1333 if (I.isCopy())
Tim Northovercdf23f12016-10-31 18:30:59 +00001334 return selectCopy(I, TII, MRI, TRI, RBI);
Tim Northover7d88da62016-11-08 00:34:06 +00001335
1336 return true;
Tim Northovercdf23f12016-10-31 18:30:59 +00001337 }
1338
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001339
1340 if (I.getNumOperands() != I.getNumExplicitOperands()) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001341 LLVM_DEBUG(
1342 dbgs() << "Generic instruction has unexpected implicit operands\n");
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001343 return false;
1344 }
1345
Amara Emersoncac11512019-07-03 01:49:06 +00001346 // Try to do some lowering before we start instruction selecting. These
1347 // lowerings are purely transformations on the input G_MIR and so selection
1348 // must continue after any modification of the instruction.
1349 preISelLower(I);
1350
1351 // There may be patterns that the importer can't handle optimally but
1352 // still selects into a suboptimal sequence, so our custom C++ selection
1353 // code later never gets a chance to work on them. Therefore, we have an
1354 // early selection attempt here to give priority to certain selection
1355 // routines over the imported ones.
1356 if (earlySelect(I))
1357 return true;
1358
Daniel Sandersf76f3152017-11-16 00:46:35 +00001359 if (selectImpl(I, CoverageInfo))
Ahmed Bougacha36f70352016-12-21 23:26:20 +00001360 return true;
1361
Tim Northover32a078a2016-09-15 10:09:59 +00001362 LLT Ty =
1363 I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001364
Amara Emerson3739a202019-03-15 21:59:50 +00001365 MachineIRBuilder MIB(I);
1366
Tim Northover69271c62016-10-12 22:49:11 +00001367 switch (Opcode) {
Tim Northover5e3dbf32016-10-12 22:49:01 +00001368 case TargetOpcode::G_BRCOND: {
1369 if (Ty.getSizeInBits() > 32) {
1370 // We shouldn't need this on AArch64, but it would be implemented as an
1371 // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
1372 // bit being tested is < 32.
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001373 LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
1374 << ", expected at most 32 bits\n");
Tim Northover5e3dbf32016-10-12 22:49:01 +00001375 return false;
1376 }
1377
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001378 const Register CondReg = I.getOperand(0).getReg();
Tim Northover5e3dbf32016-10-12 22:49:01 +00001379 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1380
Kristof Beylse66bc1f2018-12-18 08:50:02 +00001381 // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
1382 // instructions will not be produced, as they are conditional branch
1383 // instructions that do not set flags.
1384 bool ProduceNonFlagSettingCondBr =
1385 !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
1386 if (ProduceNonFlagSettingCondBr && selectCompareBranch(I, MF, MRI))
Ahmed Bougacha641cb202017-03-27 16:35:31 +00001387 return true;
1388
Kristof Beylse66bc1f2018-12-18 08:50:02 +00001389 if (ProduceNonFlagSettingCondBr) {
1390 auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
1391 .addUse(CondReg)
1392 .addImm(/*bit offset=*/0)
1393 .addMBB(DestMBB);
Tim Northover5e3dbf32016-10-12 22:49:01 +00001394
Kristof Beylse66bc1f2018-12-18 08:50:02 +00001395 I.eraseFromParent();
1396 return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
1397 } else {
1398 auto CMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
1399 .addDef(AArch64::WZR)
1400 .addUse(CondReg)
1401 .addImm(1);
1402 constrainSelectedInstRegOperands(*CMP.getInstr(), TII, TRI, RBI);
1403 auto Bcc =
1404 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::Bcc))
1405 .addImm(AArch64CC::EQ)
1406 .addMBB(DestMBB);
1407
1408 I.eraseFromParent();
1409 return constrainSelectedInstRegOperands(*Bcc.getInstr(), TII, TRI, RBI);
1410 }
Tim Northover5e3dbf32016-10-12 22:49:01 +00001411 }
1412
Kristof Beyls65a12c02017-01-30 09:13:18 +00001413 case TargetOpcode::G_BRINDIRECT: {
1414 I.setDesc(TII.get(AArch64::BR));
1415 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1416 }
1417
Amara Emerson6e71b342019-06-21 18:10:41 +00001418 case TargetOpcode::G_BRJT:
1419 return selectBrJT(I, MRI);
1420
Jessica Paquette67ab9eb2019-04-26 18:00:01 +00001421 case TargetOpcode::G_BSWAP: {
1422 // Handle vector types for G_BSWAP directly.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001423 Register DstReg = I.getOperand(0).getReg();
Jessica Paquette67ab9eb2019-04-26 18:00:01 +00001424 LLT DstTy = MRI.getType(DstReg);
1425
1426 // We should only get vector types here; everything else is handled by the
1427 // importer right now.
1428 if (!DstTy.isVector() || DstTy.getSizeInBits() > 128) {
1429 LLVM_DEBUG(dbgs() << "Dst type for G_BSWAP currently unsupported.\n");
1430 return false;
1431 }
1432
1433 // Only handle 4 and 2 element vectors for now.
1434 // TODO: 16-bit elements.
1435 unsigned NumElts = DstTy.getNumElements();
1436 if (NumElts != 4 && NumElts != 2) {
1437 LLVM_DEBUG(dbgs() << "Unsupported number of elements for G_BSWAP.\n");
1438 return false;
1439 }
1440
1441 // Choose the correct opcode for the supported types. Right now, that's
1442 // v2s32, v4s32, and v2s64.
1443 unsigned Opc = 0;
1444 unsigned EltSize = DstTy.getElementType().getSizeInBits();
1445 if (EltSize == 32)
1446 Opc = (DstTy.getNumElements() == 2) ? AArch64::REV32v8i8
1447 : AArch64::REV32v16i8;
1448 else if (EltSize == 64)
1449 Opc = AArch64::REV64v16i8;
1450
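    // A vector bswap reverses the bytes within each element, so it maps onto
    // REV32/REV64 over the same register viewed as bytes, e.g. for v4s32:
    //   rev32 v0.16b, v1.16b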
1451 // We should always get something by the time we get here...
1452 assert(Opc != 0 && "Didn't get an opcode for G_BSWAP?");
1453
1454 I.setDesc(TII.get(Opc));
1455 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1456 }
1457
Tim Northover4494d692016-10-18 19:47:57 +00001458 case TargetOpcode::G_FCONSTANT:
Tim Northover4edc60d2016-10-10 21:49:42 +00001459 case TargetOpcode::G_CONSTANT: {
Tim Northover4494d692016-10-18 19:47:57 +00001460 const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;
1461
Amara Emerson8f25a022019-06-21 16:43:50 +00001462 const LLT s8 = LLT::scalar(8);
1463 const LLT s16 = LLT::scalar(16);
Tim Northover4494d692016-10-18 19:47:57 +00001464 const LLT s32 = LLT::scalar(32);
1465 const LLT s64 = LLT::scalar(64);
1466 const LLT p0 = LLT::pointer(0, 64);
1467
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001468 const Register DefReg = I.getOperand(0).getReg();
Tim Northover4494d692016-10-18 19:47:57 +00001469 const LLT DefTy = MRI.getType(DefReg);
1470 const unsigned DefSize = DefTy.getSizeInBits();
1471 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1472
1473 // FIXME: Redundant check, but even less readable when factored out.
1474 if (isFP) {
1475 if (Ty != s32 && Ty != s64) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001476 LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
1477 << " constant, expected: " << s32 << " or " << s64
1478 << '\n');
Tim Northover4494d692016-10-18 19:47:57 +00001479 return false;
1480 }
1481
1482 if (RB.getID() != AArch64::FPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001483 LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
1484 << " constant on bank: " << RB
1485 << ", expected: FPR\n");
Tim Northover4494d692016-10-18 19:47:57 +00001486 return false;
1487 }
Daniel Sanders11300ce2017-10-13 21:28:03 +00001488
1489 // The case when we have 0.0 is covered by tablegen. Reject it here so we
1490 // can be sure tablegen works correctly and isn't rescued by this code.
1491 if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
1492 return false;
Tim Northover4494d692016-10-18 19:47:57 +00001493 } else {
Daniel Sanders05540042017-08-08 10:44:31 +00001494 // s32 and s64 are covered by tablegen.
Amara Emerson8f25a022019-06-21 16:43:50 +00001495 if (Ty != p0 && Ty != s8 && Ty != s16) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001496 LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
1497 << " constant, expected: " << s8 << ", " << s16
1498 << ", or " << p0 << '\n');
Tim Northover4494d692016-10-18 19:47:57 +00001499 return false;
1500 }
1501
1502 if (RB.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001503 LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
1504 << " constant on bank: " << RB
1505 << ", expected: GPR\n");
Tim Northover4494d692016-10-18 19:47:57 +00001506 return false;
1507 }
1508 }
1509
Amara Emerson8f25a022019-06-21 16:43:50 +00001510 // We allow G_CONSTANT of types < 32b.
Tim Northover4494d692016-10-18 19:47:57 +00001511 const unsigned MovOpc =
Amara Emerson8f25a022019-06-21 16:43:50 +00001512 DefSize == 64 ? AArch64::MOVi64imm : AArch64::MOVi32imm;
Tim Northover4494d692016-10-18 19:47:57 +00001513
Tim Northover4494d692016-10-18 19:47:57 +00001514 if (isFP) {
Jessica Paquettea3843fe2019-05-01 22:39:43 +00001515 // Either emit an FMOV, or emit a copy and use a normal MOV instead.
Tim Northover4494d692016-10-18 19:47:57 +00001516 const TargetRegisterClass &GPRRC =
1517 DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
1518 const TargetRegisterClass &FPRRC =
1519 DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;
1520
Jessica Paquettea3843fe2019-05-01 22:39:43 +00001521 // Can we use an FMOV instruction to represent the immediate?
1522 if (emitFMovForFConstant(I, MRI))
1523 return true;
1524
1525 // Nope. Emit a copy and use a normal mov instead.
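      // For the 32-bit case this ends up as, roughly:
      //   mov w8, #<bit pattern of the FP immediate>
      //   fmov s0, w8   ; the COPY built below is selected to this later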
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001526 const Register DefGPRReg = MRI.createVirtualRegister(&GPRRC);
Tim Northover4494d692016-10-18 19:47:57 +00001527 MachineOperand &RegOp = I.getOperand(0);
1528 RegOp.setReg(DefGPRReg);
Amara Emerson3739a202019-03-15 21:59:50 +00001529 MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
1530 MIB.buildCopy({DefReg}, {DefGPRReg});
Tim Northover4494d692016-10-18 19:47:57 +00001531
1532 if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001533 LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
Tim Northover4494d692016-10-18 19:47:57 +00001534 return false;
1535 }
1536
1537 MachineOperand &ImmOp = I.getOperand(1);
1538 // FIXME: Is going through int64_t always correct?
1539 ImmOp.ChangeToImmediate(
1540 ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
Daniel Sanders066ebbf2017-02-24 15:43:30 +00001541 } else if (I.getOperand(1).isCImm()) {
Tim Northover9267ac52016-12-05 21:47:07 +00001542 uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
1543 I.getOperand(1).ChangeToImmediate(Val);
Daniel Sanders066ebbf2017-02-24 15:43:30 +00001544 } else if (I.getOperand(1).isImm()) {
1545 uint64_t Val = I.getOperand(1).getImm();
1546 I.getOperand(1).ChangeToImmediate(Val);
Tim Northover4494d692016-10-18 19:47:57 +00001547 }
1548
Jessica Paquettea3843fe2019-05-01 22:39:43 +00001549 I.setDesc(TII.get(MovOpc));
Tim Northover4494d692016-10-18 19:47:57 +00001550 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1551 return true;
Tim Northover4edc60d2016-10-10 21:49:42 +00001552 }
Tim Northover7b6d66c2017-07-20 22:58:38 +00001553 case TargetOpcode::G_EXTRACT: {
1554 LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
Amara Emersonbc03bae2018-02-18 17:03:02 +00001555 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
Amara Emerson242efdb2018-02-18 17:28:34 +00001556 (void)DstTy;
Amara Emersonbc03bae2018-02-18 17:03:02 +00001557 unsigned SrcSize = SrcTy.getSizeInBits();
Tim Northover7b6d66c2017-07-20 22:58:38 +00001558 // Larger extracts are vectors; same-size extracts should be something else
1559 // by now (either split up or simplified to a COPY).
1560 if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
1561 return false;
1562
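    // A scalar G_EXTRACT of Width bits at offset Off is an unsigned bitfield
    // extract, i.e. the "ubfx" alias: ubfm dst, src, #Off, #(Off + Width - 1).
    // Operand 2 already holds Off; the imms operand is appended below.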
Amara Emersonbc03bae2018-02-18 17:03:02 +00001563 I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
Tim Northover7b6d66c2017-07-20 22:58:38 +00001564 MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
1565 Ty.getSizeInBits() - 1);
1566
Amara Emersonbc03bae2018-02-18 17:03:02 +00001567 if (SrcSize < 64) {
1568 assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
1569 "unexpected G_EXTRACT types");
1570 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1571 }
1572
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001573 Register DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
Amara Emerson3739a202019-03-15 21:59:50 +00001574 MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
Amara Emerson86271782019-03-18 19:20:10 +00001575 MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
1576 .addReg(DstReg, 0, AArch64::sub_32);
Tim Northover7b6d66c2017-07-20 22:58:38 +00001577 RBI.constrainGenericRegister(I.getOperand(0).getReg(),
1578 AArch64::GPR32RegClass, MRI);
1579 I.getOperand(0).setReg(DstReg);
1580
1581 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1582 }
1583
1584 case TargetOpcode::G_INSERT: {
1585 LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
Amara Emersonbc03bae2018-02-18 17:03:02 +00001586 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
1587 unsigned DstSize = DstTy.getSizeInBits();
Tim Northover7b6d66c2017-07-20 22:58:38 +00001588 // Larger inserts are vectors; same-size ones should be something else by
1589 // now (split up or turned into COPYs).
1590 if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
1591 return false;
1592
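    // A scalar G_INSERT of Width bits at offset LSB is a bitfield insert,
    // i.e. the "bfi" alias: bfm dst, src, #((Size - LSB) % Size), #(Width - 1).
    // The operands are rewritten below to match that encoding.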
Amara Emersonbc03bae2018-02-18 17:03:02 +00001593 I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
Tim Northover7b6d66c2017-07-20 22:58:38 +00001594 unsigned LSB = I.getOperand(3).getImm();
1595 unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
Amara Emersonbc03bae2018-02-18 17:03:02 +00001596 I.getOperand(3).setImm((DstSize - LSB) % DstSize);
Tim Northover7b6d66c2017-07-20 22:58:38 +00001597 MachineInstrBuilder(MF, I).addImm(Width - 1);
1598
Amara Emersonbc03bae2018-02-18 17:03:02 +00001599 if (DstSize < 64) {
1600 assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
1601 "unexpected G_INSERT types");
1602 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1603 }
1604
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001605 Register SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
Tim Northover7b6d66c2017-07-20 22:58:38 +00001606 BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
1607 TII.get(AArch64::SUBREG_TO_REG))
1608 .addDef(SrcReg)
1609 .addImm(0)
1610 .addUse(I.getOperand(2).getReg())
1611 .addImm(AArch64::sub_32);
1612 RBI.constrainGenericRegister(I.getOperand(2).getReg(),
1613 AArch64::GPR32RegClass, MRI);
1614 I.getOperand(2).setReg(SrcReg);
1615
1616 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1617 }
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001618 case TargetOpcode::G_FRAME_INDEX: {
1619 // allocas and G_FRAME_INDEX are only supported in addrspace(0).
Tim Northover5ae83502016-09-15 09:20:34 +00001620 if (Ty != LLT::pointer(0, 64)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001621 LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
1622 << ", expected: " << LLT::pointer(0, 64) << '\n');
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001623 return false;
1624 }
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001625 I.setDesc(TII.get(AArch64::ADDXri));
Ahmed Bougacha0306b5e2016-08-16 14:02:42 +00001626
1627 // MOs for a #0 shifted immediate.
1628 I.addOperand(MachineOperand::CreateImm(0));
1629 I.addOperand(MachineOperand::CreateImm(0));
1630
1631 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1632 }
Tim Northoverbdf16242016-10-10 21:50:00 +00001633
1634 case TargetOpcode::G_GLOBAL_VALUE: {
1635 auto GV = I.getOperand(1).getGlobal();
1636 if (GV->isThreadLocal()) {
1637 // FIXME: we don't support TLS yet.
1638 return false;
1639 }
1640 unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
Tim Northoverfe7c59a2016-12-13 18:25:38 +00001641 if (OpFlags & AArch64II::MO_GOT) {
Tim Northoverbdf16242016-10-10 21:50:00 +00001642 I.setDesc(TII.get(AArch64::LOADgot));
Tim Northoverfe7c59a2016-12-13 18:25:38 +00001643 I.getOperand(1).setTargetFlags(OpFlags);
Amara Emersond5785772018-01-18 19:21:27 +00001644 } else if (TM.getCodeModel() == CodeModel::Large) {
1645 // Materialize the global using movz/movk instructions.
Amara Emerson1e8c1642018-07-31 00:09:02 +00001646 materializeLargeCMVal(I, GV, OpFlags);
Amara Emersond5785772018-01-18 19:21:27 +00001647 I.eraseFromParent();
1648 return true;
David Green9dd1d452018-08-22 11:31:39 +00001649 } else if (TM.getCodeModel() == CodeModel::Tiny) {
1650 I.setDesc(TII.get(AArch64::ADR));
1651 I.getOperand(1).setTargetFlags(OpFlags);
Tim Northoverfe7c59a2016-12-13 18:25:38 +00001652 } else {
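      // Small code model: materialize the address as ADRP + ADD via the
      // MOVaddr pseudo (page address plus low 12 bits).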
Tim Northoverbdf16242016-10-10 21:50:00 +00001653 I.setDesc(TII.get(AArch64::MOVaddr));
1654 I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
1655 MachineInstrBuilder MIB(MF, I);
1656 MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
1657 OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1658 }
1659 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1660 }
1661
Amara Emersond3144a42019-06-06 07:58:37 +00001662 case TargetOpcode::G_ZEXTLOAD:
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001663 case TargetOpcode::G_LOAD:
1664 case TargetOpcode::G_STORE: {
Amara Emersond3144a42019-06-06 07:58:37 +00001665 bool IsZExtLoad = I.getOpcode() == TargetOpcode::G_ZEXTLOAD;
1666 MachineIRBuilder MIB(I);
1667
Tim Northover0f140c72016-09-09 11:46:34 +00001668 LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001669
Tim Northover5ae83502016-09-15 09:20:34 +00001670 if (PtrTy != LLT::pointer(0, 64)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001671 LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
1672 << ", expected: " << LLT::pointer(0, 64) << '\n');
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001673 return false;
1674 }
1675
Daniel Sanders3c1c4c02017-12-05 05:52:07 +00001676 auto &MemOp = **I.memoperands_begin();
1677 if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001678 LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
Daniel Sanders3c1c4c02017-12-05 05:52:07 +00001679 return false;
1680 }
Daniel Sandersf84bc372018-05-05 20:53:24 +00001681 unsigned MemSizeInBits = MemOp.getSize() * 8;
Daniel Sanders3c1c4c02017-12-05 05:52:07 +00001682
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001683 const Register PtrReg = I.getOperand(1).getReg();
Ahmed Bougachaf0b22c42017-03-27 18:14:20 +00001684#ifndef NDEBUG
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001685 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
Ahmed Bougachaf0b22c42017-03-27 18:14:20 +00001686 // Sanity-check the pointer register.
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001687 assert(PtrRB.getID() == AArch64::GPRRegBankID &&
1688 "Load/Store pointer operand isn't a GPR");
Tim Northover0f140c72016-09-09 11:46:34 +00001689 assert(MRI.getType(PtrReg).isPointer() &&
1690 "Load/Store pointer operand isn't a pointer");
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001691#endif
1692
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001693 const Register ValReg = I.getOperand(0).getReg();
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001694 const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);
1695
1696 const unsigned NewOpc =
Daniel Sandersf84bc372018-05-05 20:53:24 +00001697 selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001698 if (NewOpc == I.getOpcode())
1699 return false;
1700
1701 I.setDesc(TII.get(NewOpc));
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001702
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001703 uint64_t Offset = 0;
1704 auto *PtrMI = MRI.getVRegDef(PtrReg);
1705
1706 // Try to fold a GEP into our unsigned immediate addressing mode.
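    // The unsigned-immediate forms take a non-negative offset that is a
    // multiple of the access size and scales to at most 12 bits, e.g. for an
    // 8-byte load:
    //   ldr x0, [x1, #504]   ; 504 == 63 * 8, encoded as offset 63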
1707 if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
1708 if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
1709 int64_t Imm = *COff;
Daniel Sandersf84bc372018-05-05 20:53:24 +00001710 const unsigned Size = MemSizeInBits / 8;
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001711 const unsigned Scale = Log2_32(Size);
1712 if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
1713 unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
1714 I.getOperand(1).setReg(Ptr2Reg);
1715 PtrMI = MRI.getVRegDef(Ptr2Reg);
1716 Offset = Imm / Size;
1717 }
1718 }
1719 }
1720
Ahmed Bougachaf75782f2017-03-27 17:31:56 +00001721 // If we haven't folded anything into our addressing mode yet, try to fold
1722 // a frame index into the base+offset.
1723 if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
1724 I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());
1725
Ahmed Bougacha8a654082017-03-27 17:31:52 +00001726 I.addOperand(MachineOperand::CreateImm(Offset));
Ahmed Bougacha85a66a62017-03-27 17:31:48 +00001727
1728 // If we're storing a 0, use WZR/XZR.
1729 if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
1730 if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
1731 if (I.getOpcode() == AArch64::STRWui)
1732 I.getOperand(0).setReg(AArch64::WZR);
1733 else if (I.getOpcode() == AArch64::STRXui)
1734 I.getOperand(0).setReg(AArch64::XZR);
1735 }
1736 }
1737
Amara Emersond3144a42019-06-06 07:58:37 +00001738 if (IsZExtLoad) {
1739 // The zextload from a smaller type to i32 should be handled by the importer.
1740 if (MRI.getType(ValReg).getSizeInBits() != 64)
1741 return false;
1742 // If we have a ZEXTLOAD then change the load's type to be a narrower reg
1743 // and zero-extend with SUBREG_TO_REG.
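      // Roughly, for a zextload to s64 with a 32-bit memory size:
      //   %ld:gpr32 = LDRWui %ptr, 0   ; the 32-bit load zeroes the top half
      //   %dst:gpr64all = SUBREG_TO_REG 0, %ld, %subreg.sub_32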
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001744 Register LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
1745 Register DstReg = I.getOperand(0).getReg();
Amara Emersond3144a42019-06-06 07:58:37 +00001746 I.getOperand(0).setReg(LdReg);
1747
1748 MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
1749 MIB.buildInstr(AArch64::SUBREG_TO_REG, {DstReg}, {})
1750 .addImm(0)
1751 .addUse(LdReg)
1752 .addImm(AArch64::sub_32);
1753 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1754 return RBI.constrainGenericRegister(DstReg, AArch64::GPR64allRegClass,
1755 MRI);
1756 }
Ahmed Bougacha7adfac52016-07-29 16:56:16 +00001757 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1758 }
1759
Tim Northover9dd78f82017-02-08 21:22:25 +00001760 case TargetOpcode::G_SMULH:
1761 case TargetOpcode::G_UMULH: {
1762 // Reject the various things we don't support yet.
1763 if (unsupportedBinOp(I, RBI, MRI, TRI))
1764 return false;
1765
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001766 const Register DefReg = I.getOperand(0).getReg();
Tim Northover9dd78f82017-02-08 21:22:25 +00001767 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1768
1769 if (RB.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001770 LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
Tim Northover9dd78f82017-02-08 21:22:25 +00001771 return false;
1772 }
1773
1774 if (Ty != LLT::scalar(64)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001775 LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
1776 << ", expected: " << LLT::scalar(64) << '\n');
Tim Northover9dd78f82017-02-08 21:22:25 +00001777 return false;
1778 }
1779
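    // SMULHrr/UMULHrr compute the high 64 bits of the signed/unsigned 128-bit
    // product of two 64-bit operands, which is exactly what G_[SU]MULH asks
    // for.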
1780 unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
1781 : AArch64::UMULHrr;
1782 I.setDesc(TII.get(NewOpc));
1783
1784 // Now that we selected an opcode, we need to constrain the register
1785 // operands to use appropriate classes.
1786 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1787 }
Ahmed Bougacha33e19fe2016-08-18 16:05:11 +00001788 case TargetOpcode::G_FADD:
1789 case TargetOpcode::G_FSUB:
1790 case TargetOpcode::G_FMUL:
1791 case TargetOpcode::G_FDIV:
1792
Ahmed Bougacha2ac5bf92016-08-16 14:02:47 +00001793 case TargetOpcode::G_ASHR:
Amara Emerson9bf092d2019-04-09 21:22:43 +00001794 if (MRI.getType(I.getOperand(0).getReg()).isVector())
1795 return selectVectorASHR(I, MRI);
1796 LLVM_FALLTHROUGH;
1797 case TargetOpcode::G_SHL:
1798 if (Opcode == TargetOpcode::G_SHL &&
1799 MRI.getType(I.getOperand(0).getReg()).isVector())
1800 return selectVectorSHL(I, MRI);
1801 LLVM_FALLTHROUGH;
1802 case TargetOpcode::G_OR:
1803 case TargetOpcode::G_LSHR:
Tim Northover2fda4b02016-10-10 21:49:49 +00001804 case TargetOpcode::G_GEP: {
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001805 // Reject the various things we don't support yet.
Ahmed Bougacha59e160a2016-08-16 14:37:40 +00001806 if (unsupportedBinOp(I, RBI, MRI, TRI))
1807 return false;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001808
Ahmed Bougacha59e160a2016-08-16 14:37:40 +00001809 const unsigned OpSize = Ty.getSizeInBits();
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001810
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001811 const Register DefReg = I.getOperand(0).getReg();
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001812 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1813
1814 const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
1815 if (NewOpc == I.getOpcode())
1816 return false;
1817
1818 I.setDesc(TII.get(NewOpc));
1819 // FIXME: Should the type always be reset in setDesc?
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00001820
1821 // Now that we selected an opcode, we need to constrain the register
1822 // operands to use appropriate classes.
1823 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1824 }
Tim Northover3d38b3a2016-10-11 20:50:21 +00001825
Jessica Paquette7d6784f2019-03-14 22:54:29 +00001826 case TargetOpcode::G_UADDO: {
1827 // TODO: Support other types.
1828 unsigned OpSize = Ty.getSizeInBits();
1829 if (OpSize != 32 && OpSize != 64) {
1830 LLVM_DEBUG(
1831 dbgs()
1832 << "G_UADDO currently only supported for 32 and 64 b types.\n");
1833 return false;
1834 }
1835
1836 // TODO: Support vectors.
1837 if (Ty.isVector()) {
1838 LLVM_DEBUG(dbgs() << "G_UADDO currently only supported for scalars.\n");
1839 return false;
1840 }
1841
1842 // Add and set the set condition flag.
1843 unsigned AddsOpc = OpSize == 32 ? AArch64::ADDSWrr : AArch64::ADDSXrr;
1844 MachineIRBuilder MIRBuilder(I);
1845 auto AddsMI = MIRBuilder.buildInstr(
1846 AddsOpc, {I.getOperand(0).getReg()},
1847 {I.getOperand(2).getReg(), I.getOperand(3).getReg()});
1848 constrainSelectedInstRegOperands(*AddsMI, TII, TRI, RBI);
1849
1850 // Now, put the overflow result in the register given by the first operand
1851 // to the G_UADDO. CSINC increments the result when the predicate is false,
1852 // so to get the increment when it's true, we need to use the inverse. In
1853 // this case, we want to increment when carry is set.
1854 auto CsetMI = MIRBuilder
1855 .buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()},
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001856 {Register(AArch64::WZR), Register(AArch64::WZR)})
Jessica Paquette7d6784f2019-03-14 22:54:29 +00001857 .addImm(getInvertedCondCode(AArch64CC::HS));
1858 constrainSelectedInstRegOperands(*CsetMI, TII, TRI, RBI);
1859 I.eraseFromParent();
1860 return true;
1861 }
1862
Tim Northover398c5f52017-02-14 20:56:29 +00001863 case TargetOpcode::G_PTR_MASK: {
1864 uint64_t Align = I.getOperand(2).getImm();
1865 if (Align >= 64 || Align == 0)
1866 return false;
1867
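    // Clear the low Align bits with an immediate AND, e.g. Align == 4 yields
    // the mask 0xfffffffffffffff0.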
1868 uint64_t Mask = ~((1ULL << Align) - 1);
1869 I.setDesc(TII.get(AArch64::ANDXri));
1870 I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));
1871
1872 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1873 }
Tim Northover037af52c2016-10-31 18:31:09 +00001874 case TargetOpcode::G_PTRTOINT:
Tim Northoverfb8d9892016-10-12 22:49:15 +00001875 case TargetOpcode::G_TRUNC: {
1876 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
1877 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
1878
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001879 const Register DstReg = I.getOperand(0).getReg();
1880 const Register SrcReg = I.getOperand(1).getReg();
Tim Northoverfb8d9892016-10-12 22:49:15 +00001881
1882 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
1883 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
1884
1885 if (DstRB.getID() != SrcRB.getID()) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001886 LLVM_DEBUG(
1887 dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
Tim Northoverfb8d9892016-10-12 22:49:15 +00001888 return false;
1889 }
1890
1891 if (DstRB.getID() == AArch64::GPRRegBankID) {
1892 const TargetRegisterClass *DstRC =
1893 getRegClassForTypeOnBank(DstTy, DstRB, RBI);
1894 if (!DstRC)
1895 return false;
1896
1897 const TargetRegisterClass *SrcRC =
1898 getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
1899 if (!SrcRC)
1900 return false;
1901
1902 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
1903 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001904 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
Tim Northoverfb8d9892016-10-12 22:49:15 +00001905 return false;
1906 }
1907
1908 if (DstRC == SrcRC) {
1909 // Nothing to be done
Daniel Sanderscc36dbf2017-06-27 10:11:39 +00001910 } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
1911 SrcTy == LLT::scalar(64)) {
1912 llvm_unreachable("TableGen can import this case");
1913 return false;
Tim Northoverfb8d9892016-10-12 22:49:15 +00001914 } else if (DstRC == &AArch64::GPR32RegClass &&
1915 SrcRC == &AArch64::GPR64RegClass) {
1916 I.getOperand(1).setSubReg(AArch64::sub_32);
1917 } else {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001918 LLVM_DEBUG(
1919 dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
Tim Northoverfb8d9892016-10-12 22:49:15 +00001920 return false;
1921 }
1922
1923 I.setDesc(TII.get(TargetOpcode::COPY));
1924 return true;
1925 } else if (DstRB.getID() == AArch64::FPRRegBankID) {
1926 if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
1927 I.setDesc(TII.get(AArch64::XTNv4i16));
1928 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1929 return true;
1930 }
1931 }
1932
1933 return false;
1934 }
1935
Tim Northover3d38b3a2016-10-11 20:50:21 +00001936 case TargetOpcode::G_ANYEXT: {
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001937 const Register DstReg = I.getOperand(0).getReg();
1938 const Register SrcReg = I.getOperand(1).getReg();
Tim Northover3d38b3a2016-10-11 20:50:21 +00001939
Quentin Colombetcb629a82016-10-12 03:57:49 +00001940 const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
1941 if (RBDst.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001942 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
1943 << ", expected: GPR\n");
Quentin Colombetcb629a82016-10-12 03:57:49 +00001944 return false;
1945 }
Tim Northover3d38b3a2016-10-11 20:50:21 +00001946
Quentin Colombetcb629a82016-10-12 03:57:49 +00001947 const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
1948 if (RBSrc.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001949 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
1950 << ", expected: GPR\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001951 return false;
1952 }
1953
1954 const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
1955
1956 if (DstSize == 0) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001957 LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001958 return false;
1959 }
1960
Quentin Colombetcb629a82016-10-12 03:57:49 +00001961 if (DstSize != 64 && DstSize > 32) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001962 LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
1963 << ", expected: 32 or 64\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001964 return false;
1965 }
Quentin Colombetcb629a82016-10-12 03:57:49 +00001966 // At this point G_ANYEXT is just like a plain COPY, but we need
1967 // to explicitly form the 64-bit value if any.
1968 if (DstSize > 32) {
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001969 Register ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
Quentin Colombetcb629a82016-10-12 03:57:49 +00001970 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
1971 .addDef(ExtSrc)
1972 .addImm(0)
1973 .addUse(SrcReg)
1974 .addImm(AArch64::sub_32);
1975 I.getOperand(1).setReg(ExtSrc);
Tim Northover3d38b3a2016-10-11 20:50:21 +00001976 }
Quentin Colombetcb629a82016-10-12 03:57:49 +00001977 return selectCopy(I, TII, MRI, TRI, RBI);
Tim Northover3d38b3a2016-10-11 20:50:21 +00001978 }
1979
1980 case TargetOpcode::G_ZEXT:
1981 case TargetOpcode::G_SEXT: {
1982 unsigned Opcode = I.getOpcode();
1983 const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
1984 SrcTy = MRI.getType(I.getOperand(1).getReg());
1985 const bool isSigned = Opcode == TargetOpcode::G_SEXT;
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00001986 const Register DefReg = I.getOperand(0).getReg();
1987 const Register SrcReg = I.getOperand(1).getReg();
Tim Northover3d38b3a2016-10-11 20:50:21 +00001988 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
1989
1990 if (RB.getID() != AArch64::GPRRegBankID) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001991 LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
1992 << ", expected: GPR\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00001993 return false;
1994 }
1995
1996 MachineInstr *ExtI;
1997 if (DstTy == LLT::scalar(64)) {
1998 // FIXME: Can we avoid manually doing this?
1999 if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00002000 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
2001 << " operand\n");
Tim Northover3d38b3a2016-10-11 20:50:21 +00002002 return false;
2003 }
2004
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002005 const Register SrcXReg =
Tim Northover3d38b3a2016-10-11 20:50:21 +00002006 MRI.createVirtualRegister(&AArch64::GPR64RegClass);
2007 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
2008 .addDef(SrcXReg)
2009 .addImm(0)
2010 .addUse(SrcReg)
2011 .addImm(AArch64::sub_32);
2012
2013 const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
2014 ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
2015 .addDef(DefReg)
2016 .addUse(SrcXReg)
2017 .addImm(0)
2018 .addImm(SrcTy.getSizeInBits() - 1);
Tim Northovera9105be2016-11-09 22:39:54 +00002019 } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
Tim Northover3d38b3a2016-10-11 20:50:21 +00002020 const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
2021 ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
2022 .addDef(DefReg)
2023 .addUse(SrcReg)
2024 .addImm(0)
2025 .addImm(SrcTy.getSizeInBits() - 1);
2026 } else {
2027 return false;
2028 }
2029
2030 constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2031
2032 I.eraseFromParent();
2033 return true;
2034 }
Tim Northoverc1d8c2b2016-10-11 22:29:23 +00002035
Tim Northover69271c62016-10-12 22:49:11 +00002036 case TargetOpcode::G_SITOFP:
2037 case TargetOpcode::G_UITOFP:
2038 case TargetOpcode::G_FPTOSI:
2039 case TargetOpcode::G_FPTOUI: {
2040 const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
2041 SrcTy = MRI.getType(I.getOperand(1).getReg());
2042 const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
2043 if (NewOpc == Opcode)
2044 return false;
2045
2046 I.setDesc(TII.get(NewOpc));
2047 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2048
2049 return true;
2050 }
2051
2052
Tim Northoverc1d8c2b2016-10-11 22:29:23 +00002053 case TargetOpcode::G_INTTOPTR:
Daniel Sandersedd07842017-08-17 09:26:14 +00002054 // The importer is currently unable to import pointer types since they
2055 // didn't exist in SelectionDAG.
Daniel Sanderseb2f5f32017-08-15 15:10:31 +00002056 return selectCopy(I, TII, MRI, TRI, RBI);
Daniel Sanders16e6dd32017-08-15 13:50:09 +00002057
Daniel Sandersedd07842017-08-17 09:26:14 +00002058 case TargetOpcode::G_BITCAST:
2059 // Imported SelectionDAG rules can handle every bitcast except those that
2060 // bitcast from a type to the same type. Ideally, these shouldn't occur
Amara Emersonb9560512019-04-11 20:32:24 +00002061 // but we might not run an optimizer that deletes them. The other exception
2062 // is bitcasts involving pointer types, as SelectionDAG has no knowledge
2063 // of them.
2064 return selectCopy(I, TII, MRI, TRI, RBI);
Daniel Sandersedd07842017-08-17 09:26:14 +00002065
Tim Northover9ac0eba2016-11-08 00:45:29 +00002066 case TargetOpcode::G_SELECT: {
2067 if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00002068 LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
2069 << ", expected: " << LLT::scalar(1) << '\n');
Tim Northover9ac0eba2016-11-08 00:45:29 +00002070 return false;
2071 }
2072
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002073 const Register CondReg = I.getOperand(1).getReg();
2074 const Register TReg = I.getOperand(2).getReg();
2075 const Register FReg = I.getOperand(3).getReg();
Tim Northover9ac0eba2016-11-08 00:45:29 +00002076
Jessica Paquette99316042019-07-02 19:44:16 +00002077 if (tryOptSelect(I))
Amara Emersonc37ff0d2019-06-05 23:46:16 +00002078 return true;
Tim Northover9ac0eba2016-11-08 00:45:29 +00002079
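    // Emit a TST (ANDS against immediate #1) on the condition bit, then a
    // CSEL that picks TReg when it is set (NE) and FReg otherwise.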
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002080 Register CSelOpc = selectSelectOpc(I, MRI, RBI);
Tim Northover9ac0eba2016-11-08 00:45:29 +00002081 MachineInstr &TstMI =
2082 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
2083 .addDef(AArch64::WZR)
2084 .addUse(CondReg)
2085 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
2086
2087 MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
2088 .addDef(I.getOperand(0).getReg())
2089 .addUse(TReg)
2090 .addUse(FReg)
2091 .addImm(AArch64CC::NE);
2092
2093 constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
2094 constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);
2095
2096 I.eraseFromParent();
2097 return true;
2098 }
Tim Northover6c02ad52016-10-12 22:49:04 +00002099 case TargetOpcode::G_ICMP: {
Amara Emerson9bf092d2019-04-09 21:22:43 +00002100 if (Ty.isVector())
2101 return selectVectorICmp(I, MRI);
2102
Aditya Nandakumar02c602e2017-07-31 17:00:16 +00002103 if (Ty != LLT::scalar(32)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00002104 LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
2105 << ", expected: " << LLT::scalar(32) << '\n');
Tim Northover6c02ad52016-10-12 22:49:04 +00002106 return false;
2107 }
2108
Jessica Paquette49537bb2019-06-17 18:40:06 +00002109 MachineIRBuilder MIRBuilder(I);
Jessica Paquette99316042019-07-02 19:44:16 +00002110 if (!emitIntegerCompare(I.getOperand(2), I.getOperand(3), I.getOperand(1),
2111 MIRBuilder))
2112 return false;
Jessica Paquette49537bb2019-06-17 18:40:06 +00002113 emitCSetForICMP(I.getOperand(0).getReg(), I.getOperand(1).getPredicate(),
Jessica Paquette99316042019-07-02 19:44:16 +00002114 MIRBuilder);
Tim Northover6c02ad52016-10-12 22:49:04 +00002115 I.eraseFromParent();
2116 return true;
2117 }
2118
Tim Northover7dd378d2016-10-12 22:49:07 +00002119 case TargetOpcode::G_FCMP: {
Aditya Nandakumar02c602e2017-07-31 17:00:16 +00002120 if (Ty != LLT::scalar(32)) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00002121 LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
2122 << ", expected: " << LLT::scalar(32) << '\n');
Tim Northover7dd378d2016-10-12 22:49:07 +00002123 return false;
2124 }
2125
Jessica Paquetteb73ea75b2019-05-28 22:52:49 +00002126 unsigned CmpOpc = selectFCMPOpc(I, MRI);
2127 if (!CmpOpc)
Tim Northover7dd378d2016-10-12 22:49:07 +00002128 return false;
Tim Northover7dd378d2016-10-12 22:49:07 +00002129
2130 // FIXME: regbank
2131
2132 AArch64CC::CondCode CC1, CC2;
2133 changeFCMPPredToAArch64CC(
2134 (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);
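    // Some FP predicates need two AArch64 condition codes; in that case
    // CC2 != AL and the final result is the OR of two CSINCs (e.g. "one"
    // becomes "mi || gt" after the compare).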
2135
Jessica Paquetteb73ea75b2019-05-28 22:52:49 +00002136 // Partially build the compare. Decide if we need to add a use for the
2137 // third operand based off whether or not we're comparing against 0.0.
2138 auto CmpMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
2139 .addUse(I.getOperand(2).getReg());
2140
2141 // If we don't have an immediate compare, then we need to add a use of the
2142 // register which wasn't used for the immediate.
2143 // Note that the immediate will always be the last operand.
2144 if (CmpOpc != AArch64::FCMPSri && CmpOpc != AArch64::FCMPDri)
2145 CmpMI = CmpMI.addUse(I.getOperand(3).getReg());
Tim Northover7dd378d2016-10-12 22:49:07 +00002146
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002147 const Register DefReg = I.getOperand(0).getReg();
2148 Register Def1Reg = DefReg;
Tim Northover7dd378d2016-10-12 22:49:07 +00002149 if (CC2 != AArch64CC::AL)
2150 Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
2151
2152 MachineInstr &CSetMI =
2153 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
2154 .addDef(Def1Reg)
2155 .addUse(AArch64::WZR)
2156 .addUse(AArch64::WZR)
Tim Northover33a1a0b2017-01-17 23:04:01 +00002157 .addImm(getInvertedCondCode(CC1));
Tim Northover7dd378d2016-10-12 22:49:07 +00002158
2159 if (CC2 != AArch64CC::AL) {
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002160 Register Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
Tim Northover7dd378d2016-10-12 22:49:07 +00002161 MachineInstr &CSet2MI =
2162 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
2163 .addDef(Def2Reg)
2164 .addUse(AArch64::WZR)
2165 .addUse(AArch64::WZR)
Tim Northover33a1a0b2017-01-17 23:04:01 +00002166 .addImm(getInvertedCondCode(CC2));
Tim Northover7dd378d2016-10-12 22:49:07 +00002167 MachineInstr &OrMI =
2168 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
2169 .addDef(DefReg)
2170 .addUse(Def1Reg)
2171 .addUse(Def2Reg);
2172 constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
2173 constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
2174 }
Jessica Paquetteb73ea75b2019-05-28 22:52:49 +00002175 constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
Tim Northover7dd378d2016-10-12 22:49:07 +00002176 constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);
2177
2178 I.eraseFromParent();
2179 return true;
2180 }
Tim Northovere9600d82017-02-08 17:57:27 +00002181 case TargetOpcode::G_VASTART:
2182 return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
2183 : selectVaStartAAPCS(I, MF, MRI);
Jessica Paquette7f6fe7c2019-04-29 20:58:17 +00002184 case TargetOpcode::G_INTRINSIC:
2185 return selectIntrinsic(I, MRI);
Amara Emerson1f5d9942018-04-25 14:43:59 +00002186 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
Jessica Paquette22c62152019-04-02 19:57:26 +00002187 return selectIntrinsicWithSideEffects(I, MRI);
Amara Emerson1e8c1642018-07-31 00:09:02 +00002188 case TargetOpcode::G_IMPLICIT_DEF: {
Justin Bogner4fc69662017-07-12 17:32:32 +00002189 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
Amara Emerson58aea522018-02-02 01:44:43 +00002190 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002191 const Register DstReg = I.getOperand(0).getReg();
Amara Emerson58aea522018-02-02 01:44:43 +00002192 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
2193 const TargetRegisterClass *DstRC =
2194 getRegClassForTypeOnBank(DstTy, DstRB, RBI);
2195 RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
Justin Bogner4fc69662017-07-12 17:32:32 +00002196 return true;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00002197 }
Amara Emerson1e8c1642018-07-31 00:09:02 +00002198 case TargetOpcode::G_BLOCK_ADDR: {
2199 if (TM.getCodeModel() == CodeModel::Large) {
2200 materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0);
2201 I.eraseFromParent();
2202 return true;
2203 } else {
2204 I.setDesc(TII.get(AArch64::MOVaddrBA));
2205 auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA),
2206 I.getOperand(0).getReg())
2207 .addBlockAddress(I.getOperand(1).getBlockAddress(),
2208 /* Offset */ 0, AArch64II::MO_PAGE)
2209 .addBlockAddress(
2210 I.getOperand(1).getBlockAddress(), /* Offset */ 0,
2211 AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
2212 I.eraseFromParent();
2213 return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
2214 }
2215 }
Jessica Paquette991cb392019-04-23 20:46:19 +00002216 case TargetOpcode::G_INTRINSIC_TRUNC:
2217 return selectIntrinsicTrunc(I, MRI);
Jessica Paquette4fe75742019-04-23 23:03:03 +00002218 case TargetOpcode::G_INTRINSIC_ROUND:
2219 return selectIntrinsicRound(I, MRI);
Amara Emerson5ec14602018-12-10 18:44:58 +00002220 case TargetOpcode::G_BUILD_VECTOR:
2221 return selectBuildVector(I, MRI);
Amara Emerson8cb186c2018-12-20 01:11:04 +00002222 case TargetOpcode::G_MERGE_VALUES:
2223 return selectMergeValues(I, MRI);
Jessica Paquette245047d2019-01-24 22:00:41 +00002224 case TargetOpcode::G_UNMERGE_VALUES:
2225 return selectUnmergeValues(I, MRI);
Amara Emerson1abe05c2019-02-21 20:20:16 +00002226 case TargetOpcode::G_SHUFFLE_VECTOR:
2227 return selectShuffleVector(I, MRI);
Jessica Paquette607774c2019-03-11 22:18:01 +00002228 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
2229 return selectExtractElt(I, MRI);
Jessica Paquette5aff1f42019-03-14 18:01:30 +00002230 case TargetOpcode::G_INSERT_VECTOR_ELT:
2231 return selectInsertElt(I, MRI);
Amara Emerson2ff22982019-03-14 22:48:15 +00002232 case TargetOpcode::G_CONCAT_VECTORS:
2233 return selectConcatVectors(I, MRI);
Amara Emerson6e71b342019-06-21 18:10:41 +00002234 case TargetOpcode::G_JUMP_TABLE:
2235 return selectJumpTable(I, MRI);
Amara Emerson1e8c1642018-07-31 00:09:02 +00002236 }
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +00002237
2238 return false;
2239}
Daniel Sanders8a4bae92017-03-14 21:32:08 +00002240
Amara Emerson6e71b342019-06-21 18:10:41 +00002241bool AArch64InstructionSelector::selectBrJT(MachineInstr &I,
2242 MachineRegisterInfo &MRI) const {
2243 assert(I.getOpcode() == TargetOpcode::G_BRJT && "Expected G_BRJT");
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002244 Register JTAddr = I.getOperand(0).getReg();
Amara Emerson6e71b342019-06-21 18:10:41 +00002245 unsigned JTI = I.getOperand(1).getIndex();
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002246 Register Index = I.getOperand(2).getReg();
Amara Emerson6e71b342019-06-21 18:10:41 +00002247 MachineIRBuilder MIB(I);
2248
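  // JumpTableDest32 computes the branch target from the table address plus
  // the scaled index; each table entry is a 32-bit offset from the table
  // base (roughly an ldrsw + add once the pseudo is expanded).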
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002249 Register TargetReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
2250 Register ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
Amara Emerson6e71b342019-06-21 18:10:41 +00002251 MIB.buildInstr(AArch64::JumpTableDest32, {TargetReg, ScratchReg},
2252 {JTAddr, Index})
2253 .addJumpTableIndex(JTI);
2254
2255 // Build the indirect branch.
2256 MIB.buildInstr(AArch64::BR, {}, {TargetReg});
2257 I.eraseFromParent();
2258 return true;
2259}
2260
2261bool AArch64InstructionSelector::selectJumpTable(
2262 MachineInstr &I, MachineRegisterInfo &MRI) const {
2263 assert(I.getOpcode() == TargetOpcode::G_JUMP_TABLE && "Expected jump table");
2264 assert(I.getOperand(1).isJTI() && "Jump table op should have a JTI!");
2265
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002266 Register DstReg = I.getOperand(0).getReg();
Amara Emerson6e71b342019-06-21 18:10:41 +00002267 unsigned JTI = I.getOperand(1).getIndex();
2268 // We generate a MOVaddrJT which will get expanded to an ADRP + ADD later.
2269 MachineIRBuilder MIB(I);
2270 auto MovMI =
2271 MIB.buildInstr(AArch64::MOVaddrJT, {DstReg}, {})
2272 .addJumpTableIndex(JTI, AArch64II::MO_PAGE)
2273 .addJumpTableIndex(JTI, AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
2274 I.eraseFromParent();
2275 return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
2276}
2277
Jessica Paquette991cb392019-04-23 20:46:19 +00002278bool AArch64InstructionSelector::selectIntrinsicTrunc(
2279 MachineInstr &I, MachineRegisterInfo &MRI) const {
2280 const LLT SrcTy = MRI.getType(I.getOperand(0).getReg());
2281
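  // G_INTRINSIC_TRUNC rounds toward zero, which is FRINTZ on AArch64.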
2282 // Select the correct opcode.
2283 unsigned Opc = 0;
2284 if (!SrcTy.isVector()) {
2285 switch (SrcTy.getSizeInBits()) {
2286 default:
2287 case 16:
2288 Opc = AArch64::FRINTZHr;
2289 break;
2290 case 32:
2291 Opc = AArch64::FRINTZSr;
2292 break;
2293 case 64:
2294 Opc = AArch64::FRINTZDr;
2295 break;
2296 }
2297 } else {
2298 unsigned NumElts = SrcTy.getNumElements();
2299 switch (SrcTy.getElementType().getSizeInBits()) {
2300 default:
2301 break;
2302 case 16:
2303 if (NumElts == 4)
2304 Opc = AArch64::FRINTZv4f16;
2305 else if (NumElts == 8)
2306 Opc = AArch64::FRINTZv8f16;
2307 break;
2308 case 32:
2309 if (NumElts == 2)
2310 Opc = AArch64::FRINTZv2f32;
2311 else if (NumElts == 4)
2312 Opc = AArch64::FRINTZv4f32;
2313 break;
2314 case 64:
2315 if (NumElts == 2)
2316 Opc = AArch64::FRINTZv2f64;
2317 break;
2318 }
2319 }
2320
2321 if (!Opc) {
2322 // Didn't get an opcode above, bail.
2323 LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_TRUNC!\n");
2324 return false;
2325 }
2326
2327 // Legalization would have set us up perfectly for this; we just need to
2328 // set the opcode and move on.
2329 I.setDesc(TII.get(Opc));
2330 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2331}
2332
Jessica Paquette4fe75742019-04-23 23:03:03 +00002333bool AArch64InstructionSelector::selectIntrinsicRound(
2334 MachineInstr &I, MachineRegisterInfo &MRI) const {
2335 const LLT SrcTy = MRI.getType(I.getOperand(0).getReg());
2336
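  // G_INTRINSIC_ROUND rounds to nearest with ties away from zero, which is
  // FRINTA on AArch64.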
2337 // Select the correct opcode.
2338 unsigned Opc = 0;
2339 if (!SrcTy.isVector()) {
2340 switch (SrcTy.getSizeInBits()) {
2341 default:
2342 case 16:
2343 Opc = AArch64::FRINTAHr;
2344 break;
2345 case 32:
2346 Opc = AArch64::FRINTASr;
2347 break;
2348 case 64:
2349 Opc = AArch64::FRINTADr;
2350 break;
2351 }
2352 } else {
2353 unsigned NumElts = SrcTy.getNumElements();
2354 switch (SrcTy.getElementType().getSizeInBits()) {
2355 default:
2356 break;
2357 case 16:
2358 if (NumElts == 4)
2359 Opc = AArch64::FRINTAv4f16;
2360 else if (NumElts == 8)
2361 Opc = AArch64::FRINTAv8f16;
2362 break;
2363 case 32:
2364 if (NumElts == 2)
2365 Opc = AArch64::FRINTAv2f32;
2366 else if (NumElts == 4)
2367 Opc = AArch64::FRINTAv4f32;
2368 break;
2369 case 64:
2370 if (NumElts == 2)
2371 Opc = AArch64::FRINTAv2f64;
2372 break;
2373 }
2374 }
2375
2376 if (!Opc) {
2377 // Didn't get an opcode above, bail.
2378 LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_ROUND!\n");
2379 return false;
2380 }
2381
2382 // Legalization would have set us up perfectly for this; we just need to
2383 // set the opcode and move on.
2384 I.setDesc(TII.get(Opc));
2385 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2386}
2387
Amara Emerson9bf092d2019-04-09 21:22:43 +00002388bool AArch64InstructionSelector::selectVectorICmp(
2389 MachineInstr &I, MachineRegisterInfo &MRI) const {
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002390 Register DstReg = I.getOperand(0).getReg();
Amara Emerson9bf092d2019-04-09 21:22:43 +00002391 LLT DstTy = MRI.getType(DstReg);
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002392 Register SrcReg = I.getOperand(2).getReg();
2393 Register Src2Reg = I.getOperand(3).getReg();
Amara Emerson9bf092d2019-04-09 21:22:43 +00002394 LLT SrcTy = MRI.getType(SrcReg);
2395
2396 unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
2397 unsigned NumElts = DstTy.getNumElements();
2398
2399 // First index is element size, 0 == 8b, 1 == 16b, 2 == 32b, 3 == 64b
2400 // Second index is num elts, 0 == v2, 1 == v4, 2 == v8, 3 == v16
2401 // Third index is cc opcode:
2402 // 0 == eq
2403 // 1 == ugt
2404 // 2 == uge
2405 // 3 == ult
2406 // 4 == ule
2407 // 5 == sgt
2408 // 6 == sge
2409 // 7 == slt
2410 // 8 == sle
2411 // ne is done by negating 'eq' result.
2412
2413 // This table below assumes that for some comparisons the operands will be
2414 // commuted.
2415 // ult op == commute + ugt op
2416 // ule op == commute + uge op
2417 // slt op == commute + sgt op
2418 // sle op == commute + sge op
2419 unsigned PredIdx = 0;
2420 bool SwapOperands = false;
2421 CmpInst::Predicate Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
2422 switch (Pred) {
2423 case CmpInst::ICMP_NE:
2424 case CmpInst::ICMP_EQ:
2425 PredIdx = 0;
2426 break;
2427 case CmpInst::ICMP_UGT:
2428 PredIdx = 1;
2429 break;
2430 case CmpInst::ICMP_UGE:
2431 PredIdx = 2;
2432 break;
2433 case CmpInst::ICMP_ULT:
2434 PredIdx = 3;
2435 SwapOperands = true;
2436 break;
2437 case CmpInst::ICMP_ULE:
2438 PredIdx = 4;
2439 SwapOperands = true;
2440 break;
2441 case CmpInst::ICMP_SGT:
2442 PredIdx = 5;
2443 break;
2444 case CmpInst::ICMP_SGE:
2445 PredIdx = 6;
2446 break;
2447 case CmpInst::ICMP_SLT:
2448 PredIdx = 7;
2449 SwapOperands = true;
2450 break;
2451 case CmpInst::ICMP_SLE:
2452 PredIdx = 8;
2453 SwapOperands = true;
2454 break;
2455 default:
2456 llvm_unreachable("Unhandled icmp predicate");
2457 return false;
2458 }
2459
2460 // This table obviously should be tablegen'd when we have our GISel native
2461 // tablegen selector.
2462
2463 static const unsigned OpcTable[4][4][9] = {
2464 {
2465 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2466 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2467 0 /* invalid */},
2468 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2469 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2470 0 /* invalid */},
2471 {AArch64::CMEQv8i8, AArch64::CMHIv8i8, AArch64::CMHSv8i8,
2472 AArch64::CMHIv8i8, AArch64::CMHSv8i8, AArch64::CMGTv8i8,
2473 AArch64::CMGEv8i8, AArch64::CMGTv8i8, AArch64::CMGEv8i8},
2474 {AArch64::CMEQv16i8, AArch64::CMHIv16i8, AArch64::CMHSv16i8,
2475 AArch64::CMHIv16i8, AArch64::CMHSv16i8, AArch64::CMGTv16i8,
2476 AArch64::CMGEv16i8, AArch64::CMGTv16i8, AArch64::CMGEv16i8}
2477 },
2478 {
2479 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2480 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2481 0 /* invalid */},
2482 {AArch64::CMEQv4i16, AArch64::CMHIv4i16, AArch64::CMHSv4i16,
2483 AArch64::CMHIv4i16, AArch64::CMHSv4i16, AArch64::CMGTv4i16,
2484 AArch64::CMGEv4i16, AArch64::CMGTv4i16, AArch64::CMGEv4i16},
2485 {AArch64::CMEQv8i16, AArch64::CMHIv8i16, AArch64::CMHSv8i16,
2486 AArch64::CMHIv8i16, AArch64::CMHSv8i16, AArch64::CMGTv8i16,
2487 AArch64::CMGEv8i16, AArch64::CMGTv8i16, AArch64::CMGEv8i16},
2488 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2489 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2490 0 /* invalid */}
2491 },
2492 {
2493 {AArch64::CMEQv2i32, AArch64::CMHIv2i32, AArch64::CMHSv2i32,
2494 AArch64::CMHIv2i32, AArch64::CMHSv2i32, AArch64::CMGTv2i32,
2495 AArch64::CMGEv2i32, AArch64::CMGTv2i32, AArch64::CMGEv2i32},
2496 {AArch64::CMEQv4i32, AArch64::CMHIv4i32, AArch64::CMHSv4i32,
2497 AArch64::CMHIv4i32, AArch64::CMHSv4i32, AArch64::CMGTv4i32,
2498 AArch64::CMGEv4i32, AArch64::CMGTv4i32, AArch64::CMGEv4i32},
2499 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2500 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2501 0 /* invalid */},
2502 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2503 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2504 0 /* invalid */}
2505 },
2506 {
2507 {AArch64::CMEQv2i64, AArch64::CMHIv2i64, AArch64::CMHSv2i64,
2508 AArch64::CMHIv2i64, AArch64::CMHSv2i64, AArch64::CMGTv2i64,
2509 AArch64::CMGEv2i64, AArch64::CMGTv2i64, AArch64::CMGEv2i64},
2510 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2511 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2512 0 /* invalid */},
2513 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2514 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2515 0 /* invalid */},
2516 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2517 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
2518 0 /* invalid */}
2519 },
2520 };
2521 unsigned EltIdx = Log2_32(SrcEltSize / 8);
2522 unsigned NumEltsIdx = Log2_32(NumElts / 2);
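  // For example, a <4 x s16> 'ult' compare has EltIdx == Log2_32(16 / 8) == 1
  // and NumEltsIdx == Log2_32(4 / 2) == 1, so with PredIdx == 3 it picks
  // OpcTable[1][1][3], i.e. CMHIv4i16 with its operands swapped.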
2523 unsigned Opc = OpcTable[EltIdx][NumEltsIdx][PredIdx];
2524 if (!Opc) {
2525 LLVM_DEBUG(dbgs() << "Could not map G_ICMP to cmp opcode\n");
2526 return false;
2527 }
2528
2529 const RegisterBank &VecRB = *RBI.getRegBank(SrcReg, MRI, TRI);
2530 const TargetRegisterClass *SrcRC =
2531 getRegClassForTypeOnBank(SrcTy, VecRB, RBI, true);
2532 if (!SrcRC) {
2533 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
2534 return false;
2535 }
2536
2537 unsigned NotOpc = Pred == ICmpInst::ICMP_NE ? AArch64::NOTv8i8 : 0;
2538 if (SrcTy.getSizeInBits() == 128)
2539 NotOpc = NotOpc ? AArch64::NOTv16i8 : 0;
2540
2541 if (SwapOperands)
2542 std::swap(SrcReg, Src2Reg);
2543
2544 MachineIRBuilder MIB(I);
2545 auto Cmp = MIB.buildInstr(Opc, {SrcRC}, {SrcReg, Src2Reg});
2546 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
2547
2548 // Invert if we had a 'ne' cc.
2549 if (NotOpc) {
2550 Cmp = MIB.buildInstr(NotOpc, {DstReg}, {Cmp});
2551 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
2552 } else {
2553 MIB.buildCopy(DstReg, Cmp.getReg(0));
2554 }
2555 RBI.constrainGenericRegister(DstReg, *SrcRC, MRI);
2556 I.eraseFromParent();
2557 return true;
2558}
2559
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002560MachineInstr *AArch64InstructionSelector::emitScalarToVector(
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002561 unsigned EltSize, const TargetRegisterClass *DstRC, Register Scalar,
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002562 MachineIRBuilder &MIRBuilder) const {
2563 auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {});
Amara Emerson5ec14602018-12-10 18:44:58 +00002564
2565 auto BuildFn = [&](unsigned SubregIndex) {
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002566 auto Ins =
2567 MIRBuilder
2568 .buildInstr(TargetOpcode::INSERT_SUBREG, {DstRC}, {Undef, Scalar})
2569 .addImm(SubregIndex);
2570 constrainSelectedInstRegOperands(*Undef, TII, TRI, RBI);
2571 constrainSelectedInstRegOperands(*Ins, TII, TRI, RBI);
2572 return &*Ins;
Amara Emerson5ec14602018-12-10 18:44:58 +00002573 };
2574
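  // The subregister index below is keyed off the scalar's size; e.g. a 32-bit
  // scalar is inserted into the ssub slot of the IMPLICIT_DEF'd wide register.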
Amara Emerson8acb0d92019-03-04 19:16:00 +00002575 switch (EltSize) {
Jessica Paquette245047d2019-01-24 22:00:41 +00002576 case 16:
2577 return BuildFn(AArch64::hsub);
Amara Emerson5ec14602018-12-10 18:44:58 +00002578 case 32:
2579 return BuildFn(AArch64::ssub);
2580 case 64:
2581 return BuildFn(AArch64::dsub);
2582 default:
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00002583 return nullptr;
Amara Emerson5ec14602018-12-10 18:44:58 +00002584 }
2585}
2586
Amara Emerson8cb186c2018-12-20 01:11:04 +00002587bool AArch64InstructionSelector::selectMergeValues(
2588 MachineInstr &I, MachineRegisterInfo &MRI) const {
2589 assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
2590 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2591 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
2592 assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");
2593
2594 // At the moment we only support merging two s32s into an s64.
2595 if (I.getNumOperands() != 3)
2596 return false;
2597 if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
2598 return false;
2599 const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
2600 if (RB.getID() != AArch64::GPRRegBankID)
2601 return false;
2602
2603 auto *DstRC = &AArch64::GPR64RegClass;
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002604 Register SubToRegDef = MRI.createVirtualRegister(DstRC);
Amara Emerson8cb186c2018-12-20 01:11:04 +00002605 MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
2606 TII.get(TargetOpcode::SUBREG_TO_REG))
2607 .addDef(SubToRegDef)
2608 .addImm(0)
2609 .addUse(I.getOperand(1).getReg())
2610 .addImm(AArch64::sub_32);
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002611 Register SubToRegDef2 = MRI.createVirtualRegister(DstRC);
Amara Emerson8cb186c2018-12-20 01:11:04 +00002612 // Need to anyext the second scalar before we can use bfm
2613 MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
2614 TII.get(TargetOpcode::SUBREG_TO_REG))
2615 .addDef(SubToRegDef2)
2616 .addImm(0)
2617 .addUse(I.getOperand(2).getReg())
2618 .addImm(AArch64::sub_32);
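  // BFMXri with immr = 32, imms = 31 inserts the low 32 bits of the second
  // use operand at bit 32 of the tied first one, i.e. the merged value is
  // (Lo & 0xffffffff) | (Hi << 32).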
Amara Emerson8cb186c2018-12-20 01:11:04 +00002619 MachineInstr &BFM =
2620 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
Amara Emerson321bfb22018-12-20 03:27:42 +00002621 .addDef(I.getOperand(0).getReg())
Amara Emerson8cb186c2018-12-20 01:11:04 +00002622 .addUse(SubToRegDef)
2623 .addUse(SubToRegDef2)
2624 .addImm(32)
2625 .addImm(31);
2626 constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
2627 constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
2628 constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
2629 I.eraseFromParent();
2630 return true;
2631}
2632
Jessica Paquette607774c2019-03-11 22:18:01 +00002633static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
2634 const unsigned EltSize) {
2635 // Choose a lane copy opcode and subregister based off of the size of the
2636 // vector's elements.
2637 switch (EltSize) {
2638 case 16:
2639 CopyOpc = AArch64::CPYi16;
2640 ExtractSubReg = AArch64::hsub;
2641 break;
2642 case 32:
2643 CopyOpc = AArch64::CPYi32;
2644 ExtractSubReg = AArch64::ssub;
2645 break;
2646 case 64:
2647 CopyOpc = AArch64::CPYi64;
2648 ExtractSubReg = AArch64::dsub;
2649 break;
2650 default:
2651 // Unknown size, bail out.
2652 LLVM_DEBUG(dbgs() << "Elt size '" << EltSize << "' unsupported.\n");
2653 return false;
2654 }
2655 return true;
2656}
2657
Amara Emersond61b89b2019-03-14 22:48:18 +00002658MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002659 Optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
2660 Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
Amara Emersond61b89b2019-03-14 22:48:18 +00002661 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
2662 unsigned CopyOpc = 0;
2663 unsigned ExtractSubReg = 0;
2664 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, ScalarTy.getSizeInBits())) {
2665 LLVM_DEBUG(
2666 dbgs() << "Couldn't determine lane copy opcode for instruction.\n");
2667 return nullptr;
2668 }
2669
2670 const TargetRegisterClass *DstRC =
2671 getRegClassForTypeOnBank(ScalarTy, DstRB, RBI, true);
2672 if (!DstRC) {
2673 LLVM_DEBUG(dbgs() << "Could not determine destination register class.\n");
2674 return nullptr;
2675 }
2676
2677 const RegisterBank &VecRB = *RBI.getRegBank(VecReg, MRI, TRI);
2678 const LLT &VecTy = MRI.getType(VecReg);
2679 const TargetRegisterClass *VecRC =
2680 getRegClassForTypeOnBank(VecTy, VecRB, RBI, true);
2681 if (!VecRC) {
2682 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
2683 return nullptr;
2684 }
2685
2686 // The register that we're going to copy into.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002687 Register InsertReg = VecReg;
Amara Emersond61b89b2019-03-14 22:48:18 +00002688 if (!DstReg)
2689 DstReg = MRI.createVirtualRegister(DstRC);
2690 // If the lane index is 0, we just use a subregister COPY.
2691 if (LaneIdx == 0) {
Amara Emerson86271782019-03-18 19:20:10 +00002692 auto Copy = MIRBuilder.buildInstr(TargetOpcode::COPY, {*DstReg}, {})
2693 .addReg(VecReg, 0, ExtractSubReg);
Amara Emersond61b89b2019-03-14 22:48:18 +00002694 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
Amara Emerson3739a202019-03-15 21:59:50 +00002695 return &*Copy;
Amara Emersond61b89b2019-03-14 22:48:18 +00002696 }
2697
2698 // Lane copies require 128-bit wide registers. If we're dealing with an
2699 // unpacked vector, then we need to move up to that width. Insert an implicit
2700 // def and a subregister insert to get us there.
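  // E.g. extracting a lane from a <2 x s32> operand: the 64-bit vector is
  // first inserted into the dsub slot of an FPR128 before the CPYi32.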
2701 if (VecTy.getSizeInBits() != 128) {
2702 MachineInstr *ScalarToVector = emitScalarToVector(
2703 VecTy.getSizeInBits(), &AArch64::FPR128RegClass, VecReg, MIRBuilder);
2704 if (!ScalarToVector)
2705 return nullptr;
2706 InsertReg = ScalarToVector->getOperand(0).getReg();
2707 }
2708
2709 MachineInstr *LaneCopyMI =
2710 MIRBuilder.buildInstr(CopyOpc, {*DstReg}, {InsertReg}).addImm(LaneIdx);
2711 constrainSelectedInstRegOperands(*LaneCopyMI, TII, TRI, RBI);
2712
2713 // Make sure that we actually constrain the initial copy.
2714 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
2715 return LaneCopyMI;
2716}
2717
Jessica Paquette607774c2019-03-11 22:18:01 +00002718bool AArch64InstructionSelector::selectExtractElt(
2719 MachineInstr &I, MachineRegisterInfo &MRI) const {
2720 assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
2721 "unexpected opcode!");
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002722 Register DstReg = I.getOperand(0).getReg();
Jessica Paquette607774c2019-03-11 22:18:01 +00002723 const LLT NarrowTy = MRI.getType(DstReg);
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002724 const Register SrcReg = I.getOperand(1).getReg();
Jessica Paquette607774c2019-03-11 22:18:01 +00002725 const LLT WideTy = MRI.getType(SrcReg);
Amara Emersond61b89b2019-03-14 22:48:18 +00002726 (void)WideTy;
Jessica Paquette607774c2019-03-11 22:18:01 +00002727 assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() &&
2728 "source register size too small!");
2729 assert(NarrowTy.isScalar() && "cannot extract vector into vector!");
2730
2731 // Need the lane index to determine the correct copy opcode.
2732 MachineOperand &LaneIdxOp = I.getOperand(2);
2733 assert(LaneIdxOp.isReg() && "Lane index operand was not a register?");
2734
2735 if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
2736 LLVM_DEBUG(dbgs() << "Cannot extract into GPR.\n");
2737 return false;
2738 }
2739
Jessica Paquettebb1aced2019-03-13 21:19:29 +00002740 // Find the index to extract from.
Jessica Paquette76f64b62019-04-26 21:53:13 +00002741 auto VRegAndVal = getConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI);
2742 if (!VRegAndVal)
Jessica Paquette607774c2019-03-11 22:18:01 +00002743 return false;
Jessica Paquette76f64b62019-04-26 21:53:13 +00002744 unsigned LaneIdx = VRegAndVal->Value;
Jessica Paquette607774c2019-03-11 22:18:01 +00002745
Jessica Paquette607774c2019-03-11 22:18:01 +00002746 MachineIRBuilder MIRBuilder(I);
2747
Amara Emersond61b89b2019-03-14 22:48:18 +00002748 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
2749 MachineInstr *Extract = emitExtractVectorElt(DstReg, DstRB, NarrowTy, SrcReg,
2750 LaneIdx, MIRBuilder);
2751 if (!Extract)
2752 return false;
2753
2754 I.eraseFromParent();
2755 return true;
2756}
2757
2758bool AArch64InstructionSelector::selectSplitVectorUnmerge(
2759 MachineInstr &I, MachineRegisterInfo &MRI) const {
2760 unsigned NumElts = I.getNumOperands() - 1;
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002761 Register SrcReg = I.getOperand(NumElts).getReg();
Amara Emersond61b89b2019-03-14 22:48:18 +00002762 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
2763 const LLT SrcTy = MRI.getType(SrcReg);
2764
2765 assert(NarrowTy.isVector() && "Expected an unmerge into vectors");
2766 if (SrcTy.getSizeInBits() > 128) {
2767 LLVM_DEBUG(dbgs() << "Unexpected vector type for vec split unmerge\n");
2768 return false;
Jessica Paquette607774c2019-03-11 22:18:01 +00002769 }
2770
Amara Emersond61b89b2019-03-14 22:48:18 +00002771 MachineIRBuilder MIB(I);
2772
2773 // We implement a split vector operation by treating the sub-vectors as
2774 // scalars and extracting them.
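  // E.g. unmerging a <4 x s32> into two <2 x s32> halves copies the two
  // 64-bit "lanes" of the source out with CPYi64.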
2775 const RegisterBank &DstRB =
2776 *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI);
2777 for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) {
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002778 Register Dst = I.getOperand(OpIdx).getReg();
Amara Emersond61b89b2019-03-14 22:48:18 +00002779 MachineInstr *Extract =
2780 emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB);
2781 if (!Extract)
Jessica Paquette607774c2019-03-11 22:18:01 +00002782 return false;
Jessica Paquette607774c2019-03-11 22:18:01 +00002783 }
Jessica Paquette607774c2019-03-11 22:18:01 +00002784 I.eraseFromParent();
2785 return true;
2786}
2787
Jessica Paquette245047d2019-01-24 22:00:41 +00002788bool AArch64InstructionSelector::selectUnmergeValues(
2789 MachineInstr &I, MachineRegisterInfo &MRI) const {
2790 assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2791 "unexpected opcode");
2792
2793 // TODO: Handle unmerging into GPRs and from scalars to scalars.
2794 if (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
2795 AArch64::FPRRegBankID ||
2796 RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() !=
2797 AArch64::FPRRegBankID) {
2798 LLVM_DEBUG(dbgs() << "Unmerging vector-to-gpr and scalar-to-scalar "
2799 "currently unsupported.\n");
2800 return false;
2801 }
2802
2803 // The last operand is the vector source register, and every other operand is
2804 // a register to unpack into.
2805 unsigned NumElts = I.getNumOperands() - 1;
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002806 Register SrcReg = I.getOperand(NumElts).getReg();
Jessica Paquette245047d2019-01-24 22:00:41 +00002807 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
2808 const LLT WideTy = MRI.getType(SrcReg);
Benjamin Kramer653020d2019-01-24 23:45:07 +00002809 (void)WideTy;
Jessica Paquette245047d2019-01-24 22:00:41 +00002810 assert(WideTy.isVector() && "can only unmerge from vector types!");
2811 assert(WideTy.getSizeInBits() > NarrowTy.getSizeInBits() &&
2812 "source register size too small!");
2813
Amara Emersond61b89b2019-03-14 22:48:18 +00002814 if (!NarrowTy.isScalar())
2815 return selectSplitVectorUnmerge(I, MRI);
Jessica Paquette245047d2019-01-24 22:00:41 +00002816
Amara Emerson3739a202019-03-15 21:59:50 +00002817 MachineIRBuilder MIB(I);
2818
Jessica Paquette245047d2019-01-24 22:00:41 +00002819 // Choose a lane copy opcode and subregister based off of the size of the
2820 // vector's elements.
2821 unsigned CopyOpc = 0;
2822 unsigned ExtractSubReg = 0;
Jessica Paquette607774c2019-03-11 22:18:01 +00002823 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, NarrowTy.getSizeInBits()))
Jessica Paquette245047d2019-01-24 22:00:41 +00002824 return false;
Jessica Paquette245047d2019-01-24 22:00:41 +00002825
2826 // Set up for the lane copies.
2827 MachineBasicBlock &MBB = *I.getParent();
2828
2829 // Stores the registers we'll be copying from.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002830 SmallVector<Register, 4> InsertRegs;
Jessica Paquette245047d2019-01-24 22:00:41 +00002831
2832 // We'll use the first register twice, so we only need NumElts-1 registers.
2833 unsigned NumInsertRegs = NumElts - 1;
2834
2835 // If our elements fit into exactly 128 bits, then we can copy from the source
2836 // directly. Otherwise, we need to do a bit of setup with some subregister
2837 // inserts.
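  // E.g. an unmerge of a <2 x s32> source only covers 64 bits in total, so
  // the source must first be widened to 128 bits for the lane copies.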
2838 if (NarrowTy.getSizeInBits() * NumElts == 128) {
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002839 InsertRegs = SmallVector<Register, 4>(NumInsertRegs, SrcReg);
Jessica Paquette245047d2019-01-24 22:00:41 +00002840 } else {
2841 // No. We have to perform subregister inserts. For each insert, create an
2842 // implicit def and a subregister insert, and save the register we create.
2843 for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) {
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002844 Register ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
Jessica Paquette245047d2019-01-24 22:00:41 +00002845 MachineInstr &ImpDefMI =
2846 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
2847 ImpDefReg);
2848
2849 // Now, create the subregister insert from SrcReg.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002850 Register InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
Jessica Paquette245047d2019-01-24 22:00:41 +00002851 MachineInstr &InsMI =
2852 *BuildMI(MBB, I, I.getDebugLoc(),
2853 TII.get(TargetOpcode::INSERT_SUBREG), InsertReg)
2854 .addUse(ImpDefReg)
2855 .addUse(SrcReg)
2856 .addImm(AArch64::dsub);
2857
2858 constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
2859 constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
2860
2861 // Save the register so that we can copy from it after.
2862 InsertRegs.push_back(InsertReg);
2863 }
2864 }
2865
2866 // Now that we've created any necessary subregister inserts, we can
2867 // create the copies.
2868 //
2869 // Perform the first copy separately as a subregister copy.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002870 Register CopyTo = I.getOperand(0).getReg();
Amara Emerson86271782019-03-18 19:20:10 +00002871 auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {})
2872 .addReg(InsertRegs[0], 0, ExtractSubReg);
Amara Emerson3739a202019-03-15 21:59:50 +00002873 constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI);
Jessica Paquette245047d2019-01-24 22:00:41 +00002874
2875 // Now, perform the remaining copies as vector lane copies.
2876 unsigned LaneIdx = 1;
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002877 for (Register InsReg : InsertRegs) {
2878 Register CopyTo = I.getOperand(LaneIdx).getReg();
Jessica Paquette245047d2019-01-24 22:00:41 +00002879 MachineInstr &CopyInst =
2880 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo)
2881 .addUse(InsReg)
2882 .addImm(LaneIdx);
2883 constrainSelectedInstRegOperands(CopyInst, TII, TRI, RBI);
2884 ++LaneIdx;
2885 }
2886
2887 // Separately constrain the first copy's destination. Because of the
2888 // limitation in constrainOperandRegClass, we can't guarantee that this will
2889 // actually be constrained. So, do it ourselves using the second operand.
2890 const TargetRegisterClass *RC =
2891 MRI.getRegClassOrNull(I.getOperand(1).getReg());
2892 if (!RC) {
2893 LLVM_DEBUG(dbgs() << "Couldn't constrain copy destination.\n");
2894 return false;
2895 }
2896
2897 RBI.constrainGenericRegister(CopyTo, *RC, MRI);
2898 I.eraseFromParent();
2899 return true;
2900}
2901
Amara Emerson2ff22982019-03-14 22:48:15 +00002902bool AArch64InstructionSelector::selectConcatVectors(
2903 MachineInstr &I, MachineRegisterInfo &MRI) const {
2904 assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
2905 "Unexpected opcode");
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00002906 Register Dst = I.getOperand(0).getReg();
2907 Register Op1 = I.getOperand(1).getReg();
2908 Register Op2 = I.getOperand(2).getReg();
Amara Emerson2ff22982019-03-14 22:48:15 +00002909 MachineIRBuilder MIRBuilder(I);
2910 MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIRBuilder);
2911 if (!ConcatMI)
2912 return false;
2913 I.eraseFromParent();
2914 return true;
2915}
2916
Amara Emerson1abe05c2019-02-21 20:20:16 +00002917void AArch64InstructionSelector::collectShuffleMaskIndices(
2918 MachineInstr &I, MachineRegisterInfo &MRI,
Amara Emerson2806fd02019-04-12 21:31:21 +00002919 SmallVectorImpl<Optional<int>> &Idxs) const {
Amara Emerson1abe05c2019-02-21 20:20:16 +00002920 MachineInstr *MaskDef = MRI.getVRegDef(I.getOperand(3).getReg());
2921 assert(
2922 MaskDef->getOpcode() == TargetOpcode::G_BUILD_VECTOR &&
2923 "G_SHUFFLE_VECTOR should have a constant mask operand as G_BUILD_VECTOR");
2924 // Find the constant indices.
2925 for (unsigned i = 1, e = MaskDef->getNumOperands(); i < e; ++i) {
Amara Emerson1abe05c2019-02-21 20:20:16 +00002926 // Look through copies.
Jessica Paquette31329682019-07-10 18:44:57 +00002927 MachineInstr *ScalarDef =
2928 getDefIgnoringCopies(MaskDef->getOperand(i).getReg(), MRI);
2929 assert(ScalarDef && "Could not find vreg def of shufflevec index op");
Amara Emerson2806fd02019-04-12 21:31:21 +00002930 if (ScalarDef->getOpcode() != TargetOpcode::G_CONSTANT) {
2931 // This must be an undef if it's not a constant.
2932 assert(ScalarDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
2933 Idxs.push_back(None);
2934 } else {
2935 Idxs.push_back(ScalarDef->getOperand(1).getCImm()->getSExtValue());
2936 }
Amara Emerson1abe05c2019-02-21 20:20:16 +00002937 }
2938}
2939
2940unsigned
2941AArch64InstructionSelector::emitConstantPoolEntry(Constant *CPVal,
2942 MachineFunction &MF) const {
Hans Wennborg5d5ee4a2019-04-26 08:31:00 +00002943 Type *CPTy = CPVal->getType();
Amara Emerson1abe05c2019-02-21 20:20:16 +00002944 unsigned Align = MF.getDataLayout().getPrefTypeAlignment(CPTy);
2945 if (Align == 0)
2946 Align = MF.getDataLayout().getTypeAllocSize(CPTy);
2947
2948 MachineConstantPool *MCP = MF.getConstantPool();
2949 return MCP->getConstantPoolIndex(CPVal, Align);
2950}
2951
2952MachineInstr *AArch64InstructionSelector::emitLoadFromConstantPool(
2953 Constant *CPVal, MachineIRBuilder &MIRBuilder) const {
2954 unsigned CPIdx = emitConstantPoolEntry(CPVal, MIRBuilder.getMF());
2955
2956 auto Adrp =
2957 MIRBuilder.buildInstr(AArch64::ADRP, {&AArch64::GPR64RegClass}, {})
2958 .addConstantPoolIndex(CPIdx, 0, AArch64II::MO_PAGE);
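  // ADRP materializes the 4 KiB page address of the constant pool slot; the
  // load below then applies the low 12 bits via MO_PAGEOFF | MO_NC.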
Amara Emerson8acb0d92019-03-04 19:16:00 +00002959
2960 MachineInstr *LoadMI = nullptr;
2961 switch (MIRBuilder.getDataLayout().getTypeStoreSize(CPVal->getType())) {
2962 case 16:
2963 LoadMI =
2964 &*MIRBuilder
2965 .buildInstr(AArch64::LDRQui, {&AArch64::FPR128RegClass}, {Adrp})
2966 .addConstantPoolIndex(CPIdx, 0,
2967 AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2968 break;
2969 case 8:
2970 LoadMI = &*MIRBuilder
2971 .buildInstr(AArch64::LDRDui, {&AArch64::FPR64RegClass}, {Adrp})
2972 .addConstantPoolIndex(
2973 CPIdx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2974 break;
2975 default:
2976 LLVM_DEBUG(dbgs() << "Could not load from constant pool of type "
2977 << *CPVal->getType());
2978 return nullptr;
2979 }
Amara Emerson1abe05c2019-02-21 20:20:16 +00002980 constrainSelectedInstRegOperands(*Adrp, TII, TRI, RBI);
Amara Emerson8acb0d92019-03-04 19:16:00 +00002981 constrainSelectedInstRegOperands(*LoadMI, TII, TRI, RBI);
2982 return LoadMI;
2983}
2984
2985 /// Return an <Opcode, SubregIndex> pair to do a vector elt insert of a given
2986/// size and RB.
2987static std::pair<unsigned, unsigned>
2988getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) {
2989 unsigned Opc, SubregIdx;
2990 if (RB.getID() == AArch64::GPRRegBankID) {
2991 if (EltSize == 32) {
2992 Opc = AArch64::INSvi32gpr;
2993 SubregIdx = AArch64::ssub;
2994 } else if (EltSize == 64) {
2995 Opc = AArch64::INSvi64gpr;
2996 SubregIdx = AArch64::dsub;
2997 } else {
2998 llvm_unreachable("invalid elt size!");
2999 }
3000 } else {
3001 if (EltSize == 8) {
3002 Opc = AArch64::INSvi8lane;
3003 SubregIdx = AArch64::bsub;
3004 } else if (EltSize == 16) {
3005 Opc = AArch64::INSvi16lane;
3006 SubregIdx = AArch64::hsub;
3007 } else if (EltSize == 32) {
3008 Opc = AArch64::INSvi32lane;
3009 SubregIdx = AArch64::ssub;
3010 } else if (EltSize == 64) {
3011 Opc = AArch64::INSvi64lane;
3012 SubregIdx = AArch64::dsub;
3013 } else {
3014 llvm_unreachable("invalid elt size!");
3015 }
3016 }
3017 return std::make_pair(Opc, SubregIdx);
3018}
3019
Jessica Paquette99316042019-07-02 19:44:16 +00003020MachineInstr *
3021AArch64InstructionSelector::emitCMN(MachineOperand &LHS, MachineOperand &RHS,
3022 MachineIRBuilder &MIRBuilder) const {
3023 assert(LHS.isReg() && RHS.isReg() && "Expected LHS and RHS to be registers!");
3024 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
3025 static const unsigned OpcTable[2][2]{{AArch64::ADDSXrr, AArch64::ADDSXri},
3026 {AArch64::ADDSWrr, AArch64::ADDSWri}};
3027 bool Is32Bit = (MRI.getType(LHS.getReg()).getSizeInBits() == 32);
3028 auto ImmFns = selectArithImmed(RHS);
3029 unsigned Opc = OpcTable[Is32Bit][ImmFns.hasValue()];
3030 Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR;
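  // CMN x, y is ADDS wzr/xzr, x, y: it sets NZCV for x + y (i.e. x - (-y))
  // without keeping the sum.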
3031
3032 auto CmpMI = MIRBuilder.buildInstr(Opc, {ZReg}, {LHS.getReg()});
3033
3034 // If we matched a valid constant immediate, add those operands.
3035 if (ImmFns) {
3036 for (auto &RenderFn : *ImmFns)
3037 RenderFn(CmpMI);
3038 } else {
3039 CmpMI.addUse(RHS.getReg());
3040 }
3041
3042 constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
3043 return &*CmpMI;
3044}
3045
Jessica Paquette55d19242019-07-08 22:58:36 +00003046MachineInstr *
3047AArch64InstructionSelector::emitTST(const Register &LHS, const Register &RHS,
3048 MachineIRBuilder &MIRBuilder) const {
3049 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
3050 unsigned RegSize = MRI.getType(LHS).getSizeInBits();
3051 bool Is32Bit = (RegSize == 32);
3052 static const unsigned OpcTable[2][2]{{AArch64::ANDSXrr, AArch64::ANDSXri},
3053 {AArch64::ANDSWrr, AArch64::ANDSWri}};
3054 Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR;
3055
3056 // We might be able to fold an immediate into the TST. We need to make sure
3057 // it's a logical immediate though, since ANDS requires that.
3058 auto ValAndVReg = getConstantVRegValWithLookThrough(RHS, MRI);
3059 bool IsImmForm = ValAndVReg.hasValue() &&
3060 AArch64_AM::isLogicalImmediate(ValAndVReg->Value, RegSize);
3061 unsigned Opc = OpcTable[Is32Bit][IsImmForm];
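  // TST is ANDS with the zero register as the destination; e.g. a test
  // against a mask like 0xff folds to ANDSWri with the encoded logical
  // immediate.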
3062 auto TstMI = MIRBuilder.buildInstr(Opc, {ZReg}, {LHS});
3063
3064 if (IsImmForm)
3065 TstMI.addImm(
3066 AArch64_AM::encodeLogicalImmediate(ValAndVReg->Value, RegSize));
3067 else
3068 TstMI.addUse(RHS);
3069
3070 constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI);
3071 return &*TstMI;
3072}
3073
Jessica Paquette99316042019-07-02 19:44:16 +00003074MachineInstr *AArch64InstructionSelector::emitIntegerCompare(
3075 MachineOperand &LHS, MachineOperand &RHS, MachineOperand &Predicate,
3076 MachineIRBuilder &MIRBuilder) const {
3077 assert(LHS.isReg() && RHS.isReg() && "Expected LHS and RHS to be registers!");
3078 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
3079
Jessica Paquette55d19242019-07-08 22:58:36 +00003080 // Fold the compare if possible.
3081 MachineInstr *FoldCmp =
3082 tryFoldIntegerCompare(LHS, RHS, Predicate, MIRBuilder);
3083 if (FoldCmp)
3084 return FoldCmp;
Jessica Paquette99316042019-07-02 19:44:16 +00003085
3086 // Can't fold into a CMN. Just emit a normal compare.
3087 unsigned CmpOpc = 0;
3088 Register ZReg;
3089
3090 LLT CmpTy = MRI.getType(LHS.getReg());
Jessica Paquette65841092019-07-03 18:30:01 +00003091 assert((CmpTy.isScalar() || CmpTy.isPointer()) &&
3092 "Expected scalar or pointer");
Jessica Paquette99316042019-07-02 19:44:16 +00003093 if (CmpTy == LLT::scalar(32)) {
3094 CmpOpc = AArch64::SUBSWrr;
3095 ZReg = AArch64::WZR;
3096 } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
3097 CmpOpc = AArch64::SUBSXrr;
3098 ZReg = AArch64::XZR;
3099 } else {
3100 return nullptr;
3101 }
3102
3103 // Try to match immediate forms.
3104 auto ImmFns = selectArithImmed(RHS);
3105 if (ImmFns)
3106 CmpOpc = CmpOpc == AArch64::SUBSWrr ? AArch64::SUBSWri : AArch64::SUBSXri;
3107
3108 auto CmpMI = MIRBuilder.buildInstr(CmpOpc).addDef(ZReg).addUse(LHS.getReg());
3109 // If we matched a valid constant immediate, add those operands.
3110 if (ImmFns) {
3111 for (auto &RenderFn : *ImmFns)
3112 RenderFn(CmpMI);
3113 } else {
3114 CmpMI.addUse(RHS.getReg());
3115 }
3116
3117 // Make sure that we can constrain the compare that we emitted.
3118 constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
3119 return &*CmpMI;
3120}
3121
Amara Emerson8acb0d92019-03-04 19:16:00 +00003122MachineInstr *AArch64InstructionSelector::emitVectorConcat(
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003123 Optional<Register> Dst, Register Op1, Register Op2,
Amara Emerson2ff22982019-03-14 22:48:15 +00003124 MachineIRBuilder &MIRBuilder) const {
Amara Emerson8acb0d92019-03-04 19:16:00 +00003125 // We implement a vector concat by:
3126 // 1. Use scalar_to_vector to insert the lower vector into the larger dest
3127 // 2. Insert the upper vector into the destination's upper element
3128 // TODO: some of this code is common with G_BUILD_VECTOR handling.
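  // E.g. concatenating two <2 x s32> values: Op1 is widened into an FPR128,
  // then Op2 is inserted into lane 1 of it as a single 64-bit element.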
3129 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
3130
3131 const LLT Op1Ty = MRI.getType(Op1);
3132 const LLT Op2Ty = MRI.getType(Op2);
3133
3134 if (Op1Ty != Op2Ty) {
3135 LLVM_DEBUG(dbgs() << "Could not do vector concat of differing vector tys\n");
3136 return nullptr;
3137 }
3138 assert(Op1Ty.isVector() && "Expected a vector for vector concat");
3139
3140 if (Op1Ty.getSizeInBits() >= 128) {
3141 LLVM_DEBUG(dbgs() << "Vector concat not supported for full size vectors\n");
3142 return nullptr;
3143 }
3144
3145 // At the moment we just support 64 bit vector concats.
3146 if (Op1Ty.getSizeInBits() != 64) {
3147 LLVM_DEBUG(dbgs() << "Vector concat only supported for 64b vectors\n");
3148 return nullptr;
3149 }
3150
3151 const LLT ScalarTy = LLT::scalar(Op1Ty.getSizeInBits());
3152 const RegisterBank &FPRBank = *RBI.getRegBank(Op1, MRI, TRI);
3153 const TargetRegisterClass *DstRC =
3154 getMinClassForRegBank(FPRBank, Op1Ty.getSizeInBits() * 2);
3155
3156 MachineInstr *WidenedOp1 =
3157 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op1, MIRBuilder);
3158 MachineInstr *WidenedOp2 =
3159 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op2, MIRBuilder);
3160 if (!WidenedOp1 || !WidenedOp2) {
3161 LLVM_DEBUG(dbgs() << "Could not emit a vector from scalar value\n");
3162 return nullptr;
3163 }
3164
3165 // Now do the insert of the upper element.
3166 unsigned InsertOpc, InsSubRegIdx;
3167 std::tie(InsertOpc, InsSubRegIdx) =
3168 getInsertVecEltOpInfo(FPRBank, ScalarTy.getSizeInBits());
3169
Amara Emerson2ff22982019-03-14 22:48:15 +00003170 if (!Dst)
3171 Dst = MRI.createVirtualRegister(DstRC);
Amara Emerson8acb0d92019-03-04 19:16:00 +00003172 auto InsElt =
3173 MIRBuilder
Amara Emerson2ff22982019-03-14 22:48:15 +00003174 .buildInstr(InsertOpc, {*Dst}, {WidenedOp1->getOperand(0).getReg()})
Amara Emerson8acb0d92019-03-04 19:16:00 +00003175 .addImm(1) /* Lane index */
3176 .addUse(WidenedOp2->getOperand(0).getReg())
3177 .addImm(0);
Amara Emerson8acb0d92019-03-04 19:16:00 +00003178 constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
3179 return &*InsElt;
Amara Emerson1abe05c2019-02-21 20:20:16 +00003180}
3181
Jessica Paquettea3843fe2019-05-01 22:39:43 +00003182MachineInstr *AArch64InstructionSelector::emitFMovForFConstant(
3183 MachineInstr &I, MachineRegisterInfo &MRI) const {
3184 assert(I.getOpcode() == TargetOpcode::G_FCONSTANT &&
3185 "Expected a G_FCONSTANT!");
3186 MachineOperand &ImmOp = I.getOperand(1);
3187 unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
3188
3189 // Only handle 32 and 64 bit defs for now.
3190 if (DefSize != 32 && DefSize != 64)
3191 return nullptr;
3192
3193 // Don't handle null values using FMOV.
3194 if (ImmOp.getFPImm()->isNullValue())
3195 return nullptr;
3196
3197 // Get the immediate representation for the FMOV.
3198 const APFloat &ImmValAPF = ImmOp.getFPImm()->getValueAPF();
3199 int Imm = DefSize == 32 ? AArch64_AM::getFP32Imm(ImmValAPF)
3200 : AArch64_AM::getFP64Imm(ImmValAPF);
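  // The 8-bit FMOV immediate roughly covers values of the form n/16 * 2^r;
  // e.g. 1.0 encodes to 0x70, while a value like 0.3 gives -1 (unencodable).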
3201
3202 // If this is -1, it means the immediate can't be represented as the requested
3203 // floating point value. Bail.
3204 if (Imm == -1)
3205 return nullptr;
3206
3207 // Update MI to represent the new FMOV instruction, constrain it, and return.
3208 ImmOp.ChangeToImmediate(Imm);
3209 unsigned MovOpc = DefSize == 32 ? AArch64::FMOVSi : AArch64::FMOVDi;
3210 I.setDesc(TII.get(MovOpc));
3211 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3212 return &I;
3213}
3214
Jessica Paquette49537bb2019-06-17 18:40:06 +00003215MachineInstr *
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003216AArch64InstructionSelector::emitCSetForICMP(Register DefReg, unsigned Pred,
Jessica Paquette49537bb2019-06-17 18:40:06 +00003217 MachineIRBuilder &MIRBuilder) const {
3218 // CSINC increments the result when the predicate is false. Invert it.
3219 const AArch64CC::CondCode InvCC = changeICMPPredToAArch64CC(
3220 CmpInst::getInversePredicate((CmpInst::Predicate)Pred));
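  // This is the usual 'cset' idiom: CSINC Wd, WZR, WZR, InvCC produces 1 when
  // the original predicate holds and 0 otherwise.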
3221 auto I =
3222 MIRBuilder
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003223 .buildInstr(AArch64::CSINCWr, {DefReg}, {Register(AArch64::WZR), Register(AArch64::WZR)})
Jessica Paquette49537bb2019-06-17 18:40:06 +00003224 .addImm(InvCC);
3225 constrainSelectedInstRegOperands(*I, TII, TRI, RBI);
3226 return &*I;
3227}
3228
Amara Emersonc37ff0d2019-06-05 23:46:16 +00003229bool AArch64InstructionSelector::tryOptSelect(MachineInstr &I) const {
3230 MachineIRBuilder MIB(I);
3231 MachineRegisterInfo &MRI = *MIB.getMRI();
3232 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
3233
3234 // We want to recognize this pattern:
3235 //
3236 // $z = G_FCMP pred, $x, $y
3237 // ...
3238 // $w = G_SELECT $z, $a, $b
3239 //
3240 // Where the value of $z is *only* ever used by the G_SELECT (possibly with
3241 // some copies/truncs in between).
3242 //
3243 // If we see this, then we can emit something like this:
3244 //
3245 // fcmp $x, $y
3246 // fcsel $w, $a, $b, pred
3247 //
3248 // Rather than emitting both of the rather long sequences in the standard
3249 // G_FCMP/G_SELECT select methods.
3250
3251 // First, check if the condition is defined by a compare.
3252 MachineInstr *CondDef = MRI.getVRegDef(I.getOperand(1).getReg());
3253 while (CondDef) {
3254 // We can only fold if all of the defs have one use.
3255 if (!MRI.hasOneUse(CondDef->getOperand(0).getReg()))
3256 return false;
3257
3258 // We can skip over G_TRUNC since the condition is 1-bit.
3259 // Truncating/extending can have no impact on the value.
3260 unsigned Opc = CondDef->getOpcode();
3261 if (Opc != TargetOpcode::COPY && Opc != TargetOpcode::G_TRUNC)
3262 break;
3263
Amara Emersond940e202019-06-06 07:33:47 +00003264 // Can't see past copies from physregs.
3265 if (Opc == TargetOpcode::COPY &&
3266 TargetRegisterInfo::isPhysicalRegister(CondDef->getOperand(1).getReg()))
3267 return false;
3268
Amara Emersonc37ff0d2019-06-05 23:46:16 +00003269 CondDef = MRI.getVRegDef(CondDef->getOperand(1).getReg());
3270 }
3271
3272 // Is the condition defined by a compare?
Jessica Paquette99316042019-07-02 19:44:16 +00003273 if (!CondDef)
Amara Emersonc37ff0d2019-06-05 23:46:16 +00003274 return false;
3275
Jessica Paquette99316042019-07-02 19:44:16 +00003276 unsigned CondOpc = CondDef->getOpcode();
3277 if (CondOpc != TargetOpcode::G_ICMP && CondOpc != TargetOpcode::G_FCMP)
3278 return false;
3279
Amara Emersonc37ff0d2019-06-05 23:46:16 +00003280 AArch64CC::CondCode CondCode;
Jessica Paquette99316042019-07-02 19:44:16 +00003281 if (CondOpc == TargetOpcode::G_ICMP) {
3282 CondCode = changeICMPPredToAArch64CC(
3283 (CmpInst::Predicate)CondDef->getOperand(1).getPredicate());
3284 if (!emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3),
3285 CondDef->getOperand(1), MIB)) {
3286 LLVM_DEBUG(dbgs() << "Couldn't emit compare for select!\n");
3287 return false;
3288 }
3289 } else {
3290 // Get the condition code for the select.
3291 AArch64CC::CondCode CondCode2;
3292 changeFCMPPredToAArch64CC(
3293 (CmpInst::Predicate)CondDef->getOperand(1).getPredicate(), CondCode,
3294 CondCode2);
Amara Emersonc37ff0d2019-06-05 23:46:16 +00003295
Jessica Paquette99316042019-07-02 19:44:16 +00003296 // changeFCMPPredToAArch64CC sets CondCode2 to AL when we require two
3297 // instructions to emit the comparison.
3298 // TODO: Handle FCMP_UEQ and FCMP_ONE. After that, this check will be
3299 // unnecessary.
3300 if (CondCode2 != AArch64CC::AL)
3301 return false;
Amara Emersonc37ff0d2019-06-05 23:46:16 +00003302
Jessica Paquette99316042019-07-02 19:44:16 +00003303 // Make sure we'll be able to select the compare.
3304 unsigned CmpOpc = selectFCMPOpc(*CondDef, MRI);
3305 if (!CmpOpc)
3306 return false;
Amara Emersonc37ff0d2019-06-05 23:46:16 +00003307
Jessica Paquette99316042019-07-02 19:44:16 +00003308 // Emit a new compare.
3309 auto Cmp = MIB.buildInstr(CmpOpc, {}, {CondDef->getOperand(2).getReg()});
3310 if (CmpOpc != AArch64::FCMPSri && CmpOpc != AArch64::FCMPDri)
3311 Cmp.addUse(CondDef->getOperand(3).getReg());
3312 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
3313 }
Amara Emersonc37ff0d2019-06-05 23:46:16 +00003314
3315 // Emit the select.
3316 unsigned CSelOpc = selectSelectOpc(I, MRI, RBI);
3317 auto CSel =
3318 MIB.buildInstr(CSelOpc, {I.getOperand(0).getReg()},
3319 {I.getOperand(2).getReg(), I.getOperand(3).getReg()})
3320 .addImm(CondCode);
Amara Emersonc37ff0d2019-06-05 23:46:16 +00003321 constrainSelectedInstRegOperands(*CSel, TII, TRI, RBI);
3322 I.eraseFromParent();
3323 return true;
3324}
3325
Jessica Paquette55d19242019-07-08 22:58:36 +00003326MachineInstr *AArch64InstructionSelector::tryFoldIntegerCompare(
3327 MachineOperand &LHS, MachineOperand &RHS, MachineOperand &Predicate,
3328 MachineIRBuilder &MIRBuilder) const {
Jessica Paquette99316042019-07-02 19:44:16 +00003329 assert(LHS.isReg() && RHS.isReg() && Predicate.isPredicate() &&
3330 "Unexpected MachineOperand");
Jessica Paquette49537bb2019-06-17 18:40:06 +00003331 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
3332 // We want to find this sort of thing:
3333 // x = G_SUB 0, y
3334 // G_ICMP z, x
3335 //
3336 // In this case, we can fold the G_SUB into the G_ICMP using a CMN instead.
3337 // e.g:
3338 //
3339 // cmn z, y
3340
Jessica Paquette49537bb2019-06-17 18:40:06 +00003341 // Helper lambda to detect the subtract followed by the compare.
3342 // Takes in the def of the LHS or RHS, and checks if it's a subtract from 0.
3343 auto IsCMN = [&](MachineInstr *DefMI, const AArch64CC::CondCode &CC) {
3344 if (!DefMI || DefMI->getOpcode() != TargetOpcode::G_SUB)
3345 return false;
3346
3347 // Need to make sure NZCV is the same at the end of the transformation.
3348 if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
3349 return false;
3354
3355 // Make sure that we're getting
3356 // x = G_SUB 0, y
3357 auto ValAndVReg =
3358 getConstantVRegValWithLookThrough(DefMI->getOperand(1).getReg(), MRI);
3359 if (!ValAndVReg || ValAndVReg->Value != 0)
3360 return false;
3361
3362 // This can safely be represented as a CMN.
3363 return true;
3364 };
3365
3366 // Check if the RHS or LHS of the G_ICMP is defined by a SUB
Jessica Paquette31329682019-07-10 18:44:57 +00003367 MachineInstr *LHSDef = getDefIgnoringCopies(LHS.getReg(), MRI);
3368 MachineInstr *RHSDef = getDefIgnoringCopies(RHS.getReg(), MRI);
Jessica Paquette55d19242019-07-08 22:58:36 +00003369 CmpInst::Predicate P = (CmpInst::Predicate)Predicate.getPredicate();
3370 const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(P);
Jessica Paquette99316042019-07-02 19:44:16 +00003371
Jessica Paquette55d19242019-07-08 22:58:36 +00003372 // Given this:
3373 //
3374 // x = G_SUB 0, y
3375 // G_ICMP x, z
3376 //
3377 // Produce this:
3378 //
3379 // cmn y, z
3380 if (IsCMN(LHSDef, CC))
3381 return emitCMN(LHSDef->getOperand(2), RHS, MIRBuilder);
3382
3383 // Same idea here, but with the RHS of the compare instead:
3384 //
3385 // Given this:
3386 //
3387 // x = G_SUB 0, y
3388 // G_ICMP z, x
3389 //
3390 // Produce this:
3391 //
3392 // cmn z, y
3393 if (IsCMN(RHSDef, CC))
3394 return emitCMN(LHS, RHSDef->getOperand(2), MIRBuilder);
3395
3396 // Given this:
3397 //
3398 // z = G_AND x, y
3399 // G_ICMP z, 0
3400 //
3401 // Produce this if the compare is signed:
3402 //
3403 // tst x, y
3404 if (!isUnsignedICMPPred(P) && LHSDef &&
3405 LHSDef->getOpcode() == TargetOpcode::G_AND) {
3406 // Make sure that the RHS is 0.
3407 auto ValAndVReg = getConstantVRegValWithLookThrough(RHS.getReg(), MRI);
3408 if (!ValAndVReg || ValAndVReg->Value != 0)
3409 return nullptr;
3410
3411 return emitTST(LHSDef->getOperand(1).getReg(),
3412 LHSDef->getOperand(2).getReg(), MIRBuilder);
Jessica Paquette49537bb2019-06-17 18:40:06 +00003413 }
3414
Jessica Paquette99316042019-07-02 19:44:16 +00003415 return nullptr;
Jessica Paquette49537bb2019-06-17 18:40:06 +00003416}
3417
Amara Emerson761ca2e2019-03-19 21:43:05 +00003418bool AArch64InstructionSelector::tryOptVectorDup(MachineInstr &I) const {
3419 // Try to match a vector splat operation into a dup instruction.
3420 // We're looking for this pattern:
3421 // %scalar:gpr(s64) = COPY $x0
3422 // %undef:fpr(<2 x s64>) = G_IMPLICIT_DEF
3423 // %cst0:gpr(s32) = G_CONSTANT i32 0
3424 // %zerovec:fpr(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32)
3425 // %ins:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %undef, %scalar(s64), %cst0(s32)
3426 // %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef,
3427 // %zerovec(<2 x s32>)
3428 //
3429 // ...into:
3430 // %splat = DUP %scalar
3431 // We use the regbank of the scalar to determine which kind of dup to use.
3432 MachineIRBuilder MIB(I);
3433 MachineRegisterInfo &MRI = *MIB.getMRI();
3434 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
3435 using namespace TargetOpcode;
3436 using namespace MIPatternMatch;
3437
3438 // Begin matching the insert.
3439 auto *InsMI =
Jessica Paquette7c959252019-07-10 18:46:56 +00003440 getOpcodeDef(G_INSERT_VECTOR_ELT, I.getOperand(1).getReg(), MRI);
Amara Emerson761ca2e2019-03-19 21:43:05 +00003441 if (!InsMI)
3442 return false;
3443 // Match the undef vector operand.
3444 auto *UndefMI =
Jessica Paquette7c959252019-07-10 18:46:56 +00003445 getOpcodeDef(G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(), MRI);
Amara Emerson761ca2e2019-03-19 21:43:05 +00003446 if (!UndefMI)
3447 return false;
3448 // Match the scalar being splatted.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003449 Register ScalarReg = InsMI->getOperand(2).getReg();
Amara Emerson761ca2e2019-03-19 21:43:05 +00003450 const RegisterBank *ScalarRB = RBI.getRegBank(ScalarReg, MRI, TRI);
3451 // Match the index constant 0.
3452 int64_t Index = 0;
3453 if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ICst(Index)) || Index)
3454 return false;
3455
3456 // The shuffle's second operand doesn't matter if the mask is all zero.
Jessica Paquette7c959252019-07-10 18:46:56 +00003457 auto *ZeroVec = getOpcodeDef(G_BUILD_VECTOR, I.getOperand(3).getReg(), MRI);
Amara Emerson761ca2e2019-03-19 21:43:05 +00003458 if (!ZeroVec)
3459 return false;
3460 int64_t Zero = 0;
3461 if (!mi_match(ZeroVec->getOperand(1).getReg(), MRI, m_ICst(Zero)) || Zero)
3462 return false;
3463 for (unsigned i = 1, e = ZeroVec->getNumOperands() - 1; i < e; ++i) {
3464 if (ZeroVec->getOperand(i).getReg() != ZeroVec->getOperand(1).getReg())
3465 return false; // This wasn't an all zeros vector.
3466 }
3467
3468 // We're done, now find out what kind of splat we need.
3469 LLT VecTy = MRI.getType(I.getOperand(0).getReg());
3470 LLT EltTy = VecTy.getElementType();
3471 if (VecTy.getSizeInBits() != 128 || EltTy.getSizeInBits() < 32) {
3472 LLVM_DEBUG(dbgs() << "Could not optimize splat pattern < 128b yet\n");
3473 return false;
3474 }
3475 bool IsFP = ScalarRB->getID() == AArch64::FPRRegBankID;
3476 static const unsigned OpcTable[2][2] = {
3477 {AArch64::DUPv4i32gpr, AArch64::DUPv2i64gpr},
3478 {AArch64::DUPv4i32lane, AArch64::DUPv2i64lane}};
3479 unsigned Opc = OpcTable[IsFP][EltTy.getSizeInBits() == 64];
3480
3481 // For FP splats, we need to widen the scalar reg via undef too.
3482 if (IsFP) {
3483 MachineInstr *Widen = emitScalarToVector(
3484 EltTy.getSizeInBits(), &AArch64::FPR128RegClass, ScalarReg, MIB);
3485 if (!Widen)
3486 return false;
3487 ScalarReg = Widen->getOperand(0).getReg();
3488 }
3489 auto Dup = MIB.buildInstr(Opc, {I.getOperand(0).getReg()}, {ScalarReg});
3490 if (IsFP)
3491 Dup.addImm(0);
3492 constrainSelectedInstRegOperands(*Dup, TII, TRI, RBI);
3493 I.eraseFromParent();
3494 return true;
3495}
3496
3497bool AArch64InstructionSelector::tryOptVectorShuffle(MachineInstr &I) const {
3498 if (TM.getOptLevel() == CodeGenOpt::None)
3499 return false;
3500 if (tryOptVectorDup(I))
3501 return true;
3502 return false;
3503}
3504
Amara Emerson1abe05c2019-02-21 20:20:16 +00003505bool AArch64InstructionSelector::selectShuffleVector(
3506 MachineInstr &I, MachineRegisterInfo &MRI) const {
Amara Emerson761ca2e2019-03-19 21:43:05 +00003507 if (tryOptVectorShuffle(I))
3508 return true;
Amara Emerson1abe05c2019-02-21 20:20:16 +00003509 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003510 Register Src1Reg = I.getOperand(1).getReg();
Amara Emerson1abe05c2019-02-21 20:20:16 +00003511 const LLT Src1Ty = MRI.getType(Src1Reg);
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003512 Register Src2Reg = I.getOperand(2).getReg();
Amara Emerson1abe05c2019-02-21 20:20:16 +00003513 const LLT Src2Ty = MRI.getType(Src2Reg);
3514
3515 MachineBasicBlock &MBB = *I.getParent();
3516 MachineFunction &MF = *MBB.getParent();
3517 LLVMContext &Ctx = MF.getFunction().getContext();
3518
3519 // G_SHUFFLE_VECTOR doesn't really have a strictly enforced constant mask
3520 // operand; it comes in as a normal vector value which we have to analyze to
Amara Emerson2806fd02019-04-12 21:31:21 +00003521 // find the mask indices. If the mask element is undef, then
3522 // collectShuffleMaskIndices() will add a None entry for that index into
3523 // the list.
3524 SmallVector<Optional<int>, 8> Mask;
Amara Emerson1abe05c2019-02-21 20:20:16 +00003525 collectShuffleMaskIndices(I, MRI, Mask);
3526 assert(!Mask.empty() && "Expected to find mask indices");
3527
3528 // G_SHUFFLE_VECTOR is weird in that the source operands can be scalars, if
3529 // it originated from a <1 x T> type. Those should have been lowered into
3530 // G_BUILD_VECTOR earlier.
3531 if (!Src1Ty.isVector() || !Src2Ty.isVector()) {
3532 LLVM_DEBUG(dbgs() << "Could not select a \"scalar\" G_SHUFFLE_VECTOR\n");
3533 return false;
3534 }
3535
3536 unsigned BytesPerElt = DstTy.getElementType().getSizeInBits() / 8;
3537
3538 SmallVector<Constant *, 64> CstIdxs;
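  // Expand each mask element into per-byte TBL indices; e.g. with 4-byte
  // elements, mask value 1 expands to byte indices 4, 5, 6 and 7.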
Amara Emerson2806fd02019-04-12 21:31:21 +00003539 for (auto &MaybeVal : Mask) {
3540 // For now, we just assume any undef index is 0. This should be
3541 // optimized in the future, e.g. to select a DUP instead.
3542 int Val = MaybeVal.hasValue() ? *MaybeVal : 0;
Amara Emerson1abe05c2019-02-21 20:20:16 +00003543 for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
3544 unsigned Offset = Byte + Val * BytesPerElt;
3545 CstIdxs.emplace_back(ConstantInt::get(Type::getInt8Ty(Ctx), Offset));
3546 }
3547 }
3548
Amara Emerson8acb0d92019-03-04 19:16:00 +00003549 MachineIRBuilder MIRBuilder(I);
Amara Emerson1abe05c2019-02-21 20:20:16 +00003550
3551 // Use a constant pool to load the index vector for TBL.
3552 Constant *CPVal = ConstantVector::get(CstIdxs);
Amara Emerson1abe05c2019-02-21 20:20:16 +00003553 MachineInstr *IndexLoad = emitLoadFromConstantPool(CPVal, MIRBuilder);
3554 if (!IndexLoad) {
3555 LLVM_DEBUG(dbgs() << "Could not load from a constant pool\n");
3556 return false;
3557 }
3558
Amara Emerson8acb0d92019-03-04 19:16:00 +00003559 if (DstTy.getSizeInBits() != 128) {
3560 assert(DstTy.getSizeInBits() == 64 && "Unexpected shuffle result ty");
3561 // This case can be done with TBL1.
Amara Emerson2ff22982019-03-14 22:48:15 +00003562 MachineInstr *Concat = emitVectorConcat(None, Src1Reg, Src2Reg, MIRBuilder);
Amara Emerson8acb0d92019-03-04 19:16:00 +00003563 if (!Concat) {
3564 LLVM_DEBUG(dbgs() << "Could not do vector concat for tbl1\n");
3565 return false;
3566 }
3567
3568 // The constant pool load will be 64 bits, so we need to convert it to an FPR128 reg.
3569 IndexLoad =
3570 emitScalarToVector(64, &AArch64::FPR128RegClass,
3571 IndexLoad->getOperand(0).getReg(), MIRBuilder);
3572
3573 auto TBL1 = MIRBuilder.buildInstr(
3574 AArch64::TBLv16i8One, {&AArch64::FPR128RegClass},
3575 {Concat->getOperand(0).getReg(), IndexLoad->getOperand(0).getReg()});
3576 constrainSelectedInstRegOperands(*TBL1, TII, TRI, RBI);
3577
Amara Emerson3739a202019-03-15 21:59:50 +00003578 auto Copy =
Amara Emerson86271782019-03-18 19:20:10 +00003579 MIRBuilder
3580 .buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
3581 .addReg(TBL1.getReg(0), 0, AArch64::dsub);
Amara Emerson8acb0d92019-03-04 19:16:00 +00003582 RBI.constrainGenericRegister(Copy.getReg(0), AArch64::FPR64RegClass, MRI);
3583 I.eraseFromParent();
3584 return true;
3585 }
3586
Amara Emerson1abe05c2019-02-21 20:20:16 +00003587 // For TBL2 we need to emit a REG_SEQUENCE to tie together two consecutive
3588 // Q registers for regalloc.
3589 auto RegSeq = MIRBuilder
3590 .buildInstr(TargetOpcode::REG_SEQUENCE,
3591 {&AArch64::QQRegClass}, {Src1Reg})
3592 .addImm(AArch64::qsub0)
3593 .addUse(Src2Reg)
3594 .addImm(AArch64::qsub1);
3595
3596 auto TBL2 =
3597 MIRBuilder.buildInstr(AArch64::TBLv16i8Two, {I.getOperand(0).getReg()},
3598 {RegSeq, IndexLoad->getOperand(0).getReg()});
3599 constrainSelectedInstRegOperands(*RegSeq, TII, TRI, RBI);
3600 constrainSelectedInstRegOperands(*TBL2, TII, TRI, RBI);
3601 I.eraseFromParent();
3602 return true;
3603}
3604
Jessica Paquette16d67a32019-03-13 23:22:23 +00003605MachineInstr *AArch64InstructionSelector::emitLaneInsert(
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003606 Optional<Register> DstReg, Register SrcReg, Register EltReg,
Jessica Paquette16d67a32019-03-13 23:22:23 +00003607 unsigned LaneIdx, const RegisterBank &RB,
3608 MachineIRBuilder &MIRBuilder) const {
3609 MachineInstr *InsElt = nullptr;
3610 const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
3611 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
3612
3613 // Create a register to define with the insert if one wasn't passed in.
3614 if (!DstReg)
3615 DstReg = MRI.createVirtualRegister(DstRC);
3616
3617 unsigned EltSize = MRI.getType(EltReg).getSizeInBits();
3618 unsigned Opc = getInsertVecEltOpInfo(RB, EltSize).first;
3619
3620 if (RB.getID() == AArch64::FPRRegBankID) {
3621 auto InsSub = emitScalarToVector(EltSize, DstRC, EltReg, MIRBuilder);
3622 InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg})
3623 .addImm(LaneIdx)
3624 .addUse(InsSub->getOperand(0).getReg())
3625 .addImm(0);
3626 } else {
3627 InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg})
3628 .addImm(LaneIdx)
3629 .addUse(EltReg);
3630 }
3631
3632 constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
3633 return InsElt;
3634}
3635
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003636bool AArch64InstructionSelector::selectInsertElt(
3637 MachineInstr &I, MachineRegisterInfo &MRI) const {
3638 assert(I.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);
3639
3640 // Get information on the destination.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003641 Register DstReg = I.getOperand(0).getReg();
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003642 const LLT DstTy = MRI.getType(DstReg);
Jessica Paquetted3ffd472019-03-29 21:39:36 +00003643 unsigned VecSize = DstTy.getSizeInBits();
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003644
3645 // Get information on the element we want to insert into the destination.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003646 Register EltReg = I.getOperand(2).getReg();
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003647 const LLT EltTy = MRI.getType(EltReg);
3648 unsigned EltSize = EltTy.getSizeInBits();
3649 if (EltSize < 16 || EltSize > 64)
3650 return false; // Don't support all element types yet.
3651
3652 // Find the definition of the index. Bail out if it's not defined by a
3653 // G_CONSTANT.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003654 Register IdxReg = I.getOperand(3).getReg();
Jessica Paquette76f64b62019-04-26 21:53:13 +00003655 auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI);
3656 if (!VRegAndVal)
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003657 return false;
Jessica Paquette76f64b62019-04-26 21:53:13 +00003658 unsigned LaneIdx = VRegAndVal->Value;
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003659
3660 // Perform the lane insert.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003661 Register SrcReg = I.getOperand(1).getReg();
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003662 const RegisterBank &EltRB = *RBI.getRegBank(EltReg, MRI, TRI);
3663 MachineIRBuilder MIRBuilder(I);
Jessica Paquetted3ffd472019-03-29 21:39:36 +00003664
3665 if (VecSize < 128) {
3666 // If the vector we're inserting into is smaller than 128 bits, widen it
3667 // to 128 to do the insert.
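    // E.g. inserting into a <2 x s32>: widen to 128 bits, do the lane insert
    // there, then demote the result back down (see below).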
3668 MachineInstr *ScalarToVec = emitScalarToVector(
3669 VecSize, &AArch64::FPR128RegClass, SrcReg, MIRBuilder);
3670 if (!ScalarToVec)
3671 return false;
3672 SrcReg = ScalarToVec->getOperand(0).getReg();
3673 }
3674
3675 // Create an insert into a new FPR128 register.
3676 // Note that if our vector is already 128 bits, we end up emitting an extra
3677 // register.
3678 MachineInstr *InsMI =
3679 emitLaneInsert(None, SrcReg, EltReg, LaneIdx, EltRB, MIRBuilder);
3680
3681 if (VecSize < 128) {
3682 // If we had to widen to perform the insert, then we have to demote back to
3683 // the original size to get the result we want.
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003684 Register DemoteVec = InsMI->getOperand(0).getReg();
Jessica Paquetted3ffd472019-03-29 21:39:36 +00003685 const TargetRegisterClass *RC =
3686 getMinClassForRegBank(*RBI.getRegBank(DemoteVec, MRI, TRI), VecSize);
3687 if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
3688 LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
3689 return false;
3690 }
3691 unsigned SubReg = 0;
3692 if (!getSubRegForClass(RC, TRI, SubReg))
3693 return false;
3694 if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
3695 LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << VecSize
3696 << "\n");
3697 return false;
3698 }
3699 MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3700 .addReg(DemoteVec, 0, SubReg);
3701 RBI.constrainGenericRegister(DstReg, *RC, MRI);
3702 } else {
3703 // No widening needed.
3704 InsMI->getOperand(0).setReg(DstReg);
3705 constrainSelectedInstRegOperands(*InsMI, TII, TRI, RBI);
3706 }
3707
Jessica Paquette5aff1f42019-03-14 18:01:30 +00003708 I.eraseFromParent();
3709 return true;
3710}
3711
Amara Emerson5ec14602018-12-10 18:44:58 +00003712bool AArch64InstructionSelector::selectBuildVector(
3713 MachineInstr &I, MachineRegisterInfo &MRI) const {
3714 assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3715 // Until we port more of the optimized selections, just use a vector
3716 // insert sequence.
3717 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3718 const LLT EltTy = MRI.getType(I.getOperand(1).getReg());
3719 unsigned EltSize = EltTy.getSizeInBits();
Jessica Paquette245047d2019-01-24 22:00:41 +00003720 if (EltSize < 16 || EltSize > 64)
Amara Emerson5ec14602018-12-10 18:44:58 +00003721 return false; // Don't support all element types yet.
3722 const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003723 MachineIRBuilder MIRBuilder(I);
Jessica Paquette245047d2019-01-24 22:00:41 +00003724
3725 const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass;
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003726 MachineInstr *ScalarToVec =
Amara Emerson8acb0d92019-03-04 19:16:00 +00003727 emitScalarToVector(DstTy.getElementType().getSizeInBits(), DstRC,
3728 I.getOperand(1).getReg(), MIRBuilder);
Amara Emerson6bcfa1c2019-02-25 18:52:54 +00003729 if (!ScalarToVec)
Jessica Paquette245047d2019-01-24 22:00:41 +00003730 return false;
3731
Matt Arsenaultfaeaedf2019-06-24 16:16:12 +00003732 Register DstVec = ScalarToVec->getOperand(0).getReg();
Jessica Paquette245047d2019-01-24 22:00:41 +00003733 unsigned DstSize = DstTy.getSizeInBits();
3734
3735 // Keep track of the last MI we inserted. Later on, we might be able to save
3736 // a copy using it.
3737 MachineInstr *PrevMI = nullptr;
3738 for (unsigned i = 2, e = DstSize / EltSize + 1; i < e; ++i) {
Jessica Paquette16d67a32019-03-13 23:22:23 +00003739 // Note that if we don't do a subregister copy, we can end up making an
3740 // extra register.
3741 PrevMI = &*emitLaneInsert(None, DstVec, I.getOperand(i).getReg(), i - 1, RB,
3742 MIRBuilder);
3743 DstVec = PrevMI->getOperand(0).getReg();
Amara Emerson5ec14602018-12-10 18:44:58 +00003744 }
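
  // Illustrative example (not from the original comments): for
  //   %dst:fpr(<4 x s32>) = G_BUILD_VECTOR %a, %b, %c, %d
  // the scalar-to-vector step above puts %a in lane 0, and this loop then
  // emits lane inserts for %b, %c, and %d into lanes 1, 2, and 3.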

  // If DstTy's size in bits is less than 128, then emit a subregister copy
  // from DstVec to the last register we've defined.
  if (DstSize < 128) {
    // Force this to be FPR using the destination vector.
    const TargetRegisterClass *RC =
        getMinClassForRegBank(*RBI.getRegBank(DstVec, MRI, TRI), DstSize);
    if (!RC)
      return false;
    if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
      LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
      return false;
    }

    unsigned SubReg = 0;
    if (!getSubRegForClass(RC, TRI, SubReg))
      return false;
    if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
      LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << DstSize
                        << ")\n");
      return false;
    }

    Register Reg = MRI.createVirtualRegister(RC);
    Register DstReg = I.getOperand(0).getReg();

    MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
        .addReg(DstVec, 0, SubReg);
    MachineOperand &RegOp = I.getOperand(1);
    RegOp.setReg(Reg);
    RBI.constrainGenericRegister(DstReg, *RC, MRI);
  } else {
    // We don't need a subregister copy. Save a copy by re-using the
    // destination register on the final insert.
    assert(PrevMI && "PrevMI was null?");
    PrevMI->getOperand(0).setReg(I.getOperand(0).getReg());
    constrainSelectedInstRegOperands(*PrevMI, TII, TRI, RBI);
  }

  I.eraseFromParent();
  return true;
}

/// Helper function to find an intrinsic ID on a MachineInstr. Returns the
/// ID if it exists, and 0 otherwise.
static unsigned findIntrinsicID(MachineInstr &I) {
  auto IntrinOp = find_if(I.operands(), [&](const MachineOperand &Op) {
    return Op.isIntrinsicID();
  });
  if (IntrinOp == I.operands_end())
    return 0;
  return IntrinOp->getIntrinsicID();
}

/// Helper function to emit the correct opcode for a llvm.aarch64.stlxr
/// intrinsic.
static unsigned getStlxrOpcode(unsigned NumBytesToStore) {
  switch (NumBytesToStore) {
  // TODO: 1, 2, and 4 byte stores.
  case 8:
    return AArch64::STLXRX;
  default:
    LLVM_DEBUG(dbgs() << "Unexpected number of bytes to store! ("
                      << NumBytesToStore << ")\n");
    break;
  }
  return 0;
}
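
// For example, an 8-byte store-release-exclusive selects to STLXRX, i.e. the
// "stlxr ws, xt, [xn]" form, where the 32-bit status result lands in ws.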

bool AArch64InstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  // Find the intrinsic ID.
  unsigned IntrinID = findIntrinsicID(I);
  if (!IntrinID)
    return false;
  MachineIRBuilder MIRBuilder(I);

  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::trap:
    MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(1);
    break;
  case Intrinsic::debugtrap:
    if (!STI.isTargetWindows())
      return false;
    MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(0xF000);
    break;
  case Intrinsic::aarch64_stlxr:
    Register StatReg = I.getOperand(0).getReg();
    assert(RBI.getSizeInBits(StatReg, MRI, TRI) == 32 &&
           "Status register must be 32 bits!");
    Register SrcReg = I.getOperand(2).getReg();

    if (RBI.getSizeInBits(SrcReg, MRI, TRI) != 64) {
      LLVM_DEBUG(dbgs() << "Only support 64-bit sources right now.\n");
      return false;
    }

    Register PtrReg = I.getOperand(3).getReg();
    assert(MRI.getType(PtrReg).isPointer() && "Expected pointer operand");

    // Expect only one memory operand.
    if (!I.hasOneMemOperand())
      return false;

    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned NumBytesToStore = MemOp->getSize();
    unsigned Opc = getStlxrOpcode(NumBytesToStore);
    if (!Opc)
      return false;

    auto StoreMI = MIRBuilder.buildInstr(Opc, {StatReg}, {SrcReg, PtrReg});
    constrainSelectedInstRegOperands(*StoreMI, TII, TRI, RBI);
  }

  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectIntrinsic(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  unsigned IntrinID = findIntrinsicID(I);
  if (!IntrinID)
    return false;
  MachineIRBuilder MIRBuilder(I);

  switch (IntrinID) {
  default:
    break;
  case Intrinsic::aarch64_crypto_sha1h:
    Register DstReg = I.getOperand(0).getReg();
    Register SrcReg = I.getOperand(2).getReg();

    // FIXME: Should this be an assert?
    if (MRI.getType(DstReg).getSizeInBits() != 32 ||
        MRI.getType(SrcReg).getSizeInBits() != 32)
      return false;

    // The operation has to happen on FPRs. Set up some new FPR registers for
    // the source and destination if they are on GPRs.
    if (RBI.getRegBank(SrcReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
      SrcReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);
      MIRBuilder.buildCopy({SrcReg}, {I.getOperand(2)});

      // Make sure the copy ends up getting constrained properly.
      RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                   AArch64::GPR32RegClass, MRI);
    }

    if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID)
      DstReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass);

    // Actually insert the instruction.
    auto SHA1Inst = MIRBuilder.buildInstr(AArch64::SHA1Hrr, {DstReg}, {SrcReg});
    constrainSelectedInstRegOperands(*SHA1Inst, TII, TRI, RBI);

    // Did we create a new register for the destination?
    if (DstReg != I.getOperand(0).getReg()) {
      // Yep. Copy the result of the instruction back into the original
      // destination.
      MIRBuilder.buildCopy({I.getOperand(0)}, {DstReg});
      RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                   AArch64::GPR32RegClass, MRI);
    }

    I.eraseFromParent();
    return true;
  }
  return false;
}

static Optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
  auto &MI = *Root.getParent();
  auto &MBB = *MI.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();
  uint64_t Immed;
  if (Root.isImm())
    Immed = Root.getImm();
  else if (Root.isCImm())
    Immed = Root.getCImm()->getZExtValue();
  else if (Root.isReg()) {
    auto ValAndVReg =
        getConstantVRegValWithLookThrough(Root.getReg(), MRI, true);
    if (!ValAndVReg)
      return None;
    Immed = ValAndVReg->Value;
  } else
    return None;
  return Immed;
}

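// The four selectShift renderers below compute the immr/imms immediates used
// when a constant shift is selected as a bitfield move. E.g., on AArch64,
// "lsl w0, w1, #4" is an alias of "ubfm w0, w1, #28, #27": the "A" renderer
// produces the first immediate, (32 - 4) & 0x1f = 28, and the "B" renderer
// the second, 31 - 4 = 27. (Explanatory note, not from the original comments.)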
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectShiftA_32(const MachineOperand &Root) const {
  auto MaybeImmed = getImmedFromMO(Root);
  if (MaybeImmed == None || *MaybeImmed > 31)
    return None;
  uint64_t Enc = (32 - *MaybeImmed) & 0x1f;
  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
}

InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectShiftB_32(const MachineOperand &Root) const {
  auto MaybeImmed = getImmedFromMO(Root);
  if (MaybeImmed == None || *MaybeImmed > 31)
    return None;
  uint64_t Enc = 31 - *MaybeImmed;
  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
}

InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectShiftA_64(const MachineOperand &Root) const {
  auto MaybeImmed = getImmedFromMO(Root);
  if (MaybeImmed == None || *MaybeImmed > 63)
    return None;
  uint64_t Enc = (64 - *MaybeImmed) & 0x3f;
  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
}

InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectShiftB_64(const MachineOperand &Root) const {
  auto MaybeImmed = getImmedFromMO(Root);
  if (MaybeImmed == None || *MaybeImmed > 63)
    return None;
  uint64_t Enc = 63 - *MaybeImmed;
  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return the rendered
/// 12-bit value and the shifter operand; otherwise return None.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  auto MaybeImmed = getImmedFromMO(Root);
  if (MaybeImmed == None)
    return None;
  uint64_t Immed = *MaybeImmed;
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return None;
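
  // For example, Immed = 0x123 is used as-is with ShiftAmt = 0, while
  // Immed = 0x123000 becomes Immed = 0x123 with ShiftAmt = 12. A value such
  // as 0x123456 fits neither form and was rejected above.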

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
  }};
}

/// Return true if it is worth folding MI into an extended register. That is,
/// if it's safe to pull it into the addressing mode of a load or store as a
/// shift.
bool AArch64InstructionSelector::isWorthFoldingIntoExtendedReg(
    MachineInstr &MI, const MachineRegisterInfo &MRI) const {
  // Always fold if there is one use, or if we're optimizing for size.
  Register DefReg = MI.getOperand(0).getReg();
  if (MRI.hasOneUse(DefReg) ||
      MI.getParent()->getParent()->getFunction().hasMinSize())
    return true;

  // It's better to avoid folding and recomputing shifts when we don't have a
  // fastpath.
  if (!STI.hasLSLFast())
    return false;

  // We have a fastpath, so folding a shift in and potentially computing it
  // many times may be beneficial. Check if this is only used in memory ops.
  // If it is, then we should fold.
  return all_of(MRI.use_instructions(DefReg),
                [](MachineInstr &Use) { return Use.mayLoadOrStore(); });
}
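
// For example, a G_SHL feeding two G_LOADs is still worth folding on an
// LSLFast subtarget: each load can absorb the shift into its addressing mode
// rather than computing the shifted value once and reusing it. (Illustrative
// reading of the check above.)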

/// This is used for computing addresses like this:
///
/// ldr x1, [x2, x3, lsl #3]
///
/// Where x2 is the base register, and x3 is an offset register. The shift-left
/// is a constant value specific to this load instruction. That is, we'll never
/// see anything other than a 3 here (which corresponds to the size of the
/// element being loaded.)
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeShiftedExtendXReg(
    MachineOperand &Root, unsigned SizeInBytes) const {
  if (!Root.isReg())
    return None;
  MachineRegisterInfo &MRI = Root.getParent()->getMF()->getRegInfo();

  // Make sure that the memory op is a valid size.
  int64_t LegalShiftVal = Log2_32(SizeInBytes);
  if (LegalShiftVal == 0)
    return None;

  // We want to find something like this:
  //
  // val = G_CONSTANT LegalShiftVal
  // shift = G_SHL off_reg val
  // ptr = G_GEP base_reg shift
  // x = G_LOAD ptr
  //
  // And fold it into this addressing mode:
  //
  // ldr x, [base_reg, off_reg, lsl #LegalShiftVal]

  // Check if we can find the G_GEP.
  MachineInstr *Gep = getOpcodeDef(TargetOpcode::G_GEP, Root.getReg(), MRI);
  if (!Gep || !isWorthFoldingIntoExtendedReg(*Gep, MRI))
    return None;

  // Now try to match the G_SHL.
  MachineInstr *Shl =
      getOpcodeDef(TargetOpcode::G_SHL, Gep->getOperand(2).getReg(), MRI);
  if (!Shl || !isWorthFoldingIntoExtendedReg(*Shl, MRI))
    return None;

  // Now, try to find the specific G_CONSTANT.
  auto ValAndVReg =
      getConstantVRegValWithLookThrough(Shl->getOperand(2).getReg(), MRI);
  if (!ValAndVReg)
    return None;

  // The value must fit into 3 bits, and must be positive. Make sure that is
  // true.
  int64_t ImmVal = ValAndVReg->Value;
  if ((ImmVal & 0x7) != ImmVal)
    return None;

  // We are only allowed to shift by LegalShiftVal. This shift value is built
  // into the instruction, so we can't just use whatever we want.
  if (ImmVal != LegalShiftVal)
    return None;

  // We can use the LHS of the GEP as the base, and the LHS of the shift as an
  // offset. Signify that we are shifting by setting the shift flag to 1.
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Gep->getOperand(1)); },
      [=](MachineInstrBuilder &MIB) { MIB.add(Shl->getOperand(1)); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(1); },
  }};
}

/// This is used for computing addresses like this:
///
/// ldr x1, [x2, x3]
///
/// Where x2 is the base register, and x3 is an offset register.
///
/// When it is possible (or profitable) to fold a G_GEP into the address
/// calculation, this will do so. Otherwise, it will return None.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeRegisterOffset(
    MachineOperand &Root) const {
  MachineRegisterInfo &MRI = Root.getParent()->getMF()->getRegInfo();

  // We need a GEP.
  MachineInstr *Gep = MRI.getVRegDef(Root.getReg());
  if (!Gep || Gep->getOpcode() != TargetOpcode::G_GEP)
    return None;

  // If this is used more than once, let's not bother folding.
  // TODO: Check if they are memory ops. If they are, then we can still fold
  // without having to recompute anything.
  if (!MRI.hasOneUse(Gep->getOperand(0).getReg()))
    return None;

  // Base is the GEP's LHS, offset is its RHS.
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Gep->getOperand(1)); },
      [=](MachineInstrBuilder &MIB) { MIB.add(Gep->getOperand(2)); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
  }};
}

/// This is intended to be equivalent to selectAddrModeXRO in
/// AArch64ISelDAGToDAG. It's used for selecting X register offset loads.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeXRO(MachineOperand &Root,
                                              unsigned SizeInBytes) const {
  MachineRegisterInfo &MRI = Root.getParent()->getMF()->getRegInfo();

  // If we have a constant offset, then we probably don't want to match a
  // register offset.
  if (isBaseWithConstantOffset(Root, MRI))
    return None;

  // Try to fold shifts into the addressing mode.
  auto AddrModeFns = selectAddrModeShiftedExtendXReg(Root, SizeInBytes);
  if (AddrModeFns)
    return AddrModeFns;

  // If that doesn't work, see if it's possible to fold in registers from
  // a GEP.
  return selectAddrModeRegisterOffset(Root);
}

/// Select a "register plus unscaled signed 9-bit immediate" address. This
/// should only match when there is an offset that is not valid for a scaled
/// immediate addressing mode. The "Size" argument is the size in bytes of the
/// memory reference, which is needed here to know what is valid for a scaled
/// immediate.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
                                                   unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  if (!isBaseWithConstantOffset(Root, MRI))
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  MachineOperand &OffImm = RootDef->getOperand(2);
  if (!OffImm.isReg())
    return None;
  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
    return None;
  int64_t RHSC;
  MachineOperand &RHSOp1 = RHS->getOperand(1);
  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
    return None;
  RHSC = RHSOp1.getCImm()->getSExtValue();

  // If the offset is valid as a scaled immediate, don't match here.
  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
      RHSC < (0x1000 << Log2_32(Size)))
    return None;
  if (RHSC >= -256 && RHSC < 256) {
    MachineOperand &Base = RootDef->getOperand(1);
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
    }};
  }
  return None;
}
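
// For example, with Size = 4, an offset of 3 is not a multiple of 4 and so
// cannot use the scaled form, but it fits the signed 9-bit range [-256, 255]
// and matches here (an LDUR-style access). An offset of 8 is rejected above
// because it is already valid as a scaled immediate.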

/// Select a "register plus scaled unsigned 12-bit immediate" address. The
/// "Size" argument is the size in bytes of the memory reference, which
/// determines the scale.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
                                                  unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
    if (LHSDef && RHSDef) {
      int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
          return {{
              [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
              [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
          }};

        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
        }};
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (selectAddrModeUnscaled(Root, Size).hasValue())
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
  }};
}
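
// For example, with Size = 8, a constant offset of 16 is a multiple of 8 and
// below (0x1000 << 3), so it renders as the base plus immediate 2 (16 >> 3);
// the scaled-immediate encoding multiplies that back up by the access size.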

void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
  assert(CstVal && "Expected constant value");
  MIB.addImm(CstVal.getValue());
}

namespace llvm {
InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &TM,
                                 AArch64Subtarget &Subtarget,
                                 AArch64RegisterBankInfo &RBI) {
  return new AArch64InstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm