//===- AArch64InstructionSelector.cpp ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

  void setupMF(MachineFunction &MF, CodeGenCoverage &CoverageInfo) override {
    InstructionSelector::setupMF(MF, CoverageInfo);

    // hasFnAttribute() is expensive to call on every BRCOND selection, so
    // cache it here for each run of the selector.
    ProduceNonFlagSettingCondBr =
        !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
  }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts.
  void preISelLower(MachineInstr &I) const;

  // An early selection function that runs before the selectImpl() call.
  bool earlySelect(MachineInstr &I) const;

  bool earlySelectSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool earlySelectLoad(MachineInstr &I, MachineRegisterInfo &MRI) const;

  /// Eliminate same-sized cross-bank copies into stores before selectImpl().
  void contractCrossBankCopyIntoStore(MachineInstr &I,
                                      MachineRegisterInfo &MRI) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectVectorASHR(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;

  // Helper to generate an equivalent of scalar_to_vector into a new vector
  // register; the new register is defined by the returned instruction.
  MachineInstr *emitScalarToVector(unsigned EltSize,
                                   const TargetRegisterClass *DstRC,
                                   Register Scalar,
                                   MachineIRBuilder &MIRBuilder) const;

  /// Emit a lane insert into \p DstReg, or a new vector register if None is
  /// provided.
  ///
  /// The lane inserted into is defined by \p LaneIdx. The vector source
  /// register is given by \p SrcReg. The register containing the element is
  /// given by \p EltReg.
  MachineInstr *emitLaneInsert(Optional<Register> DstReg, Register SrcReg,
                               Register EltReg, unsigned LaneIdx,
                               const RegisterBank &RB,
                               MachineIRBuilder &MIRBuilder) const;
  bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;

  bool selectShuffleVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectConcatVectors(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectSplitVectorUnmerge(MachineInstr &I,
                                MachineRegisterInfo &MRI) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                      MachineRegisterInfo &MRI) const;
  bool selectIntrinsic(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectVectorICmp(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicTrunc(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectIntrinsicRound(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectJumpTable(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectBrJT(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectTLSGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI) const;

  unsigned emitConstantPoolEntry(Constant *CPVal, MachineFunction &MF) const;
  MachineInstr *emitLoadFromConstantPool(Constant *CPVal,
                                         MachineIRBuilder &MIRBuilder) const;

  // Emit a vector concat operation.
  MachineInstr *emitVectorConcat(Optional<Register> Dst, Register Op1,
                                 Register Op2,
                                 MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
                                   MachineOperand &Predicate,
                                   MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitADD(Register DefReg, MachineOperand &LHS,
                        MachineOperand &RHS,
                        MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitCMN(MachineOperand &LHS, MachineOperand &RHS,
                        MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitTST(const Register &LHS, const Register &RHS,
                        MachineIRBuilder &MIRBuilder) const;
  MachineInstr *emitExtractVectorElt(Optional<Register> DstReg,
                                     const RegisterBank &DstRB, LLT ScalarTy,
                                     Register VecReg, unsigned LaneIdx,
                                     MachineIRBuilder &MIRBuilder) const;

  /// Helper function for selecting G_FCONSTANT. If the G_FCONSTANT can be
  /// materialized using an FMOV instruction, then update MI and return it.
  /// Otherwise, do nothing and return nullptr.
  MachineInstr *emitFMovForFConstant(MachineInstr &MI,
                                     MachineRegisterInfo &MRI) const;

  /// Emit a CSet for a compare.
  MachineInstr *emitCSetForICMP(Register DefReg, unsigned Pred,
                                MachineIRBuilder &MIRBuilder) const;

  // Equivalent to the i32shift_a and friends from AArch64InstrInfo.td.
  // We use these manually instead of using the importer since it doesn't
  // support SDNodeXForm.
  ComplexRendererFns selectShiftA_32(const MachineOperand &Root) const;
  ComplexRendererFns selectShiftB_32(const MachineOperand &Root) const;
  ComplexRendererFns selectShiftA_64(const MachineOperand &Root) const;
  ComplexRendererFns selectShiftB_64(const MachineOperand &Root) const;
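  // Illustrative note: these compute the UBFM immediates that implement a
  // constant shift. E.g. a 32-bit shift-left by C is the LSL alias of UBFM,
  // with immr = (32 - C) & 0x1f (selectShiftA_32) and imms = 31 - C
  // (selectShiftB_32); so a G_SHL by 3 selects to UBFMWri with immr = 29 and
  // imms = 28.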

  ComplexRendererFns select12BitValueWithLeftShift(uint64_t Immed) const;
  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
  ComplexRendererFns selectNegArithImmed(MachineOperand &Root) const;
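  // Illustrative note: an AArch64 arithmetic immediate is a 12-bit unsigned
  // value, optionally shifted left by 12. E.g. 0xabc000 encodes as imm = 0xabc
  // with shift = 12. selectNegArithImmed matches the negated value instead, so
  // e.g. an add of -5 can be selected as a subtract of 5.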

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }
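  // Illustrative note: the "indexed" mode is base plus a scaled unsigned
  // 12-bit immediate, where the offset is scaled by the access size. E.g. for
  // Width == 64 (LDRXui/STRXui), the byte offset must be a multiple of 8 in
  // [0, 8 * 4095]; other offsets are typically handled by the unscaled
  // (LDUR-style) forms above.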

  bool isWorthFoldingIntoExtendedReg(MachineInstr &MI,
                                     const MachineRegisterInfo &MRI) const;
  ComplexRendererFns
  selectAddrModeShiftedExtendXReg(MachineOperand &Root,
                                  unsigned SizeInBytes) const;
  ComplexRendererFns selectAddrModeRegisterOffset(MachineOperand &Root) const;
  ComplexRendererFns selectAddrModeXRO(MachineOperand &Root,
                                       unsigned SizeInBytes) const;
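  // Illustrative note: the XRO forms use a register offset, optionally scaled
  // by the access size, e.g. "ldr x0, [x1, x2, lsl #3]" for an 8-byte access.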

  void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const;

  // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
  void materializeLargeCMVal(MachineInstr &I, const Value *V,
                             unsigned OpFlags) const;

  // Optimization methods.
  bool tryOptVectorShuffle(MachineInstr &I) const;
  bool tryOptVectorDup(MachineInstr &MI) const;
  bool tryOptSelect(MachineInstr &MI) const;
  MachineInstr *tryFoldIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
                                      MachineOperand &Predicate,
                                      MachineIRBuilder &MIRBuilder) const;

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

  bool ProduceNonFlagSettingCondBr = false;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

  // We declare the temporaries used by selectImpl() in the class to minimize
  // the cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI,
                         bool GetAllRegSet = false) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() <= 16)
      return &AArch64::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Given a register bank and size in bits, return the smallest register class
/// that can represent that combination.
static const TargetRegisterClass *
getMinClassForRegBank(const RegisterBank &RB, unsigned SizeInBits,
                      bool GetAllRegSet = false) {
  unsigned RegBankID = RB.getID();

  if (RegBankID == AArch64::GPRRegBankID) {
    if (SizeInBits <= 32)
      return GetAllRegSet ? &AArch64::GPR32allRegClass
                          : &AArch64::GPR32RegClass;
    if (SizeInBits == 64)
      return GetAllRegSet ? &AArch64::GPR64allRegClass
                          : &AArch64::GPR64RegClass;
  }

  if (RegBankID == AArch64::FPRRegBankID) {
    switch (SizeInBits) {
    default:
      return nullptr;
    case 8:
      return &AArch64::FPR8RegClass;
    case 16:
      return &AArch64::FPR16RegClass;
    case 32:
      return &AArch64::FPR32RegClass;
    case 64:
      return &AArch64::FPR64RegClass;
    case 128:
      return &AArch64::FPR128RegClass;
    }
  }

  return nullptr;
}

/// Sets \p SubReg to the correct subregister for the given register class.
/// Returns false if no suitable subregister could be determined.
static bool getSubRegForClass(const TargetRegisterClass *RC,
                              const TargetRegisterInfo &TRI,
                              unsigned &SubReg) {
  switch (TRI.getRegSizeInBits(*RC)) {
  case 8:
    SubReg = AArch64::bsub;
    break;
  case 16:
    SubReg = AArch64::hsub;
    break;
  case 32:
    if (RC == &AArch64::GPR32RegClass)
      SubReg = AArch64::sub_32;
    else
      SubReg = AArch64::ssub;
    break;
  case 64:
    SubReg = AArch64::dsub;
    break;
  default:
    LLVM_DEBUG(
        dbgs() << "Couldn't find appropriate subregister for register class.");
    return false;
  }

  return true;
}
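// A usage sketch (illustrative): for the 32-bit FPR class this reports the
// low-32-bit "ssub" subregister.
//   unsigned SubReg;
//   if (getSubRegForClass(&AArch64::FPR32RegClass, TRI, SubReg))
//     assert(SubReg == AArch64::ssub);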

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - its operands are not all on the same register bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!Register::isVirtualRegister(MO.getReg())) {
      LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}
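// For example, selectBinaryOp(TargetOpcode::G_ASHR, AArch64::GPRRegBankID, 64)
// yields AArch64::ASRVXr, while an unhandled combination returns the generic
// opcode unchanged so the caller can tell selection failed.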

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}
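// For example, a 32-bit G_LOAD on the GPR bank selects AArch64::LDRWui, while
// the same load on the FPR bank selects AArch64::LDRSui.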

#ifndef NDEBUG
/// Helper function that verifies that we have a valid copy at the end of
/// selectCopy. Verifies that the source and dest have the expected sizes and
/// then returns true.
static bool isValidCopy(const MachineInstr &I, const RegisterBank &DstBank,
                        const MachineRegisterInfo &MRI,
                        const TargetRegisterInfo &TRI,
                        const RegisterBankInfo &RBI) {
  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  // Make sure the size of the source and dest line up.
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types; the number of
       // bits may not exactly match.
       (Register::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
       // Copies are a means to move bits around; as long as we stay on the
       // same register class, that's fine. Otherwise, we may need a
       // SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");

  // Check the size of the destination.
  assert((DstSize <= 64 || DstBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  return true;
}
#endif

/// Helper function for selectCopy. Inserts a subregister copy from class
/// \p From to class \p To, linking it up to \p I.
///
/// e.g., given I = "Dst = COPY SrcReg", we'll transform that into
///
/// CopyReg (From class) = COPY SrcReg
/// SubRegCopy (To class) = COPY CopyReg:SubReg
/// Dst = COPY SubRegCopy
static bool selectSubregisterCopy(MachineInstr &I, MachineRegisterInfo &MRI,
                                  const RegisterBankInfo &RBI, Register SrcReg,
                                  const TargetRegisterClass *From,
                                  const TargetRegisterClass *To,
                                  unsigned SubReg) {
  MachineIRBuilder MIB(I);
  auto Copy = MIB.buildCopy({From}, {SrcReg});
  auto SubRegCopy = MIB.buildInstr(TargetOpcode::COPY, {To}, {})
                        .addReg(Copy.getReg(0), 0, SubReg);
  MachineOperand &RegOp = I.getOperand(1);
  RegOp.setReg(SubRegCopy.getReg(0));

  // It's possible that the destination register won't be constrained. Make
  // sure that happens.
  if (!Register::isPhysicalRegister(I.getOperand(0).getReg()))
    RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);

  return true;
}

/// Helper function to get the source and destination register classes for a
/// copy. Returns a std::pair of the source and destination register classes;
/// either element is nullptr if the class could not be determined.
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getRegClassesForCopy(MachineInstr &I, const TargetInstrInfo &TII,
                     MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                     const RegisterBankInfo &RBI) {
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
  unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  // Special casing for cross-bank copies of s1s. We can technically represent
  // a 1-bit value with any size of register. The minimum size for a GPR is 32
  // bits. So, we need to put the FPR on 32 bits as well.
  //
  // FIXME: I'm not sure if this case holds true outside of copies. If it does,
  // then we can pull it into the helpers that get the appropriate class for a
  // register bank. Or make a new helper that carries along some constraint
  // information.
  if (SrcRegBank != DstRegBank && (DstSize == 1 && SrcSize == 1))
    SrcSize = DstSize = 32;

  return {getMinClassForRegBank(SrcRegBank, SrcSize, true),
          getMinClassForRegBank(DstRegBank, DstSize, true)};
}
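// For example, copying an s1 from the GPR bank to the FPR bank widens both
// sides to 32 bits, yielding {&AArch64::GPR32allRegClass,
// &AArch64::FPR32RegClass}.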

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  // Find the correct register classes for the source and destination
  // registers.
  const TargetRegisterClass *SrcRC;
  const TargetRegisterClass *DstRC;
  std::tie(SrcRC, DstRC) = getRegClassesForCopy(I, TII, MRI, TRI, RBI);

  if (!DstRC) {
    LLVM_DEBUG(dbgs() << "Unexpected dest size "
                      << RBI.getSizeInBits(DstReg, MRI, TRI) << '\n');
    return false;
  }

  // A couple of helpers below, for making sure that the copy we produce is
  // valid.

  // Set to true if we insert a SUBREG_TO_REG. If we do this, then we don't
  // want to verify that the src and dst are the same size, since that's
  // handled by the SUBREG_TO_REG.
  bool KnownValid = false;

  // Returns true, asserting via isValidCopy() that the copy we produced is
  // valid, unless KnownValid tells us we can skip the check.
  auto CheckCopy = [&]() {
    // If we have a bitcast or something, we can't have physical registers.
    assert((I.isCopy() ||
            (!Register::isPhysicalRegister(I.getOperand(0).getReg()) &&
             !Register::isPhysicalRegister(I.getOperand(1).getReg()))) &&
           "No phys reg on generic operator!");
    assert(KnownValid || isValidCopy(I, DstRegBank, MRI, TRI, RBI));
    (void)KnownValid;
    return true;
  };

  // Is this a copy? If so, then we may need to insert a subregister copy, or
  // a SUBREG_TO_REG.
  if (I.isCopy()) {
    // Yes. Check if there's anything to fix up.
    if (!SrcRC) {
      LLVM_DEBUG(dbgs() << "Couldn't determine source register class\n");
      return false;
    }

    // Is this a cross-bank copy?
    if (DstRegBank.getID() != SrcRegBank.getID()) {
      // If we're doing a cross-bank copy on different-sized registers, we need
      // to do a bit more work.
      unsigned SrcSize = TRI.getRegSizeInBits(*SrcRC);
      unsigned DstSize = TRI.getRegSizeInBits(*DstRC);

      if (SrcSize > DstSize) {
        // We're doing a cross-bank copy into a smaller register. We need a
        // subregister copy. First, get a register class that's on the same
        // bank as the destination, but the same size as the source.
        const TargetRegisterClass *SubregRC =
            getMinClassForRegBank(DstRegBank, SrcSize, true);
        assert(SubregRC && "Didn't get a register class for subreg?");

        // Get the appropriate subregister for the destination.
        unsigned SubReg = 0;
        if (!getSubRegForClass(DstRC, TRI, SubReg)) {
          LLVM_DEBUG(dbgs() << "Couldn't determine subregister for copy.\n");
          return false;
        }

        // Now, insert a subregister copy using the new register class.
        selectSubregisterCopy(I, MRI, RBI, SrcReg, SubregRC, DstRC, SubReg);
        return CheckCopy();
      } else if (DstRegBank.getID() == AArch64::GPRRegBankID &&
                 DstSize == 32 && SrcSize == 16) {
        // Special case for FPR16 to GPR32.
        // FIXME: This can probably be generalized like the above case.
        Register PromoteReg =
            MRI.createVirtualRegister(&AArch64::FPR32RegClass);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(AArch64::SUBREG_TO_REG), PromoteReg)
            .addImm(0)
            .addUse(SrcReg)
            .addImm(AArch64::hsub);
        MachineOperand &RegOp = I.getOperand(1);
        RegOp.setReg(PromoteReg);

        // Promise that the copy is implicitly validated by the SUBREG_TO_REG.
        KnownValid = true;
      }
    }

    // If the destination is a physical register, then there's nothing to
    // change, so we're done.
    if (Register::isPhysicalRegister(DstReg))
      return CheckCopy();
  }

  // No need to constrain SrcReg. It will get constrained when we hit another
  // of its uses or defs. Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return CheckCopy();
}

static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
  return GenericOpc;
}
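// For example, selectFPConvOpc(TargetOpcode::G_SITOFP, LLT::scalar(64),
// LLT::scalar(32)) yields AArch64::SCVTFUWDri: convert a signed 32-bit GPR
// to a 64-bit FP register.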

static unsigned selectSelectOpc(MachineInstr &I, MachineRegisterInfo &MRI,
                                const RegisterBankInfo &RBI) {
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  bool IsFP = (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
               AArch64::GPRRegBankID);
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (Ty == LLT::scalar(32))
    return IsFP ? AArch64::FCSELSrrr : AArch64::CSELWr;
  else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64))
    return IsFP ? AArch64::FCSELDrrr : AArch64::CSELXr;
  return 0;
}
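// For example, a G_SELECT producing an s64 on the GPR bank selects
// AArch64::CSELXr; on the FPR bank it selects AArch64::FCSELDrrr. Unsupported
// types return 0 so the caller can bail out.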

/// Helper function to select the opcode for a G_FCMP.
static unsigned selectFCMPOpc(MachineInstr &I, MachineRegisterInfo &MRI) {
  // If this is a compare against +0.0, then we don't have to explicitly
  // materialize a constant.
  const ConstantFP *FPImm = getConstantFPVRegVal(I.getOperand(3).getReg(), MRI);
  bool ShouldUseImm = FPImm && (FPImm->isZero() && !FPImm->isNegative());
  unsigned OpSize = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
  if (OpSize != 32 && OpSize != 64)
    return 0;
  unsigned CmpOpcTbl[2][2] = {{AArch64::FCMPSrr, AArch64::FCMPDrr},
                              {AArch64::FCMPSri, AArch64::FCMPDri}};
  return CmpOpcTbl[ShouldUseImm][OpSize == 64];
}
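// For example, a 64-bit compare against +0.0 yields AArch64::FCMPDri, which
// compares against an implicit zero instead of a materialized constant.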

/// Returns true if \p P is an unsigned integer comparison predicate.
static bool isUnsignedICMPPred(const CmpInst::Predicate P) {
  switch (P) {
  default:
    return false;
  case CmpInst::ICMP_UGT:
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    return true;
  }
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
|  | 884 | case CmpInst::FCMP_ONE: | 
|  | 885 | CondCode = AArch64CC::MI; | 
|  | 886 | CondCode2 = AArch64CC::GT; | 
|  | 887 | break; | 
|  | 888 | case CmpInst::FCMP_ORD: | 
|  | 889 | CondCode = AArch64CC::VC; | 
|  | 890 | break; | 
|  | 891 | case CmpInst::FCMP_UNO: | 
|  | 892 | CondCode = AArch64CC::VS; | 
|  | 893 | break; | 
|  | 894 | case CmpInst::FCMP_UEQ: | 
|  | 895 | CondCode = AArch64CC::EQ; | 
|  | 896 | CondCode2 = AArch64CC::VS; | 
|  | 897 | break; | 
|  | 898 | case CmpInst::FCMP_UGT: | 
|  | 899 | CondCode = AArch64CC::HI; | 
|  | 900 | break; | 
|  | 901 | case CmpInst::FCMP_UGE: | 
|  | 902 | CondCode = AArch64CC::PL; | 
|  | 903 | break; | 
|  | 904 | case CmpInst::FCMP_ULT: | 
|  | 905 | CondCode = AArch64CC::LT; | 
|  | 906 | break; | 
|  | 907 | case CmpInst::FCMP_ULE: | 
|  | 908 | CondCode = AArch64CC::LE; | 
|  | 909 | break; | 
|  | 910 | case CmpInst::FCMP_UNE: | 
|  | 911 | CondCode = AArch64CC::NE; | 
|  | 912 | break; | 
|  | 913 | } | 
|  | 914 | } | 
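|  |  | // Note that FCMP_ONE and FCMP_UEQ have no single AArch64 condition code, so | 
|  |  | // they are returned as a pair (e.g. ONE == MI || GT); whenever CondCode2 is | 
|  |  | // not AL, callers are expected to test both conditions. | 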
|  | 915 |  | 
| Ahmed Bougacha | 641cb20 | 2017-03-27 16:35:31 +0000 | [diff] [blame] | 916 | bool AArch64InstructionSelector::selectCompareBranch( | 
|  | 917 | MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const { | 
|  | 918 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 919 | const Register CondReg = I.getOperand(0).getReg(); | 
| Ahmed Bougacha | 641cb20 | 2017-03-27 16:35:31 +0000 | [diff] [blame] | 920 | MachineBasicBlock *DestMBB = I.getOperand(1).getMBB(); | 
|  | 921 | MachineInstr *CCMI = MRI.getVRegDef(CondReg); | 
| Aditya Nandakumar | 02c602e | 2017-07-31 17:00:16 +0000 | [diff] [blame] | 922 | if (CCMI->getOpcode() == TargetOpcode::G_TRUNC) | 
|  | 923 | CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg()); | 
| Ahmed Bougacha | 641cb20 | 2017-03-27 16:35:31 +0000 | [diff] [blame] | 924 | if (CCMI->getOpcode() != TargetOpcode::G_ICMP) | 
|  | 925 | return false; | 
|  | 926 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 927 | Register LHS = CCMI->getOperand(2).getReg(); | 
|  | 928 | Register RHS = CCMI->getOperand(3).getReg(); | 
| Amara Emerson | 7a4d2df | 2019-07-10 19:21:43 +0000 | [diff] [blame] | 929 | auto VRegAndVal = getConstantVRegValWithLookThrough(RHS, MRI); | 
|  | 930 | if (!VRegAndVal) | 
| Ahmed Bougacha | 641cb20 | 2017-03-27 16:35:31 +0000 | [diff] [blame] | 931 | std::swap(RHS, LHS); | 
|  | 932 |  | 
| Amara Emerson | 7a4d2df | 2019-07-10 19:21:43 +0000 | [diff] [blame] | 933 | VRegAndVal = getConstantVRegValWithLookThrough(RHS, MRI); | 
|  | 934 | if (!VRegAndVal || VRegAndVal->Value != 0) { | 
|  | 935 | MachineIRBuilder MIB(I); | 
|  | 936 | // If we can't select a CBZ then emit a cmp + Bcc. | 
|  | 937 | if (!emitIntegerCompare(CCMI->getOperand(2), CCMI->getOperand(3), | 
|  | 938 | CCMI->getOperand(1), MIB)) | 
|  | 939 | return false; | 
|  | 940 | const AArch64CC::CondCode CC = changeICMPPredToAArch64CC( | 
|  | 941 | (CmpInst::Predicate)CCMI->getOperand(1).getPredicate()); | 
|  | 942 | MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC).addMBB(DestMBB); | 
|  | 943 | I.eraseFromParent(); | 
|  | 944 | return true; | 
|  | 945 | } | 
| Ahmed Bougacha | 641cb20 | 2017-03-27 16:35:31 +0000 | [diff] [blame] | 946 |  | 
|  | 947 | const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI); | 
|  | 948 | if (RB.getID() != AArch64::GPRRegBankID) | 
|  | 949 | return false; | 
|  | 950 |  | 
|  | 951 | const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate(); | 
|  | 952 | if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ) | 
|  | 953 | return false; | 
|  | 954 |  | 
|  | 955 | const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits(); | 
|  | 956 | unsigned CBOpc = 0; | 
|  | 957 | if (CmpWidth <= 32) | 
|  | 958 | CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW); | 
|  | 959 | else if (CmpWidth == 64) | 
|  | 960 | CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX); | 
|  | 961 | else | 
|  | 962 | return false; | 
|  | 963 |  | 
| Aditya Nandakumar | 18b3f9d | 2018-01-17 19:31:33 +0000 | [diff] [blame] | 964 | BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc)) | 
|  | 965 | .addUse(LHS) | 
|  | 966 | .addMBB(DestMBB) | 
|  | 967 | .constrainAllUses(TII, TRI, RBI); | 
| Ahmed Bougacha | 641cb20 | 2017-03-27 16:35:31 +0000 | [diff] [blame] | 968 |  | 
| Ahmed Bougacha | 641cb20 | 2017-03-27 16:35:31 +0000 | [diff] [blame] | 969 | I.eraseFromParent(); | 
|  | 970 | return true; | 
|  | 971 | } | 
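|  |  | // For example (a sketch, not from a test): | 
|  |  | //   %c:gpr(s1) = G_ICMP intpred(eq), %x(s64), 0 | 
|  |  | //   G_BRCOND %c(s1), %bb.1 | 
|  |  | // is selected to the single flag-free branch: | 
|  |  | //   CBZX %x, %bb.1 | 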
|  | 972 |  | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 973 | bool AArch64InstructionSelector::selectVectorSHL( | 
|  | 974 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 975 | assert(I.getOpcode() == TargetOpcode::G_SHL); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 976 | Register DstReg = I.getOperand(0).getReg(); | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 977 | const LLT Ty = MRI.getType(DstReg); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 978 | Register Src1Reg = I.getOperand(1).getReg(); | 
|  | 979 | Register Src2Reg = I.getOperand(2).getReg(); | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 980 |  | 
|  | 981 | if (!Ty.isVector()) | 
|  | 982 | return false; | 
|  | 983 |  | 
|  | 984 | unsigned Opc = 0; | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 985 | if (Ty == LLT::vector(4, 32)) { | 
|  | 986 | Opc = AArch64::USHLv4i32; | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 987 | } else if (Ty == LLT::vector(2, 32)) { | 
|  | 988 | Opc = AArch64::USHLv2i32; | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 989 | } else { | 
|  | 990 | LLVM_DEBUG(dbgs() << "Unhandled G_SHL type\n"); | 
|  | 991 | return false; | 
|  | 992 | } | 
|  | 993 |  | 
|  | 994 | MachineIRBuilder MIB(I); | 
|  | 995 | auto UShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Src2Reg}); | 
|  | 996 | constrainSelectedInstRegOperands(*UShl, TII, TRI, RBI); | 
|  | 997 | I.eraseFromParent(); | 
|  | 998 | return true; | 
|  | 999 | } | 
|  | 1000 |  | 
|  | 1001 | bool AArch64InstructionSelector::selectVectorASHR( | 
|  | 1002 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 1003 | assert(I.getOpcode() == TargetOpcode::G_ASHR); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1004 | Register DstReg = I.getOperand(0).getReg(); | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 1005 | const LLT Ty = MRI.getType(DstReg); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1006 | Register Src1Reg = I.getOperand(1).getReg(); | 
|  | 1007 | Register Src2Reg = I.getOperand(2).getReg(); | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 1008 |  | 
|  | 1009 | if (!Ty.isVector()) | 
|  | 1010 | return false; | 
|  | 1011 |  | 
|  | 1012 | // There is no shift-right-by-register instruction; instead, the shift-left- | 
|  | 1013 | // by-register instruction takes a signed shift amount, where a negative | 
|  | 1014 | // amount specifies a right shift. So negate the amount and use SSHL. | 
|  | 1015 |  | 
|  | 1016 | unsigned Opc = 0; | 
|  | 1017 | unsigned NegOpc = 0; | 
|  | 1018 | const TargetRegisterClass *RC = nullptr; | 
|  | 1019 | if (Ty == LLT::vector(4, 32)) { | 
|  | 1020 | Opc = AArch64::SSHLv4i32; | 
|  | 1021 | NegOpc = AArch64::NEGv4i32; | 
|  | 1022 | RC = &AArch64::FPR128RegClass; | 
|  | 1023 | } else if (Ty == LLT::vector(2, 32)) { | 
|  | 1024 | Opc = AArch64::SSHLv2i32; | 
|  | 1025 | NegOpc = AArch64::NEGv2i32; | 
|  | 1026 | RC = &AArch64::FPR64RegClass; | 
|  | 1027 | } else { | 
|  | 1028 | LLVM_DEBUG(dbgs() << "Unhandled G_ASHR type\n"); | 
|  | 1029 | return false; | 
|  | 1030 | } | 
|  | 1031 |  | 
|  | 1032 | MachineIRBuilder MIB(I); | 
|  | 1033 | auto Neg = MIB.buildInstr(NegOpc, {RC}, {Src2Reg}); | 
|  | 1034 | constrainSelectedInstRegOperands(*Neg, TII, TRI, RBI); | 
|  | 1035 | auto SShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Neg}); | 
|  | 1036 | constrainSelectedInstRegOperands(*SShl, TII, TRI, RBI); | 
|  | 1037 | I.eraseFromParent(); | 
|  | 1038 | return true; | 
|  | 1039 | } | 
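|  |  | // e.g. (sketch) a <4 x s32> G_ASHR %dst, %src, %amt becomes: | 
|  |  | //   %neg:fpr128 = NEGv4i32 %amt | 
|  |  | //   %dst = SSHLv4i32 %src, %neg | 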
|  | 1040 |  | 
| Tim Northover | e9600d8 | 2017-02-08 17:57:27 +0000 | [diff] [blame] | 1041 | bool AArch64InstructionSelector::selectVaStartAAPCS( | 
|  | 1042 | MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const { | 
|  | 1043 | return false; | 
|  | 1044 | } | 
|  | 1045 |  | 
|  | 1046 | bool AArch64InstructionSelector::selectVaStartDarwin( | 
|  | 1047 | MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const { | 
|  | 1048 | AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1049 | Register ListReg = I.getOperand(0).getReg(); | 
| Tim Northover | e9600d8 | 2017-02-08 17:57:27 +0000 | [diff] [blame] | 1050 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1051 | Register ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass); | 
| Tim Northover | e9600d8 | 2017-02-08 17:57:27 +0000 | [diff] [blame] | 1052 |  | 
|  | 1053 | auto MIB = | 
|  | 1054 | BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri)) | 
|  | 1055 | .addDef(ArgsAddrReg) | 
|  | 1056 | .addFrameIndex(FuncInfo->getVarArgsStackIndex()) | 
|  | 1057 | .addImm(0) | 
|  | 1058 | .addImm(0); | 
|  | 1059 |  | 
|  | 1060 | constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); | 
|  | 1061 |  | 
|  | 1062 | MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui)) | 
|  | 1063 | .addUse(ArgsAddrReg) | 
|  | 1064 | .addUse(ListReg) | 
|  | 1065 | .addImm(0) | 
|  | 1066 | .addMemOperand(*I.memoperands_begin()); | 
|  | 1067 |  | 
|  | 1068 | constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); | 
|  | 1069 | I.eraseFromParent(); | 
|  | 1070 | return true; | 
|  | 1071 | } | 
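|  |  | // The emitted sequence is roughly: | 
|  |  | //   %addr:gpr64 = ADDXri %fixed-stack.varargs, 0, 0 | 
|  |  | //   STRXui %addr, %list, 0 ; store the stack-args pointer into the va_list | 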
|  | 1072 |  | 
| Amara Emerson | 1e8c164 | 2018-07-31 00:09:02 +0000 | [diff] [blame] | 1073 | void AArch64InstructionSelector::materializeLargeCMVal( | 
| Peter Collingbourne | 33773d5 | 2019-07-31 20:14:09 +0000 | [diff] [blame] | 1074 | MachineInstr &I, const Value *V, unsigned OpFlags) const { | 
| Amara Emerson | 1e8c164 | 2018-07-31 00:09:02 +0000 | [diff] [blame] | 1075 | MachineBasicBlock &MBB = *I.getParent(); | 
|  | 1076 | MachineFunction &MF = *MBB.getParent(); | 
|  | 1077 | MachineRegisterInfo &MRI = MF.getRegInfo(); | 
|  | 1078 | MachineIRBuilder MIB(I); | 
|  | 1079 |  | 
| Aditya Nandakumar | cef44a2 | 2018-12-11 00:48:50 +0000 | [diff] [blame] | 1080 | auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {}); | 
| Amara Emerson | 1e8c164 | 2018-07-31 00:09:02 +0000 | [diff] [blame] | 1081 | MovZ->addOperand(MF, I.getOperand(1)); | 
|  | 1082 | MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 | | 
|  | 1083 | AArch64II::MO_NC); | 
|  | 1084 | MovZ->addOperand(MF, MachineOperand::CreateImm(0)); | 
|  | 1085 | constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI); | 
|  | 1086 |  | 
| Matt Arsenault | e3a676e | 2019-06-24 15:50:29 +0000 | [diff] [blame] | 1087 | auto BuildMovK = [&](Register SrcReg, unsigned char Flags, unsigned Offset, | 
|  | 1088 | Register ForceDstReg) { | 
|  | 1089 | Register DstReg = ForceDstReg | 
| Amara Emerson | 1e8c164 | 2018-07-31 00:09:02 +0000 | [diff] [blame] | 1090 | ? ForceDstReg | 
|  | 1091 | : MRI.createVirtualRegister(&AArch64::GPR64RegClass); | 
|  | 1092 | auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg); | 
|  | 1093 | if (auto *GV = dyn_cast<GlobalValue>(V)) { | 
|  | 1094 | MovI->addOperand(MF, MachineOperand::CreateGA( | 
|  | 1095 | GV, MovZ->getOperand(1).getOffset(), Flags)); | 
|  | 1096 | } else { | 
|  | 1097 | MovI->addOperand( | 
|  | 1098 | MF, MachineOperand::CreateBA(cast<BlockAddress>(V), | 
|  | 1099 | MovZ->getOperand(1).getOffset(), Flags)); | 
|  | 1100 | } | 
|  | 1101 | MovI->addOperand(MF, MachineOperand::CreateImm(Offset)); | 
|  | 1102 | constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI); | 
|  | 1103 | return DstReg; | 
|  | 1104 | }; | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1105 | Register DstReg = BuildMovK(MovZ.getReg(0), | 
| Amara Emerson | 1e8c164 | 2018-07-31 00:09:02 +0000 | [diff] [blame] | 1106 | AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0); | 
|  | 1107 | DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0); | 
|  | 1108 | BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg()); | 
|  | 1109 | return; | 
|  | 1110 | } | 
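|  |  | // The result is the usual four-instruction large-code-model sequence, | 
|  |  | // roughly: | 
|  |  | //   %0 = MOVZXi @sym(g0, nc), 0 | 
|  |  | //   %1 = MOVKXi %0, @sym(g1, nc), 16 | 
|  |  | //   %2 = MOVKXi %1, @sym(g2, nc), 32 | 
|  |  | //   %dst = MOVKXi %2, @sym(g3), 48 | 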
|  | 1111 |  | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 1112 | void AArch64InstructionSelector::preISelLower(MachineInstr &I) const { | 
|  | 1113 | MachineBasicBlock &MBB = *I.getParent(); | 
|  | 1114 | MachineFunction &MF = *MBB.getParent(); | 
|  | 1115 | MachineRegisterInfo &MRI = MF.getRegInfo(); | 
|  | 1116 |  | 
|  | 1117 | switch (I.getOpcode()) { | 
|  | 1118 | case TargetOpcode::G_SHL: | 
|  | 1119 | case TargetOpcode::G_ASHR: | 
|  | 1120 | case TargetOpcode::G_LSHR: { | 
|  | 1121 | // These shifts are legalized to have 64-bit shift amounts because we want | 
|  | 1122 | // to take advantage of the existing imported selection patterns that assume | 
|  | 1123 | // the immediates are s64s. However, if the shifted type is 32 bits and for | 
|  | 1124 | // some reason we receive input GMIR that has an s64 shift amount that's not | 
|  | 1125 | // a G_CONSTANT, insert a truncate so that we can still select the s32 | 
|  | 1126 | // register-register variant. | 
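|  |  | // For example (sketch): an s32 G_SHL whose s64 amount %amt is not a | 
|  |  | // G_CONSTANT has its amount rewritten to | 
|  |  | //   %trunc:gpr(s32) = COPY %amt.sub_32 | 
|  |  | // so the s32 register-register pattern can still match. | 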
| Daniel Sanders | 5ae66e5 | 2019-08-12 22:40:53 +0000 | [diff] [blame] | 1127 | Register SrcReg = I.getOperand(1).getReg(); | 
|  | 1128 | Register ShiftReg = I.getOperand(2).getReg(); | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 1129 | const LLT ShiftTy = MRI.getType(ShiftReg); | 
|  | 1130 | const LLT SrcTy = MRI.getType(SrcReg); | 
|  | 1131 | if (SrcTy.isVector()) | 
|  | 1132 | return; | 
|  | 1133 | assert(!ShiftTy.isVector() && "unexpected vector shift ty"); | 
|  | 1134 | if (SrcTy.getSizeInBits() != 32 || ShiftTy.getSizeInBits() != 64) | 
|  | 1135 | return; | 
|  | 1136 | auto *AmtMI = MRI.getVRegDef(ShiftReg); | 
|  | 1137 | assert(AmtMI && "could not find a vreg definition for shift amount"); | 
|  | 1138 | if (AmtMI->getOpcode() != TargetOpcode::G_CONSTANT) { | 
|  | 1139 | // Insert a subregister copy to implement a 64->32 truncation. | 
|  | 1140 | MachineIRBuilder MIB(I); | 
|  | 1141 | auto Trunc = MIB.buildInstr(TargetOpcode::COPY, {SrcTy}, {}) | 
|  | 1142 | .addReg(ShiftReg, 0, AArch64::sub_32); | 
|  | 1143 | MRI.setRegBank(Trunc.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID)); | 
|  | 1144 | I.getOperand(2).setReg(Trunc.getReg(0)); | 
|  | 1145 | } | 
|  | 1146 | return; | 
|  | 1147 | } | 
| Jessica Paquette | 41affad | 2019-07-20 01:55:35 +0000 | [diff] [blame] | 1148 | case TargetOpcode::G_STORE: | 
|  | 1149 | contractCrossBankCopyIntoStore(I, MRI); | 
|  | 1150 | return; | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 1151 | default: | 
|  | 1152 | return; | 
|  | 1153 | } | 
|  | 1154 | } | 
|  | 1155 |  | 
|  | 1156 | bool AArch64InstructionSelector::earlySelectSHL( | 
|  | 1157 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 1158 | // We try to match the immediate variant of LSL, which is actually an alias | 
|  | 1159 | // for a special case of UBFM. Otherwise, we fall back to the imported | 
|  | 1160 | // selector which will match the register variant. | 
|  | 1161 | assert(I.getOpcode() == TargetOpcode::G_SHL && "unexpected op"); | 
|  | 1162 | const auto &MO = I.getOperand(2); | 
|  | 1163 | auto VRegAndVal = getConstantVRegVal(MO.getReg(), MRI); | 
|  | 1164 | if (!VRegAndVal) | 
|  | 1165 | return false; | 
|  | 1166 |  | 
|  | 1167 | const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 1168 | if (DstTy.isVector()) | 
|  | 1169 | return false; | 
|  | 1170 | bool Is64Bit = DstTy.getSizeInBits() == 64; | 
|  | 1171 | auto Imm1Fn = Is64Bit ? selectShiftA_64(MO) : selectShiftA_32(MO); | 
|  | 1172 | auto Imm2Fn = Is64Bit ? selectShiftB_64(MO) : selectShiftB_32(MO); | 
|  | 1173 | MachineIRBuilder MIB(I); | 
|  | 1174 |  | 
|  | 1175 | if (!Imm1Fn || !Imm2Fn) | 
|  | 1176 | return false; | 
|  | 1177 |  | 
|  | 1178 | auto NewI = | 
|  | 1179 | MIB.buildInstr(Is64Bit ? AArch64::UBFMXri : AArch64::UBFMWri, | 
|  | 1180 | {I.getOperand(0).getReg()}, {I.getOperand(1).getReg()}); | 
|  | 1181 |  | 
|  | 1182 | for (auto &RenderFn : *Imm1Fn) | 
|  | 1183 | RenderFn(NewI); | 
|  | 1184 | for (auto &RenderFn : *Imm2Fn) | 
|  | 1185 | RenderFn(NewI); | 
|  | 1186 |  | 
|  | 1187 | I.eraseFromParent(); | 
|  | 1188 | return constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI); | 
|  | 1189 | } | 
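|  |  | // Recall the alias being matched (a sketch): for 64-bit operands, | 
|  |  | //   lsl x0, x1, #sh  ==  ubfm x0, x1, #((64 - sh) % 64), #(63 - sh) | 
|  |  | // selectShiftA_*/selectShiftB_* presumably render those two UBFM immediates. | 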
|  | 1190 |  | 
| Jessica Paquette | 41affad | 2019-07-20 01:55:35 +0000 | [diff] [blame] | 1191 | void AArch64InstructionSelector::contractCrossBankCopyIntoStore( | 
|  | 1192 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 1193 | assert(I.getOpcode() == TargetOpcode::G_STORE && "Expected G_STORE"); | 
|  | 1194 | // If we're storing a scalar, it doesn't matter what register bank that | 
|  | 1195 | // scalar is on. All that matters is the size. | 
|  | 1196 | // | 
|  | 1197 | // So, if we see something like this (with a 32-bit scalar as an example): | 
|  | 1198 | // | 
|  | 1199 | // %x:gpr(s32) = ... something ... | 
|  | 1200 | // %y:fpr(s32) = COPY %x:gpr(s32) | 
|  | 1201 | // G_STORE %y:fpr(s32) | 
|  | 1202 | // | 
|  | 1203 | // We can fix this up into something like this: | 
|  | 1204 | // | 
|  | 1205 | // G_STORE %x:gpr(s32) | 
|  | 1206 | // | 
|  | 1207 | // And then continue the selection process normally. | 
|  | 1208 | MachineInstr *Def = getDefIgnoringCopies(I.getOperand(0).getReg(), MRI); | 
|  | 1209 | if (!Def) | 
|  | 1210 | return; | 
|  | 1211 | Register DefDstReg = Def->getOperand(0).getReg(); | 
|  | 1212 | LLT DefDstTy = MRI.getType(DefDstReg); | 
|  | 1213 | Register StoreSrcReg = I.getOperand(0).getReg(); | 
|  | 1214 | LLT StoreSrcTy = MRI.getType(StoreSrcReg); | 
|  | 1215 |  | 
|  | 1216 | // If we get something strange like a physical register, then we shouldn't | 
|  | 1217 | // go any further. | 
|  | 1218 | if (!DefDstTy.isValid()) | 
|  | 1219 | return; | 
|  | 1220 |  | 
|  | 1221 | // Are the source and dst types the same size? | 
|  | 1222 | if (DefDstTy.getSizeInBits() != StoreSrcTy.getSizeInBits()) | 
|  | 1223 | return; | 
|  | 1224 |  | 
|  | 1225 | if (RBI.getRegBank(StoreSrcReg, MRI, TRI) == | 
|  | 1226 | RBI.getRegBank(DefDstReg, MRI, TRI)) | 
|  | 1227 | return; | 
|  | 1228 |  | 
|  | 1229 | // We have a cross-bank copy, which is entering a store. Let's fold it. | 
|  | 1230 | I.getOperand(0).setReg(DefDstReg); | 
|  | 1231 | } | 
|  | 1232 |  | 
| Jessica Paquette | 7a1dcc5 | 2019-07-18 21:50:11 +0000 | [diff] [blame] | 1233 | bool AArch64InstructionSelector::earlySelectLoad( | 
|  | 1234 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 1235 | // Try to fold in shifts, etc into the addressing mode of a load. | 
|  | 1236 | assert(I.getOpcode() == TargetOpcode::G_LOAD && "unexpected op"); | 
|  | 1237 |  | 
|  | 1238 | // Don't handle atomic loads/stores yet. | 
|  | 1239 | auto &MemOp = **I.memoperands_begin(); | 
|  | 1240 | if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) { | 
|  | 1241 | LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n"); | 
|  | 1242 | return false; | 
|  | 1243 | } | 
|  | 1244 |  | 
|  | 1245 | unsigned MemBytes = MemOp.getSize(); | 
|  | 1246 |  | 
|  | 1247 | // Only support 64-bit loads for now. | 
|  | 1248 | if (MemBytes != 8) | 
|  | 1249 | return false; | 
|  | 1250 |  | 
|  | 1251 | Register DstReg = I.getOperand(0).getReg(); | 
|  | 1252 | const LLT DstTy = MRI.getType(DstReg); | 
|  | 1253 | // Don't handle vectors. | 
|  | 1254 | if (DstTy.isVector()) | 
|  | 1255 | return false; | 
|  | 1256 |  | 
|  | 1257 | unsigned DstSize = DstTy.getSizeInBits(); | 
|  | 1258 | // TODO: 32-bit destinations. | 
|  | 1259 | if (DstSize != 64) | 
|  | 1260 | return false; | 
|  | 1261 |  | 
| Jessica Paquette | 2b404d0 | 2019-07-23 16:09:42 +0000 | [diff] [blame] | 1262 | // Check if we can do any folding from GEPs/shifts etc. into the load. | 
|  | 1263 | auto ImmFn = selectAddrModeXRO(I.getOperand(1), MemBytes); | 
| Jessica Paquette | 7a1dcc5 | 2019-07-18 21:50:11 +0000 | [diff] [blame] | 1264 | if (!ImmFn) | 
|  | 1265 | return false; | 
|  | 1266 |  | 
|  | 1267 | // We can fold something. Emit the load here. | 
|  | 1268 | MachineIRBuilder MIB(I); | 
|  | 1269 |  | 
|  | 1270 | // Choose the instruction based off the size of the element being loaded, and | 
|  | 1271 | // whether or not we're loading into a FPR. | 
|  | 1272 | const RegisterBank &RB = *RBI.getRegBank(DstReg, MRI, TRI); | 
|  | 1273 | unsigned Opc = | 
|  | 1274 | RB.getID() == AArch64::GPRRegBankID ? AArch64::LDRXroX : AArch64::LDRDroX; | 
|  | 1275 | // Construct the load. | 
|  | 1276 | auto LoadMI = MIB.buildInstr(Opc, {DstReg}, {}); | 
|  | 1277 | for (auto &RenderFn : *ImmFn) | 
|  | 1278 | RenderFn(LoadMI); | 
|  | 1279 | LoadMI.addMemOperand(*I.memoperands_begin()); | 
|  | 1280 | I.eraseFromParent(); | 
|  | 1281 | return constrainSelectedInstRegOperands(*LoadMI, TII, TRI, RBI); | 
|  | 1282 | } | 
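|  |  | // For example (sketch): a 64-bit load whose address is | 
|  |  | //   %addr:gpr(p0) = G_GEP %base, %idx | 
|  |  | // may fold into the register-register form LDRXroX %base, %idx, ... when | 
|  |  | // selectAddrModeXRO can prove the folding is legal. | 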
|  | 1283 |  | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 1284 | bool AArch64InstructionSelector::earlySelect(MachineInstr &I) const { | 
|  | 1285 | assert(I.getParent() && "Instruction should be in a basic block!"); | 
|  | 1286 | assert(I.getParent()->getParent() && "Instruction should be in a function!"); | 
|  | 1287 |  | 
|  | 1288 | MachineBasicBlock &MBB = *I.getParent(); | 
|  | 1289 | MachineFunction &MF = *MBB.getParent(); | 
|  | 1290 | MachineRegisterInfo &MRI = MF.getRegInfo(); | 
|  | 1291 |  | 
|  | 1292 | switch (I.getOpcode()) { | 
|  | 1293 | case TargetOpcode::G_SHL: | 
|  | 1294 | return earlySelectSHL(I, MRI); | 
| Jessica Paquette | 7a1dcc5 | 2019-07-18 21:50:11 +0000 | [diff] [blame] | 1295 | case TargetOpcode::G_LOAD: | 
|  | 1296 | return earlySelectLoad(I, MRI); | 
| Tim Northover | de98e92 | 2019-08-06 09:18:41 +0000 | [diff] [blame] | 1297 | case TargetOpcode::G_CONSTANT: { | 
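|  |  | // Fold integer zero constants into copies from the zero register, e.g. | 
|  |  | // (sketch) %x:gpr(s64) = G_CONSTANT i64 0 becomes %x = COPY $xzr instead | 
|  |  | // of a MOVi64imm. | 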
|  | 1298 | bool IsZero = false; | 
|  | 1299 | if (I.getOperand(1).isCImm()) | 
|  | 1300 | IsZero = I.getOperand(1).getCImm()->getZExtValue() == 0; | 
|  | 1301 | else if (I.getOperand(1).isImm()) | 
|  | 1302 | IsZero = I.getOperand(1).getImm() == 0; | 
|  | 1303 |  | 
|  | 1304 | if (!IsZero) | 
|  | 1305 | return false; | 
|  | 1306 |  | 
|  | 1307 | Register DefReg = I.getOperand(0).getReg(); | 
|  | 1308 | LLT Ty = MRI.getType(DefReg); | 
| Tim Northover | b5abc42 | 2019-08-06 13:34:08 +0000 | [diff] [blame] | 1309 | if (Ty != LLT::scalar(64) && Ty != LLT::scalar(32)) | 
|  | 1310 | return false; | 
| Tim Northover | de98e92 | 2019-08-06 09:18:41 +0000 | [diff] [blame] | 1311 |  | 
|  | 1312 | if (Ty == LLT::scalar(64)) { | 
|  | 1313 | I.getOperand(1).ChangeToRegister(AArch64::XZR, false); | 
|  | 1314 | RBI.constrainGenericRegister(DefReg, AArch64::GPR64RegClass, MRI); | 
|  | 1315 | } else { | 
|  | 1316 | I.getOperand(1).ChangeToRegister(AArch64::WZR, false); | 
|  | 1317 | RBI.constrainGenericRegister(DefReg, AArch64::GPR32RegClass, MRI); | 
|  | 1318 | } | 
|  | 1319 | I.setDesc(TII.get(TargetOpcode::COPY)); | 
|  | 1320 | return true; | 
|  | 1321 | } | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 1322 | default: | 
|  | 1323 | return false; | 
|  | 1324 | } | 
|  | 1325 | } | 
|  | 1326 |  | 
| Amara Emerson | e14c91b | 2019-08-13 06:26:59 +0000 | [diff] [blame] | 1327 | bool AArch64InstructionSelector::select(MachineInstr &I) { | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 1328 | assert(I.getParent() && "Instruction should be in a basic block!"); | 
|  | 1329 | assert(I.getParent()->getParent() && "Instruction should be in a function!"); | 
|  | 1330 |  | 
|  | 1331 | MachineBasicBlock &MBB = *I.getParent(); | 
|  | 1332 | MachineFunction &MF = *MBB.getParent(); | 
|  | 1333 | MachineRegisterInfo &MRI = MF.getRegInfo(); | 
|  | 1334 |  | 
| Tim Northover | cdf23f1 | 2016-10-31 18:30:59 +0000 | [diff] [blame] | 1335 | unsigned Opcode = I.getOpcode(); | 
| Aditya Nandakumar | efd8a84 | 2017-08-23 20:45:48 +0000 | [diff] [blame] | 1336 | // G_PHI requires the same handling as PHI. | 
|  | 1337 | if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) { | 
| Tim Northover | cdf23f1 | 2016-10-31 18:30:59 +0000 | [diff] [blame] | 1338 | // Certain non-generic instructions also need some special handling. | 
|  | 1339 |  | 
|  | 1340 | if (Opcode == TargetOpcode::LOAD_STACK_GUARD) | 
|  | 1341 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
| Tim Northover | 7d88da6 | 2016-11-08 00:34:06 +0000 | [diff] [blame] | 1342 |  | 
| Aditya Nandakumar | efd8a84 | 2017-08-23 20:45:48 +0000 | [diff] [blame] | 1343 | if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) { | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1344 | const Register DefReg = I.getOperand(0).getReg(); | 
| Tim Northover | 7d88da6 | 2016-11-08 00:34:06 +0000 | [diff] [blame] | 1345 | const LLT DefTy = MRI.getType(DefReg); | 
|  | 1346 |  | 
| Matt Arsenault | 732149b | 2019-07-01 17:02:24 +0000 | [diff] [blame] | 1347 | const RegClassOrRegBank &RegClassOrBank = | 
|  | 1348 | MRI.getRegClassOrRegBank(DefReg); | 
| Tim Northover | 7d88da6 | 2016-11-08 00:34:06 +0000 | [diff] [blame] | 1349 |  | 
| Matt Arsenault | 732149b | 2019-07-01 17:02:24 +0000 | [diff] [blame] | 1350 | const TargetRegisterClass *DefRC | 
|  | 1351 | = RegClassOrBank.dyn_cast<const TargetRegisterClass *>(); | 
|  | 1352 | if (!DefRC) { | 
|  | 1353 | if (!DefTy.isValid()) { | 
|  | 1354 | LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n"); | 
|  | 1355 | return false; | 
|  | 1356 | } | 
|  | 1357 | const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>(); | 
|  | 1358 | DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI); | 
| Tim Northover | 7d88da6 | 2016-11-08 00:34:06 +0000 | [diff] [blame] | 1359 | if (!DefRC) { | 
| Matt Arsenault | 732149b | 2019-07-01 17:02:24 +0000 | [diff] [blame] | 1360 | LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n"); | 
|  | 1361 | return false; | 
| Tim Northover | 7d88da6 | 2016-11-08 00:34:06 +0000 | [diff] [blame] | 1362 | } | 
|  | 1363 | } | 
| Matt Arsenault | 732149b | 2019-07-01 17:02:24 +0000 | [diff] [blame] | 1364 |  | 
| Aditya Nandakumar | efd8a84 | 2017-08-23 20:45:48 +0000 | [diff] [blame] | 1365 | I.setDesc(TII.get(TargetOpcode::PHI)); | 
| Tim Northover | 7d88da6 | 2016-11-08 00:34:06 +0000 | [diff] [blame] | 1366 |  | 
|  | 1367 | return RBI.constrainGenericRegister(DefReg, *DefRC, MRI); | 
|  | 1368 | } | 
|  | 1369 |  | 
|  | 1370 | if (I.isCopy()) | 
| Tim Northover | cdf23f1 | 2016-10-31 18:30:59 +0000 | [diff] [blame] | 1371 | return selectCopy(I, TII, MRI, TRI, RBI); | 
| Tim Northover | 7d88da6 | 2016-11-08 00:34:06 +0000 | [diff] [blame] | 1372 |  | 
|  | 1373 | return true; | 
| Tim Northover | cdf23f1 | 2016-10-31 18:30:59 +0000 | [diff] [blame] | 1374 | } | 
|  | 1375 |  | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 1376 |  | 
|  | 1377 | if (I.getNumOperands() != I.getNumExplicitOperands()) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1378 | LLVM_DEBUG( | 
|  | 1379 | dbgs() << "Generic instruction has unexpected implicit operands\n"); | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 1380 | return false; | 
|  | 1381 | } | 
|  | 1382 |  | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 1383 | // Try to do some lowering before we start instruction selecting. These | 
|  | 1384 | // lowerings are purely transformations on the input G_MIR and so selection | 
|  | 1385 | // must continue after any modification of the instruction. | 
|  | 1386 | preISelLower(I); | 
|  | 1387 |  | 
|  | 1388 | // There may be patterns where the importer can't deal with them optimally, | 
|  | 1389 | // but does select it to a suboptimal sequence so our custom C++ selection | 
|  | 1390 | // code later never has a chance to work on it. Therefore, we have an early | 
|  | 1391 | // selection attempt here to give priority to certain selection routines | 
|  | 1392 | // over the imported ones. | 
|  | 1393 | if (earlySelect(I)) | 
|  | 1394 | return true; | 
|  | 1395 |  | 
| Amara Emerson | e14c91b | 2019-08-13 06:26:59 +0000 | [diff] [blame] | 1396 | if (selectImpl(I, *CoverageInfo)) | 
| Ahmed Bougacha | 36f7035 | 2016-12-21 23:26:20 +0000 | [diff] [blame] | 1397 | return true; | 
|  | 1398 |  | 
| Tim Northover | 32a078a | 2016-09-15 10:09:59 +0000 | [diff] [blame] | 1399 | LLT Ty = | 
|  | 1400 | I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{}; | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 1401 |  | 
| Amara Emerson | 3739a20 | 2019-03-15 21:59:50 +0000 | [diff] [blame] | 1402 | MachineIRBuilder MIB(I); | 
|  | 1403 |  | 
| Tim Northover | 69271c6 | 2016-10-12 22:49:11 +0000 | [diff] [blame] | 1404 | switch (Opcode) { | 
| Tim Northover | 5e3dbf3 | 2016-10-12 22:49:01 +0000 | [diff] [blame] | 1405 | case TargetOpcode::G_BRCOND: { | 
|  | 1406 | if (Ty.getSizeInBits() > 32) { | 
|  | 1407 | // We shouldn't need this on AArch64, but it would be implemented as an | 
|  | 1408 | // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the | 
|  | 1409 | // bit being tested is < 32. | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1410 | LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty | 
|  | 1411 | << ", expected at most 32-bits"); | 
| Tim Northover | 5e3dbf3 | 2016-10-12 22:49:01 +0000 | [diff] [blame] | 1412 | return false; | 
|  | 1413 | } | 
|  | 1414 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1415 | const Register CondReg = I.getOperand(0).getReg(); | 
| Tim Northover | 5e3dbf3 | 2016-10-12 22:49:01 +0000 | [diff] [blame] | 1416 | MachineBasicBlock *DestMBB = I.getOperand(1).getMBB(); | 
|  | 1417 |  | 
| Kristof Beyls | e66bc1f | 2018-12-18 08:50:02 +0000 | [diff] [blame] | 1418 | // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z | 
|  | 1419 | // instructions will not be produced, as they are conditional branch | 
|  | 1420 | // instructions that do not set flags. | 
|  | 1421 | bool ProduceNonFlagSettingCondBr = | 
|  | 1422 | !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening); | 
|  | 1423 | if (ProduceNonFlagSettingCondBr && selectCompareBranch(I, MF, MRI)) | 
| Ahmed Bougacha | 641cb20 | 2017-03-27 16:35:31 +0000 | [diff] [blame] | 1424 | return true; | 
|  | 1425 |  | 
| Kristof Beyls | e66bc1f | 2018-12-18 08:50:02 +0000 | [diff] [blame] | 1426 | if (ProduceNonFlagSettingCondBr) { | 
|  | 1427 | auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW)) | 
|  | 1428 | .addUse(CondReg) | 
|  | 1429 | .addImm(/*bit offset=*/0) | 
|  | 1430 | .addMBB(DestMBB); | 
| Tim Northover | 5e3dbf3 | 2016-10-12 22:49:01 +0000 | [diff] [blame] | 1431 |  | 
| Kristof Beyls | e66bc1f | 2018-12-18 08:50:02 +0000 | [diff] [blame] | 1432 | I.eraseFromParent(); | 
|  | 1433 | return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI); | 
|  | 1434 | } else { | 
|  | 1435 | auto CMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri)) | 
|  | 1436 | .addDef(AArch64::WZR) | 
|  | 1437 | .addUse(CondReg) | 
|  | 1438 | .addImm(1); | 
|  | 1439 | constrainSelectedInstRegOperands(*CMP.getInstr(), TII, TRI, RBI); | 
|  | 1440 | auto Bcc = | 
|  | 1441 | BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::Bcc)) | 
|  | 1442 | .addImm(AArch64CC::EQ) | 
|  | 1443 | .addMBB(DestMBB); | 
|  | 1444 |  | 
|  | 1445 | I.eraseFromParent(); | 
|  | 1446 | return constrainSelectedInstRegOperands(*Bcc.getInstr(), TII, TRI, RBI); | 
|  | 1447 | } | 
| Tim Northover | 5e3dbf3 | 2016-10-12 22:49:01 +0000 | [diff] [blame] | 1448 | } | 
|  | 1449 |  | 
| Kristof Beyls | 65a12c0 | 2017-01-30 09:13:18 +0000 | [diff] [blame] | 1450 | case TargetOpcode::G_BRINDIRECT: { | 
|  | 1451 | I.setDesc(TII.get(AArch64::BR)); | 
|  | 1452 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1453 | } | 
|  | 1454 |  | 
| Amara Emerson | 6e71b34 | 2019-06-21 18:10:41 +0000 | [diff] [blame] | 1455 | case TargetOpcode::G_BRJT: | 
|  | 1456 | return selectBrJT(I, MRI); | 
|  | 1457 |  | 
| Jessica Paquette | 67ab9eb | 2019-04-26 18:00:01 +0000 | [diff] [blame] | 1458 | case TargetOpcode::G_BSWAP: { | 
|  | 1459 | // Handle vector types for G_BSWAP directly. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1460 | Register DstReg = I.getOperand(0).getReg(); | 
| Jessica Paquette | 67ab9eb | 2019-04-26 18:00:01 +0000 | [diff] [blame] | 1461 | LLT DstTy = MRI.getType(DstReg); | 
|  | 1462 |  | 
|  | 1463 | // We should only get vector types here; everything else is handled by the | 
|  | 1464 | // importer right now. | 
|  | 1465 | if (!DstTy.isVector() || DstTy.getSizeInBits() > 128) { | 
|  | 1466 | LLVM_DEBUG(dbgs() << "Dst type for G_BSWAP currently unsupported.\n"); | 
|  | 1467 | return false; | 
|  | 1468 | } | 
|  | 1469 |  | 
|  | 1470 | // Only handle 4 and 2 element vectors for now. | 
|  | 1471 | // TODO: 16-bit elements. | 
|  | 1472 | unsigned NumElts = DstTy.getNumElements(); | 
|  | 1473 | if (NumElts != 4 && NumElts != 2) { | 
|  | 1474 | LLVM_DEBUG(dbgs() << "Unsupported number of elements for G_BSWAP.\n"); | 
|  | 1475 | return false; | 
|  | 1476 | } | 
|  | 1477 |  | 
|  | 1478 | // Choose the correct opcode for the supported types. Right now, that's | 
|  | 1479 | // v2s32, v4s32, and v2s64. | 
|  | 1480 | unsigned Opc = 0; | 
|  | 1481 | unsigned EltSize = DstTy.getElementType().getSizeInBits(); | 
|  | 1482 | if (EltSize == 32) | 
|  | 1483 | Opc = (DstTy.getNumElements() == 2) ? AArch64::REV32v8i8 | 
|  | 1484 | : AArch64::REV32v16i8; | 
|  | 1485 | else if (EltSize == 64) | 
|  | 1486 | Opc = AArch64::REV64v16i8; | 
|  | 1487 |  | 
|  | 1488 | // We should always get something by the time we get here... | 
|  | 1489 | assert(Opc != 0 && "Didn't get an opcode for G_BSWAP?"); | 
|  | 1490 |  | 
|  | 1491 | I.setDesc(TII.get(Opc)); | 
|  | 1492 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1493 | } | 
|  | 1494 |  | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1495 | case TargetOpcode::G_FCONSTANT: | 
| Tim Northover | 4edc60d | 2016-10-10 21:49:42 +0000 | [diff] [blame] | 1496 | case TargetOpcode::G_CONSTANT: { | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1497 | const bool isFP = Opcode == TargetOpcode::G_FCONSTANT; | 
|  | 1498 |  | 
| Amara Emerson | 8f25a02 | 2019-06-21 16:43:50 +0000 | [diff] [blame] | 1499 | const LLT s8 = LLT::scalar(8); | 
|  | 1500 | const LLT s16 = LLT::scalar(16); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1501 | const LLT s32 = LLT::scalar(32); | 
|  | 1502 | const LLT s64 = LLT::scalar(64); | 
|  | 1503 | const LLT p0 = LLT::pointer(0, 64); | 
|  | 1504 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1505 | const Register DefReg = I.getOperand(0).getReg(); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1506 | const LLT DefTy = MRI.getType(DefReg); | 
|  | 1507 | const unsigned DefSize = DefTy.getSizeInBits(); | 
|  | 1508 | const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); | 
|  | 1509 |  | 
|  | 1510 | // FIXME: Redundant check, but even less readable when factored out. | 
|  | 1511 | if (isFP) { | 
|  | 1512 | if (Ty != s32 && Ty != s64) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1513 | LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty | 
|  | 1514 | << " constant, expected: " << s32 << " or " << s64 | 
|  | 1515 | << '\n'); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1516 | return false; | 
|  | 1517 | } | 
|  | 1518 |  | 
|  | 1519 | if (RB.getID() != AArch64::FPRRegBankID) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1520 | LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty | 
|  | 1521 | << " constant on bank: " << RB | 
|  | 1522 | << ", expected: FPR\n"); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1523 | return false; | 
|  | 1524 | } | 
| Daniel Sanders | 11300ce | 2017-10-13 21:28:03 +0000 | [diff] [blame] | 1525 |  | 
|  | 1526 | // The case when we have 0.0 is covered by tablegen. Reject it here so we | 
|  | 1527 | // can be sure tablegen works correctly and isn't rescued by this code. | 
|  | 1528 | if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0)) | 
|  | 1529 | return false; | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1530 | } else { | 
| Daniel Sanders | 0554004 | 2017-08-08 10:44:31 +0000 | [diff] [blame] | 1531 | // s32 and s64 are covered by tablegen. | 
| Amara Emerson | 8f25a02 | 2019-06-21 16:43:50 +0000 | [diff] [blame] | 1532 | if (Ty != p0 && Ty != s8 && Ty != s16) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1533 | LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty | 
|  | 1534 | << " constant, expected: " << s8 << ", " << s16 | 
|  | 1535 | << ", or " << p0 << '\n'); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1536 | return false; | 
|  | 1537 | } | 
|  | 1538 |  | 
|  | 1539 | if (RB.getID() != AArch64::GPRRegBankID) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1540 | LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty | 
|  | 1541 | << " constant on bank: " << RB | 
|  | 1542 | << ", expected: GPR\n"); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1543 | return false; | 
|  | 1544 | } | 
|  | 1545 | } | 
|  | 1546 |  | 
| Amara Emerson | 8f25a02 | 2019-06-21 16:43:50 +0000 | [diff] [blame] | 1547 | // We allow G_CONSTANT of types < 32b. | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1548 | const unsigned MovOpc = | 
| Amara Emerson | 8f25a02 | 2019-06-21 16:43:50 +0000 | [diff] [blame] | 1549 | DefSize == 64 ? AArch64::MOVi64imm : AArch64::MOVi32imm; | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1550 |  | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1551 | if (isFP) { | 
| Jessica Paquette | a3843fe | 2019-05-01 22:39:43 +0000 | [diff] [blame] | 1552 | // Either emit an FMOV, or emit a copy and materialize with a normal mov. | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1553 | const TargetRegisterClass &GPRRC = | 
|  | 1554 | DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass; | 
|  | 1555 | const TargetRegisterClass &FPRRC = | 
|  | 1556 | DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass; | 
|  | 1557 |  | 
| Jessica Paquette | a3843fe | 2019-05-01 22:39:43 +0000 | [diff] [blame] | 1558 | // Can we use an FMOV instruction to represent the immediate? | 
|  | 1559 | if (emitFMovForFConstant(I, MRI)) | 
|  | 1560 | return true; | 
|  | 1561 |  | 
|  | 1562 | // Nope. Emit a copy and use a normal mov instead. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1563 | const Register DefGPRReg = MRI.createVirtualRegister(&GPRRC); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1564 | MachineOperand &RegOp = I.getOperand(0); | 
|  | 1565 | RegOp.setReg(DefGPRReg); | 
| Amara Emerson | 3739a20 | 2019-03-15 21:59:50 +0000 | [diff] [blame] | 1566 | MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator())); | 
|  | 1567 | MIB.buildCopy({DefReg}, {DefGPRReg}); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1568 |  | 
|  | 1569 | if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1570 | LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n"); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1571 | return false; | 
|  | 1572 | } | 
|  | 1573 |  | 
|  | 1574 | MachineOperand &ImmOp = I.getOperand(1); | 
|  | 1575 | // FIXME: Is going through int64_t always correct? | 
|  | 1576 | ImmOp.ChangeToImmediate( | 
|  | 1577 | ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue()); | 
| Daniel Sanders | 066ebbf | 2017-02-24 15:43:30 +0000 | [diff] [blame] | 1578 | } else if (I.getOperand(1).isCImm()) { | 
| Tim Northover | 9267ac5 | 2016-12-05 21:47:07 +0000 | [diff] [blame] | 1579 | uint64_t Val = I.getOperand(1).getCImm()->getZExtValue(); | 
|  | 1580 | I.getOperand(1).ChangeToImmediate(Val); | 
| Daniel Sanders | 066ebbf | 2017-02-24 15:43:30 +0000 | [diff] [blame] | 1581 | } else if (I.getOperand(1).isImm()) { | 
|  | 1582 | uint64_t Val = I.getOperand(1).getImm(); | 
|  | 1583 | I.getOperand(1).ChangeToImmediate(Val); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1584 | } | 
|  | 1585 |  | 
| Jessica Paquette | a3843fe | 2019-05-01 22:39:43 +0000 | [diff] [blame] | 1586 | I.setDesc(TII.get(MovOpc)); | 
| Tim Northover | 4494d69 | 2016-10-18 19:47:57 +0000 | [diff] [blame] | 1587 | constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1588 | return true; | 
| Tim Northover | 4edc60d | 2016-10-10 21:49:42 +0000 | [diff] [blame] | 1589 | } | 
| Tim Northover | 7b6d66c | 2017-07-20 22:58:38 +0000 | [diff] [blame] | 1590 | case TargetOpcode::G_EXTRACT: { | 
| Amara Emerson | 511f7f5 | 2019-07-23 22:05:13 +0000 | [diff] [blame] | 1591 | Register DstReg = I.getOperand(0).getReg(); | 
|  | 1592 | Register SrcReg = I.getOperand(1).getReg(); | 
|  | 1593 | LLT SrcTy = MRI.getType(SrcReg); | 
|  | 1594 | LLT DstTy = MRI.getType(DstReg); | 
| Amara Emerson | 242efdb | 2018-02-18 17:28:34 +0000 | [diff] [blame] | 1595 | (void)DstTy; | 
| Amara Emerson | bc03bae | 2018-02-18 17:03:02 +0000 | [diff] [blame] | 1596 | unsigned SrcSize = SrcTy.getSizeInBits(); | 
| Amara Emerson | 511f7f5 | 2019-07-23 22:05:13 +0000 | [diff] [blame] | 1597 |  | 
|  | 1598 | if (SrcTy.getSizeInBits() > 64) { | 
|  | 1599 | // This should be an extract of an s128, which is like a vector extract. | 
|  | 1600 | if (SrcTy.getSizeInBits() != 128) | 
|  | 1601 | return false; | 
|  | 1602 | // Only support extracting 64 bits from an s128 at the moment. | 
|  | 1603 | if (DstTy.getSizeInBits() != 64) | 
|  | 1604 | return false; | 
|  | 1605 |  | 
|  | 1606 | const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI); | 
|  | 1607 | const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI); | 
|  | 1608 | // Check we have the right regbank always. | 
|  | 1609 | assert(SrcRB.getID() == AArch64::FPRRegBankID && | 
|  | 1610 | DstRB.getID() == AArch64::FPRRegBankID && | 
|  | 1611 | "Wrong extract regbank!"); | 
| Fangrui Song | 305ace7 | 2019-07-24 01:59:44 +0000 | [diff] [blame] | 1612 | (void)SrcRB; | 
| Amara Emerson | 511f7f5 | 2019-07-23 22:05:13 +0000 | [diff] [blame] | 1613 |  | 
|  | 1614 | // Emit the same code as a vector extract. | 
|  | 1615 | // Offset must be a multiple of 64. | 
|  | 1616 | unsigned Offset = I.getOperand(2).getImm(); | 
|  | 1617 | if (Offset % 64 != 0) | 
|  | 1618 | return false; | 
|  | 1619 | unsigned LaneIdx = Offset / 64; | 
|  | 1620 | MachineIRBuilder MIB(I); | 
|  | 1621 | MachineInstr *Extract = emitExtractVectorElt( | 
|  | 1622 | DstReg, DstRB, LLT::scalar(64), SrcReg, LaneIdx, MIB); | 
|  | 1623 | if (!Extract) | 
|  | 1624 | return false; | 
|  | 1625 | I.eraseFromParent(); | 
|  | 1626 | return true; | 
|  | 1627 | } | 
| Tim Northover | 7b6d66c | 2017-07-20 22:58:38 +0000 | [diff] [blame] | 1628 |  | 
| Amara Emerson | bc03bae | 2018-02-18 17:03:02 +0000 | [diff] [blame] | 1629 | I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri)); | 
| Tim Northover | 7b6d66c | 2017-07-20 22:58:38 +0000 | [diff] [blame] | 1630 | MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() + | 
|  | 1631 | Ty.getSizeInBits() - 1); | 
|  | 1632 |  | 
| Amara Emerson | bc03bae | 2018-02-18 17:03:02 +0000 | [diff] [blame] | 1633 | if (SrcSize < 64) { | 
|  | 1634 | assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 && | 
|  | 1635 | "unexpected G_EXTRACT types"); | 
|  | 1636 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1637 | } | 
|  | 1638 |  | 
| Amara Emerson | 511f7f5 | 2019-07-23 22:05:13 +0000 | [diff] [blame] | 1639 | DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); | 
| Amara Emerson | 3739a20 | 2019-03-15 21:59:50 +0000 | [diff] [blame] | 1640 | MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator())); | 
| Amara Emerson | 8627178 | 2019-03-18 19:20:10 +0000 | [diff] [blame] | 1641 | MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {}) | 
|  | 1642 | .addReg(DstReg, 0, AArch64::sub_32); | 
| Tim Northover | 7b6d66c | 2017-07-20 22:58:38 +0000 | [diff] [blame] | 1643 | RBI.constrainGenericRegister(I.getOperand(0).getReg(), | 
|  | 1644 | AArch64::GPR32RegClass, MRI); | 
|  | 1645 | I.getOperand(0).setReg(DstReg); | 
|  | 1646 |  | 
|  | 1647 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1648 | } | 
|  | 1649 |  | 
|  | 1650 | case TargetOpcode::G_INSERT: { | 
|  | 1651 | LLT SrcTy = MRI.getType(I.getOperand(2).getReg()); | 
| Amara Emerson | bc03bae | 2018-02-18 17:03:02 +0000 | [diff] [blame] | 1652 | LLT DstTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 1653 | unsigned DstSize = DstTy.getSizeInBits(); | 
| Tim Northover | 7b6d66c | 2017-07-20 22:58:38 +0000 | [diff] [blame] | 1654 | // Larger inserts are vectors, same-size ones should be something else by | 
|  | 1655 | // now (split up or turned into COPYs). | 
|  | 1656 | if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32) | 
|  | 1657 | return false; | 
|  | 1658 |  | 
| Amara Emerson | bc03bae | 2018-02-18 17:03:02 +0000 | [diff] [blame] | 1659 | I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri)); | 
| Tim Northover | 7b6d66c | 2017-07-20 22:58:38 +0000 | [diff] [blame] | 1660 | unsigned LSB = I.getOperand(3).getImm(); | 
|  | 1661 | unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits(); | 
| Amara Emerson | bc03bae | 2018-02-18 17:03:02 +0000 | [diff] [blame] | 1662 | I.getOperand(3).setImm((DstSize - LSB) % DstSize); | 
| Tim Northover | 7b6d66c | 2017-07-20 22:58:38 +0000 | [diff] [blame] | 1663 | MachineInstrBuilder(MF, I).addImm(Width - 1); | 
|  | 1664 |  | 
| Amara Emerson | bc03bae | 2018-02-18 17:03:02 +0000 | [diff] [blame] | 1665 | if (DstSize < 64) { | 
|  | 1666 | assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 && | 
|  | 1667 | "unexpected G_INSERT types"); | 
|  | 1668 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1669 | } | 
|  | 1670 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1671 | Register SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); | 
| Tim Northover | 7b6d66c | 2017-07-20 22:58:38 +0000 | [diff] [blame] | 1672 | BuildMI(MBB, I.getIterator(), I.getDebugLoc(), | 
|  | 1673 | TII.get(AArch64::SUBREG_TO_REG)) | 
|  | 1674 | .addDef(SrcReg) | 
|  | 1675 | .addImm(0) | 
|  | 1676 | .addUse(I.getOperand(2).getReg()) | 
|  | 1677 | .addImm(AArch64::sub_32); | 
|  | 1678 | RBI.constrainGenericRegister(I.getOperand(2).getReg(), | 
|  | 1679 | AArch64::GPR32RegClass, MRI); | 
|  | 1680 | I.getOperand(2).setReg(SrcReg); | 
|  | 1681 |  | 
|  | 1682 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1683 | } | 
| Ahmed Bougacha | 0306b5e | 2016-08-16 14:02:42 +0000 | [diff] [blame] | 1684 | case TargetOpcode::G_FRAME_INDEX: { | 
|  | 1685 | // allocas and G_FRAME_INDEX are only supported in addrspace(0). | 
| Tim Northover | 5ae8350 | 2016-09-15 09:20:34 +0000 | [diff] [blame] | 1686 | if (Ty != LLT::pointer(0, 64)) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1687 | LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty | 
|  | 1688 | << ", expected: " << LLT::pointer(0, 64) << '\n'); | 
| Ahmed Bougacha | 0306b5e | 2016-08-16 14:02:42 +0000 | [diff] [blame] | 1689 | return false; | 
|  | 1690 | } | 
| Ahmed Bougacha | 0306b5e | 2016-08-16 14:02:42 +0000 | [diff] [blame] | 1691 | I.setDesc(TII.get(AArch64::ADDXri)); | 
| Ahmed Bougacha | 0306b5e | 2016-08-16 14:02:42 +0000 | [diff] [blame] | 1692 |  | 
|  | 1693 | // MOs for a #0 shifted immediate. | 
|  | 1694 | I.addOperand(MachineOperand::CreateImm(0)); | 
|  | 1695 | I.addOperand(MachineOperand::CreateImm(0)); | 
|  | 1696 |  | 
|  | 1697 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1698 | } | 
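|  |  | // i.e. (sketch) %p:gpr(p0) = G_FRAME_INDEX %stack.0 is selected as | 
|  |  | //   %p = ADDXri %stack.0, 0, 0 | 
|  |  | // with the trailing operands forming an unshifted #0 immediate. | 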
| Tim Northover | bdf1624 | 2016-10-10 21:50:00 +0000 | [diff] [blame] | 1699 |  | 
|  | 1700 | case TargetOpcode::G_GLOBAL_VALUE: { | 
|  | 1701 | auto GV = I.getOperand(1).getGlobal(); | 
| Tim Northover | 01eb869 | 2019-08-09 09:32:38 +0000 | [diff] [blame] | 1702 | if (GV->isThreadLocal()) | 
|  | 1703 | return selectTLSGlobalValue(I, MRI); | 
|  | 1704 |  | 
| Peter Collingbourne | 33773d5 | 2019-07-31 20:14:09 +0000 | [diff] [blame] | 1705 | unsigned OpFlags = STI.ClassifyGlobalReference(GV, TM); | 
| Tim Northover | fe7c59a | 2016-12-13 18:25:38 +0000 | [diff] [blame] | 1706 | if (OpFlags & AArch64II::MO_GOT) { | 
| Tim Northover | bdf1624 | 2016-10-10 21:50:00 +0000 | [diff] [blame] | 1707 | I.setDesc(TII.get(AArch64::LOADgot)); | 
| Tim Northover | fe7c59a | 2016-12-13 18:25:38 +0000 | [diff] [blame] | 1708 | I.getOperand(1).setTargetFlags(OpFlags); | 
| Amara Emerson | d578577 | 2018-01-18 19:21:27 +0000 | [diff] [blame] | 1709 | } else if (TM.getCodeModel() == CodeModel::Large) { | 
|  | 1710 | // Materialize the global using movz/movk instructions. | 
| Amara Emerson | 1e8c164 | 2018-07-31 00:09:02 +0000 | [diff] [blame] | 1711 | materializeLargeCMVal(I, GV, OpFlags); | 
| Amara Emerson | d578577 | 2018-01-18 19:21:27 +0000 | [diff] [blame] | 1712 | I.eraseFromParent(); | 
|  | 1713 | return true; | 
| David Green | 9dd1d45 | 2018-08-22 11:31:39 +0000 | [diff] [blame] | 1714 | } else if (TM.getCodeModel() == CodeModel::Tiny) { | 
|  | 1715 | I.setDesc(TII.get(AArch64::ADR)); | 
|  | 1716 | I.getOperand(1).setTargetFlags(OpFlags); | 
| Tim Northover | fe7c59a | 2016-12-13 18:25:38 +0000 | [diff] [blame] | 1717 | } else { | 
| Tim Northover | bdf1624 | 2016-10-10 21:50:00 +0000 | [diff] [blame] | 1718 | I.setDesc(TII.get(AArch64::MOVaddr)); | 
|  | 1719 | I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE); | 
|  | 1720 | MachineInstrBuilder MIB(MF, I); | 
|  | 1721 | MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(), | 
|  | 1722 | OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | 
|  | 1723 | } | 
|  | 1724 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1725 | } | 
|  | 1726 |  | 
| Amara Emerson | d3144a4 | 2019-06-06 07:58:37 +0000 | [diff] [blame] | 1727 | case TargetOpcode::G_ZEXTLOAD: | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1728 | case TargetOpcode::G_LOAD: | 
|  | 1729 | case TargetOpcode::G_STORE: { | 
| Amara Emerson | d3144a4 | 2019-06-06 07:58:37 +0000 | [diff] [blame] | 1730 | bool IsZExtLoad = I.getOpcode() == TargetOpcode::G_ZEXTLOAD; | 
|  | 1731 | MachineIRBuilder MIB(I); | 
|  | 1732 |  | 
| Tim Northover | 0f140c7 | 2016-09-09 11:46:34 +0000 | [diff] [blame] | 1733 | LLT PtrTy = MRI.getType(I.getOperand(1).getReg()); | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1734 |  | 
| Tim Northover | 5ae8350 | 2016-09-15 09:20:34 +0000 | [diff] [blame] | 1735 | if (PtrTy != LLT::pointer(0, 64)) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1736 | LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy | 
|  | 1737 | << ", expected: " << LLT::pointer(0, 64) << '\n'); | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1738 | return false; | 
|  | 1739 | } | 
|  | 1740 |  | 
| Daniel Sanders | 3c1c4c0 | 2017-12-05 05:52:07 +0000 | [diff] [blame] | 1741 | auto &MemOp = **I.memoperands_begin(); | 
|  | 1742 | if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) { | 
| Amara Emerson | 1222cfd | 2019-08-14 21:30:30 +0000 | [diff] [blame] | 1743 | // For now we just support s8 acquire loads to be able to compile stack | 
|  | 1744 | // protector code. | 
|  | 1745 | if (MemOp.getOrdering() == AtomicOrdering::Acquire && | 
|  | 1746 | MemOp.getSize() == 1) { | 
|  | 1747 | I.setDesc(TII.get(AArch64::LDARB)); | 
|  | 1748 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1749 | } | 
|  | 1750 | LLVM_DEBUG(dbgs() << "Atomic load/store not fully supported yet\n"); | 
| Daniel Sanders | 3c1c4c0 | 2017-12-05 05:52:07 +0000 | [diff] [blame] | 1751 | return false; | 
|  | 1752 | } | 
| Daniel Sanders | f84bc37 | 2018-05-05 20:53:24 +0000 | [diff] [blame] | 1753 | unsigned MemSizeInBits = MemOp.getSize() * 8; | 
| Daniel Sanders | 3c1c4c0 | 2017-12-05 05:52:07 +0000 | [diff] [blame] | 1754 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1755 | const Register PtrReg = I.getOperand(1).getReg(); | 
| Ahmed Bougacha | f0b22c4 | 2017-03-27 18:14:20 +0000 | [diff] [blame] | 1756 | #ifndef NDEBUG | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1757 | const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI); | 
| Ahmed Bougacha | f0b22c4 | 2017-03-27 18:14:20 +0000 | [diff] [blame] | 1758 | // Sanity-check the pointer register. | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1759 | assert(PtrRB.getID() == AArch64::GPRRegBankID && | 
|  | 1760 | "Load/Store pointer operand isn't a GPR"); | 
| Tim Northover | 0f140c7 | 2016-09-09 11:46:34 +0000 | [diff] [blame] | 1761 | assert(MRI.getType(PtrReg).isPointer() && | 
|  | 1762 | "Load/Store pointer operand isn't a pointer"); | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1763 | #endif | 
|  | 1764 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1765 | const Register ValReg = I.getOperand(0).getReg(); | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1766 | const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI); | 
|  | 1767 |  | 
|  | 1768 | const unsigned NewOpc = | 
| Daniel Sanders | f84bc37 | 2018-05-05 20:53:24 +0000 | [diff] [blame] | 1769 | selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits); | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1770 | if (NewOpc == I.getOpcode()) | 
|  | 1771 | return false; | 
|  | 1772 |  | 
|  | 1773 | I.setDesc(TII.get(NewOpc)); | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1774 |  | 
| Ahmed Bougacha | 8a65408 | 2017-03-27 17:31:52 +0000 | [diff] [blame] | 1775 | uint64_t Offset = 0; | 
|  | 1776 | auto *PtrMI = MRI.getVRegDef(PtrReg); | 
|  | 1777 |  | 
|  | 1778 | // Try to fold a GEP into our unsigned immediate addressing mode. | 
|  | 1779 | if (PtrMI->getOpcode() == TargetOpcode::G_GEP) { | 
|  | 1780 | if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) { | 
|  | 1781 | int64_t Imm = *COff; | 
| Daniel Sanders | f84bc37 | 2018-05-05 20:53:24 +0000 | [diff] [blame] | 1782 | const unsigned Size = MemSizeInBits / 8; | 
| Ahmed Bougacha | 8a65408 | 2017-03-27 17:31:52 +0000 | [diff] [blame] | 1783 | const unsigned Scale = Log2_32(Size); | 
|  | 1784 | if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) { | 
| Daniel Sanders | 5ae66e5 | 2019-08-12 22:40:53 +0000 | [diff] [blame] | 1785 | Register Ptr2Reg = PtrMI->getOperand(1).getReg(); | 
| Ahmed Bougacha | 8a65408 | 2017-03-27 17:31:52 +0000 | [diff] [blame] | 1786 | I.getOperand(1).setReg(Ptr2Reg); | 
|  | 1787 | PtrMI = MRI.getVRegDef(Ptr2Reg); | 
|  | 1788 | Offset = Imm / Size; | 
|  | 1789 | } | 
|  | 1790 | } | 
|  | 1791 | } | 
|  | 1792 |  | 
| Ahmed Bougacha | f75782f | 2017-03-27 17:31:56 +0000 | [diff] [blame] | 1793 | // If we haven't folded anything into our addressing mode yet, try to fold | 
|  | 1794 | // a frame index into the base+offset. | 
|  | 1795 | if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX) | 
|  | 1796 | I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex()); | 
|  | 1797 |  | 
| Ahmed Bougacha | 8a65408 | 2017-03-27 17:31:52 +0000 | [diff] [blame] | 1798 | I.addOperand(MachineOperand::CreateImm(Offset)); | 
| Ahmed Bougacha | 85a66a6 | 2017-03-27 17:31:48 +0000 | [diff] [blame] | 1799 |  | 
|  | 1800 | // If we're storing a 0, use WZR/XZR. | 
|  | 1801 | if (auto CVal = getConstantVRegVal(ValReg, MRI)) { | 
|  | 1802 | if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) { | 
|  | 1803 | if (I.getOpcode() == AArch64::STRWui) | 
|  | 1804 | I.getOperand(0).setReg(AArch64::WZR); | 
|  | 1805 | else if (I.getOpcode() == AArch64::STRXui) | 
|  | 1806 | I.getOperand(0).setReg(AArch64::XZR); | 
|  | 1807 | } | 
|  | 1808 | } | 
|  | 1809 |  | 
| Amara Emerson | d3144a4 | 2019-06-06 07:58:37 +0000 | [diff] [blame] | 1810 | if (IsZExtLoad) { | 
|  | 1811 | // The zextload from a smaller type to i32 should be handled by the importer. | 
|  | 1812 | if (MRI.getType(ValReg).getSizeInBits() != 64) | 
|  | 1813 | return false; | 
|  | 1814 | // If we have a ZEXTLOAD then change the load's type to be a narrower reg, | 
|  | 1815 | // and zero-extend with SUBREG_TO_REG. | 
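|  |  | // Roughly what this emits for, e.g., a 4-byte zextload to s64 (virtual | 
|  |  | // register names here are illustrative only): | 
|  |  | //   %ld:gpr32 = LDRWui %ptr, #off        ; 32-bit load zeroes the upper bits | 
|  |  | //   %dst:gpr64all = SUBREG_TO_REG 0, %ld, %subreg.sub_32 | 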
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1816 | Register LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); | 
|  | 1817 | Register DstReg = I.getOperand(0).getReg(); | 
| Amara Emerson | d3144a4 | 2019-06-06 07:58:37 +0000 | [diff] [blame] | 1818 | I.getOperand(0).setReg(LdReg); | 
|  | 1819 |  | 
|  | 1820 | MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator())); | 
|  | 1821 | MIB.buildInstr(AArch64::SUBREG_TO_REG, {DstReg}, {}) | 
|  | 1822 | .addImm(0) | 
|  | 1823 | .addUse(LdReg) | 
|  | 1824 | .addImm(AArch64::sub_32); | 
|  | 1825 | constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1826 | return RBI.constrainGenericRegister(DstReg, AArch64::GPR64allRegClass, | 
|  | 1827 | MRI); | 
|  | 1828 | } | 
| Ahmed Bougacha | 7adfac5 | 2016-07-29 16:56:16 +0000 | [diff] [blame] | 1829 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1830 | } | 
|  | 1831 |  | 
| Tim Northover | 9dd78f8 | 2017-02-08 21:22:25 +0000 | [diff] [blame] | 1832 | case TargetOpcode::G_SMULH: | 
|  | 1833 | case TargetOpcode::G_UMULH: { | 
|  | 1834 | // Reject the various things we don't support yet. | 
|  | 1835 | if (unsupportedBinOp(I, RBI, MRI, TRI)) | 
|  | 1836 | return false; | 
|  | 1837 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1838 | const Register DefReg = I.getOperand(0).getReg(); | 
| Tim Northover | 9dd78f8 | 2017-02-08 21:22:25 +0000 | [diff] [blame] | 1839 | const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); | 
|  | 1840 |  | 
|  | 1841 | if (RB.getID() != AArch64::GPRRegBankID) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1842 | LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n"); | 
| Tim Northover | 9dd78f8 | 2017-02-08 21:22:25 +0000 | [diff] [blame] | 1843 | return false; | 
|  | 1844 | } | 
|  | 1845 |  | 
|  | 1846 | if (Ty != LLT::scalar(64)) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1847 | LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty | 
|  | 1848 | << ", expected: " << LLT::scalar(64) << '\n'); | 
| Tim Northover | 9dd78f8 | 2017-02-08 21:22:25 +0000 | [diff] [blame] | 1849 | return false; | 
|  | 1850 | } | 
|  | 1851 |  | 
|  | 1852 | unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr | 
|  | 1853 | : AArch64::UMULHrr; | 
|  | 1854 | I.setDesc(TII.get(NewOpc)); | 
|  | 1855 |  | 
|  | 1856 | // Now that we selected an opcode, we need to constrain the register | 
|  | 1857 | // operands to use appropriate classes. | 
|  | 1858 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1859 | } | 
| Ahmed Bougacha | 33e19fe | 2016-08-18 16:05:11 +0000 | [diff] [blame] | 1860 | case TargetOpcode::G_FADD: | 
|  | 1861 | case TargetOpcode::G_FSUB: | 
|  | 1862 | case TargetOpcode::G_FMUL: | 
|  | 1863 | case TargetOpcode::G_FDIV: | 
|  | 1864 |  | 
| Ahmed Bougacha | 2ac5bf9 | 2016-08-16 14:02:47 +0000 | [diff] [blame] | 1865 | case TargetOpcode::G_ASHR: | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 1866 | if (MRI.getType(I.getOperand(0).getReg()).isVector()) | 
|  | 1867 | return selectVectorASHR(I, MRI); | 
|  | 1868 | LLVM_FALLTHROUGH; | 
|  | 1869 | case TargetOpcode::G_SHL: | 
|  | 1870 | if (Opcode == TargetOpcode::G_SHL && | 
|  | 1871 | MRI.getType(I.getOperand(0).getReg()).isVector()) | 
|  | 1872 | return selectVectorSHL(I, MRI); | 
|  | 1873 | LLVM_FALLTHROUGH; | 
|  | 1874 | case TargetOpcode::G_OR: | 
| Jessica Paquette | 728b18f | 2019-07-24 23:11:01 +0000 | [diff] [blame] | 1875 | case TargetOpcode::G_LSHR: { | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 1876 | // Reject the various things we don't support yet. | 
| Ahmed Bougacha | 59e160a | 2016-08-16 14:37:40 +0000 | [diff] [blame] | 1877 | if (unsupportedBinOp(I, RBI, MRI, TRI)) | 
|  | 1878 | return false; | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 1879 |  | 
| Ahmed Bougacha | 59e160a | 2016-08-16 14:37:40 +0000 | [diff] [blame] | 1880 | const unsigned OpSize = Ty.getSizeInBits(); | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 1881 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1882 | const Register DefReg = I.getOperand(0).getReg(); | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 1883 | const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); | 
|  | 1884 |  | 
|  | 1885 | const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize); | 
|  | 1886 | if (NewOpc == I.getOpcode()) | 
|  | 1887 | return false; | 
|  | 1888 |  | 
|  | 1889 | I.setDesc(TII.get(NewOpc)); | 
|  | 1890 | // FIXME: Should the type always be reset in setDesc? | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 1891 |  | 
|  | 1892 | // Now that we selected an opcode, we need to constrain the register | 
|  | 1893 | // operands to use appropriate classes. | 
|  | 1894 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1895 | } | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 1896 |  | 
| Jessica Paquette | 728b18f | 2019-07-24 23:11:01 +0000 | [diff] [blame] | 1897 | case TargetOpcode::G_GEP: { | 
|  | 1898 | MachineIRBuilder MIRBuilder(I); | 
|  | 1899 | emitADD(I.getOperand(0).getReg(), I.getOperand(1), I.getOperand(2), | 
|  | 1900 | MIRBuilder); | 
|  | 1901 | I.eraseFromParent(); | 
|  | 1902 | return true; | 
|  | 1903 | } | 
| Jessica Paquette | 7d6784f | 2019-03-14 22:54:29 +0000 | [diff] [blame] | 1904 | case TargetOpcode::G_UADDO: { | 
|  | 1905 | // TODO: Support other types. | 
|  | 1906 | unsigned OpSize = Ty.getSizeInBits(); | 
|  | 1907 | if (OpSize != 32 && OpSize != 64) { | 
|  | 1908 | LLVM_DEBUG( | 
|  | 1909 | dbgs() | 
|  | 1910 | << "G_UADDO currently only supported for 32 and 64 b types.\n"); | 
|  | 1911 | return false; | 
|  | 1912 | } | 
|  | 1913 |  | 
|  | 1914 | // TODO: Support vectors. | 
|  | 1915 | if (Ty.isVector()) { | 
|  | 1916 | LLVM_DEBUG(dbgs() << "G_UADDO currently only supported for scalars.\n"); | 
|  | 1917 | return false; | 
|  | 1918 | } | 
|  | 1919 |  | 
|  | 1920 | // Emit the add, setting the condition flags (ADDS). | 
|  | 1921 | unsigned AddsOpc = OpSize == 32 ? AArch64::ADDSWrr : AArch64::ADDSXrr; | 
|  | 1922 | MachineIRBuilder MIRBuilder(I); | 
|  | 1923 | auto AddsMI = MIRBuilder.buildInstr( | 
|  | 1924 | AddsOpc, {I.getOperand(0).getReg()}, | 
|  | 1925 | {I.getOperand(2).getReg(), I.getOperand(3).getReg()}); | 
|  | 1926 | constrainSelectedInstRegOperands(*AddsMI, TII, TRI, RBI); | 
|  | 1927 |  | 
|  | 1928 | // Now, put the overflow result in the register given by the first operand | 
|  | 1929 | // to the G_UADDO. CSINC increments the result when the predicate is false, | 
|  | 1930 | // so to get the increment when it's true, we need to use the inverse. In | 
|  | 1931 | // this case, we want to increment when carry is set. | 
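|  |  | // For reference: CSINC Wd, Wn, Wm, cond yields Wn when cond holds and | 
|  |  | // Wm + 1 otherwise. With both sources tied to WZR, selecting on the inverse | 
|  |  | // of HS therefore materializes 1 exactly when the carry flag is set. | 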
|  | 1932 | auto CsetMI = MIRBuilder | 
|  | 1933 | .buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()}, | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1934 | {Register(AArch64::WZR), Register(AArch64::WZR)}) | 
| Jessica Paquette | 7d6784f | 2019-03-14 22:54:29 +0000 | [diff] [blame] | 1935 | .addImm(getInvertedCondCode(AArch64CC::HS)); | 
|  | 1936 | constrainSelectedInstRegOperands(*CsetMI, TII, TRI, RBI); | 
|  | 1937 | I.eraseFromParent(); | 
|  | 1938 | return true; | 
|  | 1939 | } | 
|  | 1940 |  | 
| Tim Northover | 398c5f5 | 2017-02-14 20:56:29 +0000 | [diff] [blame] | 1941 | case TargetOpcode::G_PTR_MASK: { | 
|  | 1942 | uint64_t Align = I.getOperand(2).getImm(); | 
|  | 1943 | if (Align >= 64 || Align == 0) | 
|  | 1944 | return false; | 
|  | 1945 |  | 
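|  |  | // Worked example: Align == 4 gives Mask == ~0xF, which clears the low four | 
|  |  | // bits and rounds the pointer down to a 16-byte boundary. | 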
|  | 1946 | uint64_t Mask = ~((1ULL << Align) - 1); | 
|  | 1947 | I.setDesc(TII.get(AArch64::ANDXri)); | 
|  | 1948 | I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64)); | 
|  | 1949 |  | 
|  | 1950 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 1951 | } | 
| Tim Northover | 037af52c | 2016-10-31 18:31:09 +0000 | [diff] [blame] | 1952 | case TargetOpcode::G_PTRTOINT: | 
| Tim Northover | fb8d989 | 2016-10-12 22:49:15 +0000 | [diff] [blame] | 1953 | case TargetOpcode::G_TRUNC: { | 
|  | 1954 | const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 1955 | const LLT SrcTy = MRI.getType(I.getOperand(1).getReg()); | 
|  | 1956 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 1957 | const Register DstReg = I.getOperand(0).getReg(); | 
|  | 1958 | const Register SrcReg = I.getOperand(1).getReg(); | 
| Tim Northover | fb8d989 | 2016-10-12 22:49:15 +0000 | [diff] [blame] | 1959 |  | 
|  | 1960 | const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI); | 
|  | 1961 | const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI); | 
|  | 1962 |  | 
|  | 1963 | if (DstRB.getID() != SrcRB.getID()) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1964 | LLVM_DEBUG( | 
|  | 1965 | dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n"); | 
| Tim Northover | fb8d989 | 2016-10-12 22:49:15 +0000 | [diff] [blame] | 1966 | return false; | 
|  | 1967 | } | 
|  | 1968 |  | 
|  | 1969 | if (DstRB.getID() == AArch64::GPRRegBankID) { | 
|  | 1970 | const TargetRegisterClass *DstRC = | 
|  | 1971 | getRegClassForTypeOnBank(DstTy, DstRB, RBI); | 
|  | 1972 | if (!DstRC) | 
|  | 1973 | return false; | 
|  | 1974 |  | 
|  | 1975 | const TargetRegisterClass *SrcRC = | 
|  | 1976 | getRegClassForTypeOnBank(SrcTy, SrcRB, RBI); | 
|  | 1977 | if (!SrcRC) | 
|  | 1978 | return false; | 
|  | 1979 |  | 
|  | 1980 | if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) || | 
|  | 1981 | !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1982 | LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n"); | 
| Tim Northover | fb8d989 | 2016-10-12 22:49:15 +0000 | [diff] [blame] | 1983 | return false; | 
|  | 1984 | } | 
|  | 1985 |  | 
|  | 1986 | if (DstRC == SrcRC) { | 
|  | 1987 | // Nothing to be done | 
| Daniel Sanders | cc36dbf | 2017-06-27 10:11:39 +0000 | [diff] [blame] | 1988 | } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) && | 
|  | 1989 | SrcTy == LLT::scalar(64)) { | 
|  | 1990 | llvm_unreachable("TableGen can import this case"); | 
|  | 1991 | return false; | 
| Tim Northover | fb8d989 | 2016-10-12 22:49:15 +0000 | [diff] [blame] | 1992 | } else if (DstRC == &AArch64::GPR32RegClass && | 
|  | 1993 | SrcRC == &AArch64::GPR64RegClass) { | 
|  | 1994 | I.getOperand(1).setSubReg(AArch64::sub_32); | 
|  | 1995 | } else { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 1996 | LLVM_DEBUG( | 
|  | 1997 | dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n"); | 
| Tim Northover | fb8d989 | 2016-10-12 22:49:15 +0000 | [diff] [blame] | 1998 | return false; | 
|  | 1999 | } | 
|  | 2000 |  | 
|  | 2001 | I.setDesc(TII.get(TargetOpcode::COPY)); | 
|  | 2002 | return true; | 
|  | 2003 | } else if (DstRB.getID() == AArch64::FPRRegBankID) { | 
|  | 2004 | if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) { | 
|  | 2005 | I.setDesc(TII.get(AArch64::XTNv4i16)); | 
|  | 2006 | constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 2007 | return true; | 
|  | 2008 | } | 
| Amara Emerson | 511f7f5 | 2019-07-23 22:05:13 +0000 | [diff] [blame] | 2009 |  | 
|  | 2010 | if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128) { | 
|  | 2011 | MachineIRBuilder MIB(I); | 
|  | 2012 | MachineInstr *Extract = emitExtractVectorElt( | 
|  | 2013 | DstReg, DstRB, LLT::scalar(DstTy.getSizeInBits()), SrcReg, 0, MIB); | 
|  | 2014 | if (!Extract) | 
|  | 2015 | return false; | 
|  | 2016 | I.eraseFromParent(); | 
|  | 2017 | return true; | 
|  | 2018 | } | 
| Tim Northover | fb8d989 | 2016-10-12 22:49:15 +0000 | [diff] [blame] | 2019 | } | 
|  | 2020 |  | 
|  | 2021 | return false; | 
|  | 2022 | } | 
|  | 2023 |  | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2024 | case TargetOpcode::G_ANYEXT: { | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2025 | const Register DstReg = I.getOperand(0).getReg(); | 
|  | 2026 | const Register SrcReg = I.getOperand(1).getReg(); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2027 |  | 
| Quentin Colombet | cb629a8 | 2016-10-12 03:57:49 +0000 | [diff] [blame] | 2028 | const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI); | 
|  | 2029 | if (RBDst.getID() != AArch64::GPRRegBankID) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 2030 | LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst | 
|  | 2031 | << ", expected: GPR\n"); | 
| Quentin Colombet | cb629a8 | 2016-10-12 03:57:49 +0000 | [diff] [blame] | 2032 | return false; | 
|  | 2033 | } | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2034 |  | 
| Quentin Colombet | cb629a8 | 2016-10-12 03:57:49 +0000 | [diff] [blame] | 2035 | const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI); | 
|  | 2036 | if (RBSrc.getID() != AArch64::GPRRegBankID) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 2037 | LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc | 
|  | 2038 | << ", expected: GPR\n"); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2039 | return false; | 
|  | 2040 | } | 
|  | 2041 |  | 
|  | 2042 | const unsigned DstSize = MRI.getType(DstReg).getSizeInBits(); | 
|  | 2043 |  | 
|  | 2044 | if (DstSize == 0) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 2045 | LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n"); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2046 | return false; | 
|  | 2047 | } | 
|  | 2048 |  | 
| Quentin Colombet | cb629a8 | 2016-10-12 03:57:49 +0000 | [diff] [blame] | 2049 | if (DstSize != 64 && DstSize > 32) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 2050 | LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize | 
|  | 2051 | << ", expected: 32 or 64\n"); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2052 | return false; | 
|  | 2053 | } | 
| Quentin Colombet | cb629a8 | 2016-10-12 03:57:49 +0000 | [diff] [blame] | 2054 | // At this point G_ANYEXT is just like a plain COPY, but we need | 
|  | 2055 | // to explicitly form the 64-bit value when widening to 64 bits. | 
|  | 2056 | if (DstSize > 32) { | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2057 | Register ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass); | 
| Quentin Colombet | cb629a8 | 2016-10-12 03:57:49 +0000 | [diff] [blame] | 2058 | BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG)) | 
|  | 2059 | .addDef(ExtSrc) | 
|  | 2060 | .addImm(0) | 
|  | 2061 | .addUse(SrcReg) | 
|  | 2062 | .addImm(AArch64::sub_32); | 
|  | 2063 | I.getOperand(1).setReg(ExtSrc); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2064 | } | 
| Quentin Colombet | cb629a8 | 2016-10-12 03:57:49 +0000 | [diff] [blame] | 2065 | return selectCopy(I, TII, MRI, TRI, RBI); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2066 | } | 
|  | 2067 |  | 
|  | 2068 | case TargetOpcode::G_ZEXT: | 
|  | 2069 | case TargetOpcode::G_SEXT: { | 
|  | 2070 | unsigned Opcode = I.getOpcode(); | 
| Amara Emerson | c07fe30 | 2019-07-26 00:01:09 +0000 | [diff] [blame] | 2071 | const bool IsSigned = Opcode == TargetOpcode::G_SEXT; | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2072 | const Register DefReg = I.getOperand(0).getReg(); | 
|  | 2073 | const Register SrcReg = I.getOperand(1).getReg(); | 
| Amara Emerson | c07fe30 | 2019-07-26 00:01:09 +0000 | [diff] [blame] | 2074 | const LLT DstTy = MRI.getType(DefReg); | 
|  | 2075 | const LLT SrcTy = MRI.getType(SrcReg); | 
|  | 2076 | unsigned DstSize = DstTy.getSizeInBits(); | 
|  | 2077 | unsigned SrcSize = SrcTy.getSizeInBits(); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2078 |  | 
| Amara Emerson | c07fe30 | 2019-07-26 00:01:09 +0000 | [diff] [blame] | 2079 | assert((*RBI.getRegBank(DefReg, MRI, TRI)).getID() == | 
|  | 2080 | AArch64::GPRRegBankID && | 
|  | 2081 | "Unexpected ext regbank"); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2082 |  | 
| Amara Emerson | c07fe30 | 2019-07-26 00:01:09 +0000 | [diff] [blame] | 2083 | MachineIRBuilder MIB(I); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2084 | MachineInstr *ExtI; | 
| Amara Emerson | c07fe30 | 2019-07-26 00:01:09 +0000 | [diff] [blame] | 2085 | if (DstTy.isVector()) | 
|  | 2086 | return false; // Should be handled by imported patterns. | 
|  | 2087 |  | 
| Amara Emerson | 73752ab | 2019-08-02 21:15:36 +0000 | [diff] [blame] | 2088 | // First, check whether we're extending the result of a load with a dest type | 
|  | 2089 | // smaller than 32 bits; if so, this zext is redundant. GPR32 is the smallest | 
|  | 2090 | // GPR register on AArch64, and all smaller loads automatically | 
|  | 2091 | // zero-extend the upper bits. E.g. | 
|  | 2092 | // %v(s8) = G_LOAD %p, :: (load 1) | 
|  | 2093 | // %v2(s32) = G_ZEXT %v(s8) | 
|  | 2094 | if (!IsSigned) { | 
|  | 2095 | auto *LoadMI = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI); | 
|  | 2096 | if (LoadMI && | 
|  | 2097 | RBI.getRegBank(SrcReg, MRI, TRI)->getID() == AArch64::GPRRegBankID) { | 
|  | 2098 | const MachineMemOperand *MemOp = *LoadMI->memoperands_begin(); | 
|  | 2099 | unsigned BytesLoaded = MemOp->getSize(); | 
|  | 2100 | if (BytesLoaded < 4 && SrcTy.getSizeInBytes() == BytesLoaded) | 
|  | 2101 | return selectCopy(I, TII, MRI, TRI, RBI); | 
|  | 2102 | } | 
|  | 2103 | } | 
|  | 2104 |  | 
| Amara Emerson | c07fe30 | 2019-07-26 00:01:09 +0000 | [diff] [blame] | 2105 | if (DstSize == 64) { | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2106 | // FIXME: Can we avoid manually doing this? | 
|  | 2107 | if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 2108 | LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode) | 
|  | 2109 | << " operand\n"); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2110 | return false; | 
|  | 2111 | } | 
|  | 2112 |  | 
| Amara Emerson | c07fe30 | 2019-07-26 00:01:09 +0000 | [diff] [blame] | 2113 | auto SubregToReg = | 
|  | 2114 | MIB.buildInstr(AArch64::SUBREG_TO_REG, {&AArch64::GPR64RegClass}, {}) | 
|  | 2115 | .addImm(0) | 
|  | 2116 | .addUse(SrcReg) | 
|  | 2117 | .addImm(AArch64::sub_32); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2118 |  | 
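|  |  | // [SU]BFMXri dst, src, #0, #(SrcSize - 1) copies bits [SrcSize-1:0] into the | 
|  |  | // destination and sign- or zero-fills the remainder, which is exactly the | 
|  |  | // 64-bit extend we want. | 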
| Amara Emerson | c07fe30 | 2019-07-26 00:01:09 +0000 | [diff] [blame] | 2119 | ExtI = MIB.buildInstr(IsSigned ? AArch64::SBFMXri : AArch64::UBFMXri, | 
|  | 2120 | {DefReg}, {SubregToReg}) | 
|  | 2121 | .addImm(0) | 
|  | 2122 | .addImm(SrcSize - 1); | 
|  | 2123 | } else if (DstSize <= 32) { | 
|  | 2124 | ExtI = MIB.buildInstr(IsSigned ? AArch64::SBFMWri : AArch64::UBFMWri, | 
|  | 2125 | {DefReg}, {SrcReg}) | 
|  | 2126 | .addImm(0) | 
|  | 2127 | .addImm(SrcSize - 1); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2128 | } else { | 
|  | 2129 | return false; | 
|  | 2130 | } | 
|  | 2131 |  | 
|  | 2132 | constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); | 
| Tim Northover | 3d38b3a | 2016-10-11 20:50:21 +0000 | [diff] [blame] | 2133 | I.eraseFromParent(); | 
|  | 2134 | return true; | 
|  | 2135 | } | 
| Tim Northover | c1d8c2b | 2016-10-11 22:29:23 +0000 | [diff] [blame] | 2136 |  | 
| Tim Northover | 69271c6 | 2016-10-12 22:49:11 +0000 | [diff] [blame] | 2137 | case TargetOpcode::G_SITOFP: | 
|  | 2138 | case TargetOpcode::G_UITOFP: | 
|  | 2139 | case TargetOpcode::G_FPTOSI: | 
|  | 2140 | case TargetOpcode::G_FPTOUI: { | 
|  | 2141 | const LLT DstTy = MRI.getType(I.getOperand(0).getReg()), | 
|  | 2142 | SrcTy = MRI.getType(I.getOperand(1).getReg()); | 
|  | 2143 | const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy); | 
|  | 2144 | if (NewOpc == Opcode) | 
|  | 2145 | return false; | 
|  | 2146 |  | 
|  | 2147 | I.setDesc(TII.get(NewOpc)); | 
|  | 2148 | constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 2149 |  | 
|  | 2150 | return true; | 
|  | 2151 | } | 
|  | 2152 |  | 
|  | 2153 |  | 
| Tim Northover | c1d8c2b | 2016-10-11 22:29:23 +0000 | [diff] [blame] | 2154 | case TargetOpcode::G_INTTOPTR: | 
| Daniel Sanders | edd0784 | 2017-08-17 09:26:14 +0000 | [diff] [blame] | 2155 | // The importer is currently unable to import pointer types since they | 
|  | 2156 | // didn't exist in SelectionDAG. | 
| Daniel Sanders | eb2f5f3 | 2017-08-15 15:10:31 +0000 | [diff] [blame] | 2157 | return selectCopy(I, TII, MRI, TRI, RBI); | 
| Daniel Sanders | 16e6dd3 | 2017-08-15 13:50:09 +0000 | [diff] [blame] | 2158 |  | 
| Daniel Sanders | edd0784 | 2017-08-17 09:26:14 +0000 | [diff] [blame] | 2159 | case TargetOpcode::G_BITCAST: | 
|  | 2160 | // Imported SelectionDAG rules can handle every bitcast except those that | 
|  | 2161 | // bitcast from a type to the same type. Ideally, these shouldn't occur | 
| Amara Emerson | b956051 | 2019-04-11 20:32:24 +0000 | [diff] [blame] | 2162 | // but we might not run an optimizer that deletes them. The other exception | 
|  | 2163 | // is bitcasts involving pointer types, as SelectionDAG has no knowledge | 
|  | 2164 | // of them. | 
|  | 2165 | return selectCopy(I, TII, MRI, TRI, RBI); | 
| Daniel Sanders | edd0784 | 2017-08-17 09:26:14 +0000 | [diff] [blame] | 2166 |  | 
| Tim Northover | 9ac0eba | 2016-11-08 00:45:29 +0000 | [diff] [blame] | 2167 | case TargetOpcode::G_SELECT: { | 
|  | 2168 | if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 2169 | LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " | 
|  | 2170 | << MRI.getType(I.getOperand(1).getReg()) | 
|  |  | << ", expected: " << LLT::scalar(1) << '\n'); | 
| Tim Northover | 9ac0eba | 2016-11-08 00:45:29 +0000 | [diff] [blame] | 2171 | return false; | 
|  | 2172 | } | 
|  | 2173 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2174 | const Register CondReg = I.getOperand(1).getReg(); | 
|  | 2175 | const Register TReg = I.getOperand(2).getReg(); | 
|  | 2176 | const Register FReg = I.getOperand(3).getReg(); | 
| Tim Northover | 9ac0eba | 2016-11-08 00:45:29 +0000 | [diff] [blame] | 2177 |  | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 2178 | if (tryOptSelect(I)) | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 2179 | return true; | 
| Tim Northover | 9ac0eba | 2016-11-08 00:45:29 +0000 | [diff] [blame] | 2180 |  | 
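|  |  | // Otherwise, materialize the i1 condition into NZCV by testing its low bit | 
|  |  | // (the ANDS WZR, Cond, #1 below), then conditionally select on NE. | 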
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2181 | unsigned CSelOpc = selectSelectOpc(I, MRI, RBI); | 
| Tim Northover | 9ac0eba | 2016-11-08 00:45:29 +0000 | [diff] [blame] | 2182 | MachineInstr &TstMI = | 
|  | 2183 | *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri)) | 
|  | 2184 | .addDef(AArch64::WZR) | 
|  | 2185 | .addUse(CondReg) | 
|  | 2186 | .addImm(AArch64_AM::encodeLogicalImmediate(1, 32)); | 
|  | 2187 |  | 
|  | 2188 | MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc)) | 
|  | 2189 | .addDef(I.getOperand(0).getReg()) | 
|  | 2190 | .addUse(TReg) | 
|  | 2191 | .addUse(FReg) | 
|  | 2192 | .addImm(AArch64CC::NE); | 
|  | 2193 |  | 
|  | 2194 | constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI); | 
|  | 2195 | constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI); | 
|  | 2196 |  | 
|  | 2197 | I.eraseFromParent(); | 
|  | 2198 | return true; | 
|  | 2199 | } | 
| Tim Northover | 6c02ad5 | 2016-10-12 22:49:04 +0000 | [diff] [blame] | 2200 | case TargetOpcode::G_ICMP: { | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 2201 | if (Ty.isVector()) | 
|  | 2202 | return selectVectorICmp(I, MRI); | 
|  | 2203 |  | 
| Aditya Nandakumar | 02c602e | 2017-07-31 17:00:16 +0000 | [diff] [blame] | 2204 | if (Ty != LLT::scalar(32)) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 2205 | LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty | 
|  | 2206 | << ", expected: " << LLT::scalar(32) << '\n'); | 
| Tim Northover | 6c02ad5 | 2016-10-12 22:49:04 +0000 | [diff] [blame] | 2207 | return false; | 
|  | 2208 | } | 
|  | 2209 |  | 
| Jessica Paquette | 49537bb | 2019-06-17 18:40:06 +0000 | [diff] [blame] | 2210 | MachineIRBuilder MIRBuilder(I); | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 2211 | if (!emitIntegerCompare(I.getOperand(2), I.getOperand(3), I.getOperand(1), | 
|  | 2212 | MIRBuilder)) | 
|  | 2213 | return false; | 
| Jessica Paquette | 49537bb | 2019-06-17 18:40:06 +0000 | [diff] [blame] | 2214 | emitCSetForICMP(I.getOperand(0).getReg(), I.getOperand(1).getPredicate(), | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 2215 | MIRBuilder); | 
| Tim Northover | 6c02ad5 | 2016-10-12 22:49:04 +0000 | [diff] [blame] | 2216 | I.eraseFromParent(); | 
|  | 2217 | return true; | 
|  | 2218 | } | 
|  | 2219 |  | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2220 | case TargetOpcode::G_FCMP: { | 
| Aditya Nandakumar | 02c602e | 2017-07-31 17:00:16 +0000 | [diff] [blame] | 2221 | if (Ty != LLT::scalar(32)) { | 
| Nicola Zaghen | d34e60c | 2018-05-14 12:53:11 +0000 | [diff] [blame] | 2222 | LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty | 
|  | 2223 | << ", expected: " << LLT::scalar(32) << '\n'); | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2224 | return false; | 
|  | 2225 | } | 
|  | 2226 |  | 
| Jessica Paquette | b73ea75b | 2019-05-28 22:52:49 +0000 | [diff] [blame] | 2227 | unsigned CmpOpc = selectFCMPOpc(I, MRI); | 
|  | 2228 | if (!CmpOpc) | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2229 | return false; | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2230 |  | 
|  | 2231 | // FIXME: regbank | 
|  | 2232 |  | 
|  | 2233 | AArch64CC::CondCode CC1, CC2; | 
|  | 2234 | changeFCMPPredToAArch64CC( | 
|  | 2235 | (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2); | 
|  | 2236 |  | 
| Jessica Paquette | b73ea75b | 2019-05-28 22:52:49 +0000 | [diff] [blame] | 2237 | // Partially build the compare. Decide if we need to add a use for the | 
|  | 2238 | // third operand based off whether or not we're comparing against 0.0. | 
|  | 2239 | auto CmpMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc)) | 
|  | 2240 | .addUse(I.getOperand(2).getReg()); | 
|  | 2241 |  | 
|  | 2242 | // If we don't have an immediate compare, then we need to add a use of the | 
|  | 2243 | // register which wasn't used for the immediate. | 
|  | 2244 | // Note that the immediate will always be the last operand. | 
|  | 2245 | if (CmpOpc != AArch64::FCMPSri && CmpOpc != AArch64::FCMPDri) | 
|  | 2246 | CmpMI = CmpMI.addUse(I.getOperand(3).getReg()); | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2247 |  | 
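|  |  | // Some FP predicates need two AArch64 conditions (CC2 != AL below); e.g. | 
|  |  | // "ordered and not equal" holds when either MI or GT does, so we emit one | 
|  |  | // CSINC per condition and OR the two results together. | 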
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2248 | const Register DefReg = I.getOperand(0).getReg(); | 
|  | 2249 | Register Def1Reg = DefReg; | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2250 | if (CC2 != AArch64CC::AL) | 
|  | 2251 | Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); | 
|  | 2252 |  | 
|  | 2253 | MachineInstr &CSetMI = | 
|  | 2254 | *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr)) | 
|  | 2255 | .addDef(Def1Reg) | 
|  | 2256 | .addUse(AArch64::WZR) | 
|  | 2257 | .addUse(AArch64::WZR) | 
| Tim Northover | 33a1a0b | 2017-01-17 23:04:01 +0000 | [diff] [blame] | 2258 | .addImm(getInvertedCondCode(CC1)); | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2259 |  | 
|  | 2260 | if (CC2 != AArch64CC::AL) { | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2261 | Register Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2262 | MachineInstr &CSet2MI = | 
|  | 2263 | *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr)) | 
|  | 2264 | .addDef(Def2Reg) | 
|  | 2265 | .addUse(AArch64::WZR) | 
|  | 2266 | .addUse(AArch64::WZR) | 
| Tim Northover | 33a1a0b | 2017-01-17 23:04:01 +0000 | [diff] [blame] | 2267 | .addImm(getInvertedCondCode(CC2)); | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2268 | MachineInstr &OrMI = | 
|  | 2269 | *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr)) | 
|  | 2270 | .addDef(DefReg) | 
|  | 2271 | .addUse(Def1Reg) | 
|  | 2272 | .addUse(Def2Reg); | 
|  | 2273 | constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI); | 
|  | 2274 | constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI); | 
|  | 2275 | } | 
| Jessica Paquette | b73ea75b | 2019-05-28 22:52:49 +0000 | [diff] [blame] | 2276 | constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI); | 
| Tim Northover | 7dd378d | 2016-10-12 22:49:07 +0000 | [diff] [blame] | 2277 | constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI); | 
|  | 2278 |  | 
|  | 2279 | I.eraseFromParent(); | 
|  | 2280 | return true; | 
|  | 2281 | } | 
| Tim Northover | e9600d8 | 2017-02-08 17:57:27 +0000 | [diff] [blame] | 2282 | case TargetOpcode::G_VASTART: | 
|  | 2283 | return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI) | 
|  | 2284 | : selectVaStartAAPCS(I, MF, MRI); | 
| Jessica Paquette | 7f6fe7c | 2019-04-29 20:58:17 +0000 | [diff] [blame] | 2285 | case TargetOpcode::G_INTRINSIC: | 
|  | 2286 | return selectIntrinsic(I, MRI); | 
| Amara Emerson | 1f5d994 | 2018-04-25 14:43:59 +0000 | [diff] [blame] | 2287 | case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 2288 | return selectIntrinsicWithSideEffects(I, MRI); | 
| Amara Emerson | 1e8c164 | 2018-07-31 00:09:02 +0000 | [diff] [blame] | 2289 | case TargetOpcode::G_IMPLICIT_DEF: { | 
| Justin Bogner | 4fc6966 | 2017-07-12 17:32:32 +0000 | [diff] [blame] | 2290 | I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF)); | 
| Amara Emerson | 58aea52 | 2018-02-02 01:44:43 +0000 | [diff] [blame] | 2291 | const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2292 | const Register DstReg = I.getOperand(0).getReg(); | 
| Amara Emerson | 58aea52 | 2018-02-02 01:44:43 +0000 | [diff] [blame] | 2293 | const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI); | 
|  | 2294 | const TargetRegisterClass *DstRC = | 
|  | 2295 | getRegClassForTypeOnBank(DstTy, DstRB, RBI); | 
|  | 2296 | RBI.constrainGenericRegister(DstReg, *DstRC, MRI); | 
| Justin Bogner | 4fc6966 | 2017-07-12 17:32:32 +0000 | [diff] [blame] | 2297 | return true; | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 2298 | } | 
| Amara Emerson | 1e8c164 | 2018-07-31 00:09:02 +0000 | [diff] [blame] | 2299 | case TargetOpcode::G_BLOCK_ADDR: { | 
|  | 2300 | if (TM.getCodeModel() == CodeModel::Large) { | 
|  | 2301 | materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0); | 
|  | 2302 | I.eraseFromParent(); | 
|  | 2303 | return true; | 
|  | 2304 | } else { | 
|  | 2305 | I.setDesc(TII.get(AArch64::MOVaddrBA)); | 
|  | 2306 | auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA), | 
|  | 2307 | I.getOperand(0).getReg()) | 
|  | 2308 | .addBlockAddress(I.getOperand(1).getBlockAddress(), | 
|  | 2309 | /* Offset */ 0, AArch64II::MO_PAGE) | 
|  | 2310 | .addBlockAddress( | 
|  | 2311 | I.getOperand(1).getBlockAddress(), /* Offset */ 0, | 
|  | 2312 | AArch64II::MO_NC | AArch64II::MO_PAGEOFF); | 
|  | 2313 | I.eraseFromParent(); | 
|  | 2314 | return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI); | 
|  | 2315 | } | 
|  | 2316 | } | 
| Jessica Paquette | 991cb39 | 2019-04-23 20:46:19 +0000 | [diff] [blame] | 2317 | case TargetOpcode::G_INTRINSIC_TRUNC: | 
|  | 2318 | return selectIntrinsicTrunc(I, MRI); | 
| Jessica Paquette | 4fe7574 | 2019-04-23 23:03:03 +0000 | [diff] [blame] | 2319 | case TargetOpcode::G_INTRINSIC_ROUND: | 
|  | 2320 | return selectIntrinsicRound(I, MRI); | 
| Amara Emerson | 5ec1460 | 2018-12-10 18:44:58 +0000 | [diff] [blame] | 2321 | case TargetOpcode::G_BUILD_VECTOR: | 
|  | 2322 | return selectBuildVector(I, MRI); | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2323 | case TargetOpcode::G_MERGE_VALUES: | 
|  | 2324 | return selectMergeValues(I, MRI); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2325 | case TargetOpcode::G_UNMERGE_VALUES: | 
|  | 2326 | return selectUnmergeValues(I, MRI); | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 2327 | case TargetOpcode::G_SHUFFLE_VECTOR: | 
|  | 2328 | return selectShuffleVector(I, MRI); | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2329 | case TargetOpcode::G_EXTRACT_VECTOR_ELT: | 
|  | 2330 | return selectExtractElt(I, MRI); | 
| Jessica Paquette | 5aff1f4 | 2019-03-14 18:01:30 +0000 | [diff] [blame] | 2331 | case TargetOpcode::G_INSERT_VECTOR_ELT: | 
|  | 2332 | return selectInsertElt(I, MRI); | 
| Amara Emerson | 2ff2298 | 2019-03-14 22:48:15 +0000 | [diff] [blame] | 2333 | case TargetOpcode::G_CONCAT_VECTORS: | 
|  | 2334 | return selectConcatVectors(I, MRI); | 
| Amara Emerson | 6e71b34 | 2019-06-21 18:10:41 +0000 | [diff] [blame] | 2335 | case TargetOpcode::G_JUMP_TABLE: | 
|  | 2336 | return selectJumpTable(I, MRI); | 
| Amara Emerson | 1e8c164 | 2018-07-31 00:09:02 +0000 | [diff] [blame] | 2337 | } | 
| Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 2338 |  | 
|  | 2339 | return false; | 
|  | 2340 | } | 
| Daniel Sanders | 8a4bae9 | 2017-03-14 21:32:08 +0000 | [diff] [blame] | 2341 |  | 
| Amara Emerson | 6e71b34 | 2019-06-21 18:10:41 +0000 | [diff] [blame] | 2342 | bool AArch64InstructionSelector::selectBrJT(MachineInstr &I, | 
|  | 2343 | MachineRegisterInfo &MRI) const { | 
|  | 2344 | assert(I.getOpcode() == TargetOpcode::G_BRJT && "Expected G_BRJT"); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2345 | Register JTAddr = I.getOperand(0).getReg(); | 
| Amara Emerson | 6e71b34 | 2019-06-21 18:10:41 +0000 | [diff] [blame] | 2346 | unsigned JTI = I.getOperand(1).getIndex(); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2347 | Register Index = I.getOperand(2).getReg(); | 
| Amara Emerson | 6e71b34 | 2019-06-21 18:10:41 +0000 | [diff] [blame] | 2348 | MachineIRBuilder MIB(I); | 
|  | 2349 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2350 | Register TargetReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass); | 
|  | 2351 | Register ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass); | 
| Amara Emerson | 6e71b34 | 2019-06-21 18:10:41 +0000 | [diff] [blame] | 2352 | MIB.buildInstr(AArch64::JumpTableDest32, {TargetReg, ScratchReg}, | 
|  | 2353 | {JTAddr, Index}) | 
|  | 2354 | .addJumpTableIndex(JTI); | 
|  | 2355 |  | 
|  | 2356 | // Build the indirect branch. | 
|  | 2357 | MIB.buildInstr(AArch64::BR, {}, {TargetReg}); | 
|  | 2358 | I.eraseFromParent(); | 
|  | 2359 | return true; | 
|  | 2360 | } | 
|  | 2361 |  | 
|  | 2362 | bool AArch64InstructionSelector::selectJumpTable( | 
|  | 2363 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 2364 | assert(I.getOpcode() == TargetOpcode::G_JUMP_TABLE && "Expected jump table"); | 
|  | 2365 | assert(I.getOperand(1).isJTI() && "Jump table op should have a JTI!"); | 
|  | 2366 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2367 | Register DstReg = I.getOperand(0).getReg(); | 
| Amara Emerson | 6e71b34 | 2019-06-21 18:10:41 +0000 | [diff] [blame] | 2368 | unsigned JTI = I.getOperand(1).getIndex(); | 
|  | 2369 | // We generate a MOVaddrJT which will get expanded to an ADRP + ADD later. | 
|  | 2370 | MachineIRBuilder MIB(I); | 
|  | 2371 | auto MovMI = | 
|  | 2372 | MIB.buildInstr(AArch64::MOVaddrJT, {DstReg}, {}) | 
|  | 2373 | .addJumpTableIndex(JTI, AArch64II::MO_PAGE) | 
|  | 2374 | .addJumpTableIndex(JTI, AArch64II::MO_NC | AArch64II::MO_PAGEOFF); | 
|  | 2375 | I.eraseFromParent(); | 
|  | 2376 | return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI); | 
|  | 2377 | } | 
|  | 2378 |  | 
| Tim Northover | 01eb869 | 2019-08-09 09:32:38 +0000 | [diff] [blame] | 2379 | bool AArch64InstructionSelector::selectTLSGlobalValue( | 
|  | 2380 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 2381 | if (!STI.isTargetMachO()) | 
|  | 2382 | return false; | 
|  | 2383 | MachineFunction &MF = *I.getParent()->getParent(); | 
|  | 2384 | MF.getFrameInfo().setAdjustsStack(true); | 
|  | 2385 |  | 
|  | 2386 | const GlobalValue &GV = *I.getOperand(1).getGlobal(); | 
|  | 2387 | MachineIRBuilder MIB(I); | 
|  | 2388 |  | 
|  | 2389 | MIB.buildInstr(AArch64::LOADgot, {AArch64::X0}, {}) | 
|  | 2390 | .addGlobalAddress(&GV, 0, AArch64II::MO_TLS); | 
|  | 2391 |  | 
| Amara Emerson | 72c81b9 | 2019-08-13 06:55:32 +0000 | [diff] [blame] | 2392 | auto Load = MIB.buildInstr(AArch64::LDRXui, {&AArch64::GPR64commonRegClass}, | 
|  | 2393 | {Register(AArch64::X0)}) | 
|  | 2394 | .addImm(0); | 
| Tim Northover | 01eb869 | 2019-08-09 09:32:38 +0000 | [diff] [blame] | 2395 |  | 
|  | 2396 | // TLS calls preserve all registers except those that absolutely must be | 
|  | 2397 | // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be | 
|  | 2398 | // silly). | 
| Amara Emerson | 72c81b9 | 2019-08-13 06:55:32 +0000 | [diff] [blame] | 2399 | MIB.buildInstr(AArch64::BLR, {}, {Load}) | 
| Tim Northover | 01eb869 | 2019-08-09 09:32:38 +0000 | [diff] [blame] | 2400 | .addDef(AArch64::X0, RegState::Implicit) | 
|  | 2401 | .addRegMask(TRI.getTLSCallPreservedMask()); | 
|  | 2402 |  | 
|  | 2403 | MIB.buildCopy(I.getOperand(0).getReg(), Register(AArch64::X0)); | 
|  | 2404 | RBI.constrainGenericRegister(I.getOperand(0).getReg(), AArch64::GPR64RegClass, | 
|  | 2405 | MRI); | 
|  | 2406 | I.eraseFromParent(); | 
|  | 2407 | return true; | 
|  | 2408 | } | 
|  | 2409 |  | 
| Jessica Paquette | 991cb39 | 2019-04-23 20:46:19 +0000 | [diff] [blame] | 2410 | bool AArch64InstructionSelector::selectIntrinsicTrunc( | 
|  | 2411 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 2412 | const LLT SrcTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 2413 |  | 
|  | 2414 | // Select the correct opcode. | 
|  | 2415 | unsigned Opc = 0; | 
|  | 2416 | if (!SrcTy.isVector()) { | 
|  | 2417 | switch (SrcTy.getSizeInBits()) { | 
|  | 2418 | default: | 
|  | 2419 | case 16: | 
|  | 2420 | Opc = AArch64::FRINTZHr; | 
|  | 2421 | break; | 
|  | 2422 | case 32: | 
|  | 2423 | Opc = AArch64::FRINTZSr; | 
|  | 2424 | break; | 
|  | 2425 | case 64: | 
|  | 2426 | Opc = AArch64::FRINTZDr; | 
|  | 2427 | break; | 
|  | 2428 | } | 
|  | 2429 | } else { | 
|  | 2430 | unsigned NumElts = SrcTy.getNumElements(); | 
|  | 2431 | switch (SrcTy.getElementType().getSizeInBits()) { | 
|  | 2432 | default: | 
|  | 2433 | break; | 
|  | 2434 | case 16: | 
|  | 2435 | if (NumElts == 4) | 
|  | 2436 | Opc = AArch64::FRINTZv4f16; | 
|  | 2437 | else if (NumElts == 8) | 
|  | 2438 | Opc = AArch64::FRINTZv8f16; | 
|  | 2439 | break; | 
|  | 2440 | case 32: | 
|  | 2441 | if (NumElts == 2) | 
|  | 2442 | Opc = AArch64::FRINTZv2f32; | 
|  | 2443 | else if (NumElts == 4) | 
|  | 2444 | Opc = AArch64::FRINTZv4f32; | 
|  | 2445 | break; | 
|  | 2446 | case 64: | 
|  | 2447 | if (NumElts == 2) | 
|  | 2448 | Opc = AArch64::FRINTZv2f64; | 
|  | 2449 | break; | 
|  | 2450 | } | 
|  | 2451 | } | 
|  | 2452 |  | 
|  | 2453 | if (!Opc) { | 
|  | 2454 | // Didn't get an opcode above, bail. | 
|  | 2455 | LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_TRUNC!\n"); | 
|  | 2456 | return false; | 
|  | 2457 | } | 
|  | 2458 |  | 
|  | 2459 | // Legalization would have set us up perfectly for this; we just need to | 
|  | 2460 | // set the opcode and move on. | 
|  | 2461 | I.setDesc(TII.get(Opc)); | 
|  | 2462 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 2463 | } | 
|  | 2464 |  | 
| Jessica Paquette | 4fe7574 | 2019-04-23 23:03:03 +0000 | [diff] [blame] | 2465 | bool AArch64InstructionSelector::selectIntrinsicRound( | 
|  | 2466 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 2467 | const LLT SrcTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 2468 |  | 
|  | 2469 | // Select the correct opcode. | 
|  | 2470 | unsigned Opc = 0; | 
|  | 2471 | if (!SrcTy.isVector()) { | 
|  | 2472 | switch (SrcTy.getSizeInBits()) { | 
|  | 2473 | default: | 
|  | 2474 | case 16: | 
|  | 2475 | Opc = AArch64::FRINTAHr; | 
|  | 2476 | break; | 
|  | 2477 | case 32: | 
|  | 2478 | Opc = AArch64::FRINTASr; | 
|  | 2479 | break; | 
|  | 2480 | case 64: | 
|  | 2481 | Opc = AArch64::FRINTADr; | 
|  | 2482 | break; | 
|  | 2483 | } | 
|  | 2484 | } else { | 
|  | 2485 | unsigned NumElts = SrcTy.getNumElements(); | 
|  | 2486 | switch (SrcTy.getElementType().getSizeInBits()) { | 
|  | 2487 | default: | 
|  | 2488 | break; | 
|  | 2489 | case 16: | 
|  | 2490 | if (NumElts == 4) | 
|  | 2491 | Opc = AArch64::FRINTAv4f16; | 
|  | 2492 | else if (NumElts == 8) | 
|  | 2493 | Opc = AArch64::FRINTAv8f16; | 
|  | 2494 | break; | 
|  | 2495 | case 32: | 
|  | 2496 | if (NumElts == 2) | 
|  | 2497 | Opc = AArch64::FRINTAv2f32; | 
|  | 2498 | else if (NumElts == 4) | 
|  | 2499 | Opc = AArch64::FRINTAv4f32; | 
|  | 2500 | break; | 
|  | 2501 | case 64: | 
|  | 2502 | if (NumElts == 2) | 
|  | 2503 | Opc = AArch64::FRINTAv2f64; | 
|  | 2504 | break; | 
|  | 2505 | } | 
|  | 2506 | } | 
|  | 2507 |  | 
|  | 2508 | if (!Opc) { | 
|  | 2509 | // Didn't get an opcode above, bail. | 
|  | 2510 | LLVM_DEBUG(dbgs() << "Unsupported type for G_INTRINSIC_ROUND!\n"); | 
|  | 2511 | return false; | 
|  | 2512 | } | 
|  | 2513 |  | 
|  | 2514 | // Legalization would have set us up perfectly for this; we just need to | 
|  | 2515 | // set the opcode and move on. | 
|  | 2516 | I.setDesc(TII.get(Opc)); | 
|  | 2517 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 2518 | } | 
|  | 2519 |  | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 2520 | bool AArch64InstructionSelector::selectVectorICmp( | 
|  | 2521 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2522 | Register DstReg = I.getOperand(0).getReg(); | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 2523 | LLT DstTy = MRI.getType(DstReg); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2524 | Register SrcReg = I.getOperand(2).getReg(); | 
|  | 2525 | Register Src2Reg = I.getOperand(3).getReg(); | 
| Amara Emerson | 9bf092d | 2019-04-09 21:22:43 +0000 | [diff] [blame] | 2526 | LLT SrcTy = MRI.getType(SrcReg); | 
|  | 2527 |  | 
|  | 2528 | unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits(); | 
|  | 2529 | unsigned NumElts = DstTy.getNumElements(); | 
|  | 2530 |  | 
|  | 2531 | // First index is element size, 0 == 8b, 1 == 16b, 2 == 32b, 3 == 64b | 
|  | 2532 | // Second index is num elts, 0 == v2, 1 == v4, 2 == v8, 3 == v16 | 
|  | 2533 | // Third index is cc opcode: | 
|  | 2534 | // 0 == eq | 
|  | 2535 | // 1 == ugt | 
|  | 2536 | // 2 == uge | 
|  | 2537 | // 3 == ult | 
|  | 2538 | // 4 == ule | 
|  | 2539 | // 5 == sgt | 
|  | 2540 | // 6 == sge | 
|  | 2541 | // 7 == slt | 
|  | 2542 | // 8 == sle | 
|  | 2543 | // ne is done by negating 'eq' result. | 
|  | 2544 |  | 
|  | 2545 | // This table below assumes that for some comparisons the operands will be | 
|  | 2546 | // commuted. | 
|  | 2547 | // ult op == commute + ugt op | 
|  | 2548 | // ule op == commute + uge op | 
|  | 2549 | // slt op == commute + sgt op | 
|  | 2550 | // sle op == commute + sge op | 
|  | 2551 | unsigned PredIdx = 0; | 
|  | 2552 | bool SwapOperands = false; | 
|  | 2553 | CmpInst::Predicate Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate(); | 
|  | 2554 | switch (Pred) { | 
|  | 2555 | case CmpInst::ICMP_NE: | 
|  | 2556 | case CmpInst::ICMP_EQ: | 
|  | 2557 | PredIdx = 0; | 
|  | 2558 | break; | 
|  | 2559 | case CmpInst::ICMP_UGT: | 
|  | 2560 | PredIdx = 1; | 
|  | 2561 | break; | 
|  | 2562 | case CmpInst::ICMP_UGE: | 
|  | 2563 | PredIdx = 2; | 
|  | 2564 | break; | 
|  | 2565 | case CmpInst::ICMP_ULT: | 
|  | 2566 | PredIdx = 3; | 
|  | 2567 | SwapOperands = true; | 
|  | 2568 | break; | 
|  | 2569 | case CmpInst::ICMP_ULE: | 
|  | 2570 | PredIdx = 4; | 
|  | 2571 | SwapOperands = true; | 
|  | 2572 | break; | 
|  | 2573 | case CmpInst::ICMP_SGT: | 
|  | 2574 | PredIdx = 5; | 
|  | 2575 | break; | 
|  | 2576 | case CmpInst::ICMP_SGE: | 
|  | 2577 | PredIdx = 6; | 
|  | 2578 | break; | 
|  | 2579 | case CmpInst::ICMP_SLT: | 
|  | 2580 | PredIdx = 7; | 
|  | 2581 | SwapOperands = true; | 
|  | 2582 | break; | 
|  | 2583 | case CmpInst::ICMP_SLE: | 
|  | 2584 | PredIdx = 8; | 
|  | 2585 | SwapOperands = true; | 
|  | 2586 | break; | 
|  | 2587 | default: | 
|  | 2588 | llvm_unreachable("Unhandled icmp predicate"); | 
|  | 2589 | return false; | 
|  | 2590 | } | 
|  | 2591 |  | 
|  | 2592 | // This table obviously should be tablegen'd when we have our GISel native | 
|  | 2593 | // tablegen selector. | 
|  | 2594 |  | 
|  | 2595 | static const unsigned OpcTable[4][4][9] = { | 
|  | 2596 | { | 
|  | 2597 | {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2598 | 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2599 | 0 /* invalid */}, | 
|  | 2600 | {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2601 | 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2602 | 0 /* invalid */}, | 
|  | 2603 | {AArch64::CMEQv8i8, AArch64::CMHIv8i8, AArch64::CMHSv8i8, | 
|  | 2604 | AArch64::CMHIv8i8, AArch64::CMHSv8i8, AArch64::CMGTv8i8, | 
|  | 2605 | AArch64::CMGEv8i8, AArch64::CMGTv8i8, AArch64::CMGEv8i8}, | 
|  | 2606 | {AArch64::CMEQv16i8, AArch64::CMHIv16i8, AArch64::CMHSv16i8, | 
|  | 2607 | AArch64::CMHIv16i8, AArch64::CMHSv16i8, AArch64::CMGTv16i8, | 
|  | 2608 | AArch64::CMGEv16i8, AArch64::CMGTv16i8, AArch64::CMGEv16i8} | 
|  | 2609 | }, | 
|  | 2610 | { | 
|  | 2611 | {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2612 | 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2613 | 0 /* invalid */}, | 
|  | 2614 | {AArch64::CMEQv4i16, AArch64::CMHIv4i16, AArch64::CMHSv4i16, | 
|  | 2615 | AArch64::CMHIv4i16, AArch64::CMHSv4i16, AArch64::CMGTv4i16, | 
|  | 2616 | AArch64::CMGEv4i16, AArch64::CMGTv4i16, AArch64::CMGEv4i16}, | 
|  | 2617 | {AArch64::CMEQv8i16, AArch64::CMHIv8i16, AArch64::CMHSv8i16, | 
|  | 2618 | AArch64::CMHIv8i16, AArch64::CMHSv8i16, AArch64::CMGTv8i16, | 
|  | 2619 | AArch64::CMGEv8i16, AArch64::CMGTv8i16, AArch64::CMGEv8i16}, | 
|  | 2620 | {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2621 | 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2622 | 0 /* invalid */} | 
|  | 2623 | }, | 
|  | 2624 | { | 
|  | 2625 | {AArch64::CMEQv2i32, AArch64::CMHIv2i32, AArch64::CMHSv2i32, | 
|  | 2626 | AArch64::CMHIv2i32, AArch64::CMHSv2i32, AArch64::CMGTv2i32, | 
|  | 2627 | AArch64::CMGEv2i32, AArch64::CMGTv2i32, AArch64::CMGEv2i32}, | 
|  | 2628 | {AArch64::CMEQv4i32, AArch64::CMHIv4i32, AArch64::CMHSv4i32, | 
|  | 2629 | AArch64::CMHIv4i32, AArch64::CMHSv4i32, AArch64::CMGTv4i32, | 
|  | 2630 | AArch64::CMGEv4i32, AArch64::CMGTv4i32, AArch64::CMGEv4i32}, | 
|  | 2631 | {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2632 | 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2633 | 0 /* invalid */}, | 
|  | 2634 | {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2635 | 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2636 | 0 /* invalid */} | 
|  | 2637 | }, | 
|  | 2638 | { | 
|  | 2639 | {AArch64::CMEQv2i64, AArch64::CMHIv2i64, AArch64::CMHSv2i64, | 
|  | 2640 | AArch64::CMHIv2i64, AArch64::CMHSv2i64, AArch64::CMGTv2i64, | 
|  | 2641 | AArch64::CMGEv2i64, AArch64::CMGTv2i64, AArch64::CMGEv2i64}, | 
|  | 2642 | {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2643 | 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2644 | 0 /* invalid */}, | 
|  | 2645 | {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2646 | 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2647 | 0 /* invalid */}, | 
|  | 2648 | {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2649 | 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, | 
|  | 2650 | 0 /* invalid */} | 
|  | 2651 | }, | 
|  | 2652 | }; | 
|  | 2653 | unsigned EltIdx = Log2_32(SrcEltSize / 8); | 
|  | 2654 | unsigned NumEltsIdx = Log2_32(NumElts / 2); | 
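|  |  | // For example, a v4i16 ugt compare indexes as EltIdx = Log2_32(16 / 8) = 1, | 
|  |  | // NumEltsIdx = Log2_32(4 / 2) = 1, PredIdx = 1, i.e. | 
|  |  | // OpcTable[1][1][1] == AArch64::CMHIv4i16. | 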
|  | 2655 | unsigned Opc = OpcTable[EltIdx][NumEltsIdx][PredIdx]; | 
|  | 2656 | if (!Opc) { | 
|  | 2657 | LLVM_DEBUG(dbgs() << "Could not map G_ICMP to cmp opcode\n"); | 
|  | 2658 | return false; | 
|  | 2659 | } | 
|  | 2660 |  | 
|  | 2661 | const RegisterBank &VecRB = *RBI.getRegBank(SrcReg, MRI, TRI); | 
|  | 2662 | const TargetRegisterClass *SrcRC = | 
|  | 2663 | getRegClassForTypeOnBank(SrcTy, VecRB, RBI, true); | 
|  | 2664 | if (!SrcRC) { | 
|  | 2665 | LLVM_DEBUG(dbgs() << "Could not determine source register class.\n"); | 
|  | 2666 | return false; | 
|  | 2667 | } | 
|  | 2668 |  | 
|  | 2669 | unsigned NotOpc = Pred == ICmpInst::ICMP_NE ? AArch64::NOTv8i8 : 0; | 
|  | 2670 | if (SrcTy.getSizeInBits() == 128) | 
|  | 2671 | NotOpc = NotOpc ? AArch64::NOTv16i8 : 0; | 
|  | 2672 |  | 
|  | 2673 | if (SwapOperands) | 
|  | 2674 | std::swap(SrcReg, Src2Reg); | 
|  | 2675 |  | 
|  | 2676 | MachineIRBuilder MIB(I); | 
|  | 2677 | auto Cmp = MIB.buildInstr(Opc, {SrcRC}, {SrcReg, Src2Reg}); | 
|  | 2678 | constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI); | 
|  | 2679 |  | 
|  | 2680 | // Invert if we had a 'ne' cc. | 
|  | 2681 | if (NotOpc) { | 
|  | 2682 | Cmp = MIB.buildInstr(NotOpc, {DstReg}, {Cmp}); | 
|  | 2683 | constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI); | 
|  | 2684 | } else { | 
|  | 2685 | MIB.buildCopy(DstReg, Cmp.getReg(0)); | 
|  | 2686 | } | 
|  | 2687 | RBI.constrainGenericRegister(DstReg, *SrcRC, MRI); | 
|  | 2688 | I.eraseFromParent(); | 
|  | 2689 | return true; | 
|  | 2690 | } | 
|  | 2691 |  | 
| Amara Emerson | 6bcfa1c | 2019-02-25 18:52:54 +0000 | [diff] [blame] | 2692 | MachineInstr *AArch64InstructionSelector::emitScalarToVector( | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2693 | unsigned EltSize, const TargetRegisterClass *DstRC, Register Scalar, | 
| Amara Emerson | 6bcfa1c | 2019-02-25 18:52:54 +0000 | [diff] [blame] | 2694 | MachineIRBuilder &MIRBuilder) const { | 
|  | 2695 | auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {}); | 
| Amara Emerson | 5ec1460 | 2018-12-10 18:44:58 +0000 | [diff] [blame] | 2696 |  | 
|  | 2697 | auto BuildFn = [&](unsigned SubregIndex) { | 
| Amara Emerson | 6bcfa1c | 2019-02-25 18:52:54 +0000 | [diff] [blame] | 2698 | auto Ins = | 
|  | 2699 | MIRBuilder | 
|  | 2700 | .buildInstr(TargetOpcode::INSERT_SUBREG, {DstRC}, {Undef, Scalar}) | 
|  | 2701 | .addImm(SubregIndex); | 
|  | 2702 | constrainSelectedInstRegOperands(*Undef, TII, TRI, RBI); | 
|  | 2703 | constrainSelectedInstRegOperands(*Ins, TII, TRI, RBI); | 
|  | 2704 | return &*Ins; | 
| Amara Emerson | 5ec1460 | 2018-12-10 18:44:58 +0000 | [diff] [blame] | 2705 | }; | 
|  | 2706 |  | 
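|  |  | // E.g. for a 32-bit scalar this builds (register classes depend on DstRC): | 
|  |  | //   %undef = IMPLICIT_DEF | 
|  |  | //   %vec = INSERT_SUBREG %undef, %scalar, %subreg.ssub | 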
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 2707 | switch (EltSize) { | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2708 | case 16: | 
|  | 2709 | return BuildFn(AArch64::hsub); | 
| Amara Emerson | 5ec1460 | 2018-12-10 18:44:58 +0000 | [diff] [blame] | 2710 | case 32: | 
|  | 2711 | return BuildFn(AArch64::ssub); | 
|  | 2712 | case 64: | 
|  | 2713 | return BuildFn(AArch64::dsub); | 
|  | 2714 | default: | 
| Amara Emerson | 6bcfa1c | 2019-02-25 18:52:54 +0000 | [diff] [blame] | 2715 | return nullptr; | 
| Amara Emerson | 5ec1460 | 2018-12-10 18:44:58 +0000 | [diff] [blame] | 2716 | } | 
|  | 2717 | } | 
|  | 2718 |  | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2719 | bool AArch64InstructionSelector::selectMergeValues( | 
|  | 2720 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 2721 | assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode"); | 
|  | 2722 | const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 2723 | const LLT SrcTy = MRI.getType(I.getOperand(1).getReg()); | 
|  | 2724 | assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation"); | 
| Amara Emerson | 511f7f5 | 2019-07-23 22:05:13 +0000 | [diff] [blame] | 2725 | const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI); | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2726 |  | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2727 | if (I.getNumOperands() != 3) | 
|  | 2728 | return false; | 
| Amara Emerson | 511f7f5 | 2019-07-23 22:05:13 +0000 | [diff] [blame] | 2729 |  | 
|  | 2730 | // Merging 2 s64s into an s128. | 
|  | 2731 | if (DstTy == LLT::scalar(128)) { | 
|  | 2732 | if (SrcTy.getSizeInBits() != 64) | 
|  | 2733 | return false; | 
|  | 2734 | MachineIRBuilder MIB(I); | 
|  | 2735 | Register DstReg = I.getOperand(0).getReg(); | 
|  | 2736 | Register Src1Reg = I.getOperand(1).getReg(); | 
|  | 2737 | Register Src2Reg = I.getOperand(2).getReg(); | 
|  | 2738 | auto Tmp = MIB.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstTy}, {}); | 
|  | 2739 | MachineInstr *InsMI = | 
|  | 2740 | emitLaneInsert(None, Tmp.getReg(0), Src1Reg, /* LaneIdx */ 0, RB, MIB); | 
|  | 2741 | if (!InsMI) | 
|  | 2742 | return false; | 
|  | 2743 | MachineInstr *Ins2MI = emitLaneInsert(DstReg, InsMI->getOperand(0).getReg(), | 
|  | 2744 | Src2Reg, /* LaneIdx */ 1, RB, MIB); | 
|  | 2745 | if (!Ins2MI) | 
|  | 2746 | return false; | 
|  | 2747 | constrainSelectedInstRegOperands(*InsMI, TII, TRI, RBI); | 
|  | 2748 | constrainSelectedInstRegOperands(*Ins2MI, TII, TRI, RBI); | 
|  | 2749 | I.eraseFromParent(); | 
|  | 2750 | return true; | 
|  | 2751 | } | 
|  | 2752 |  | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2753 | if (RB.getID() != AArch64::GPRRegBankID) | 
|  | 2754 | return false; | 
|  | 2755 |  | 
| Amara Emerson | 511f7f5 | 2019-07-23 22:05:13 +0000 | [diff] [blame] | 2756 | if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32) | 
|  | 2757 | return false; | 
|  | 2758 |  | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2759 | auto *DstRC = &AArch64::GPR64RegClass; | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2760 | Register SubToRegDef = MRI.createVirtualRegister(DstRC); | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2761 | MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(), | 
|  | 2762 | TII.get(TargetOpcode::SUBREG_TO_REG)) | 
|  | 2763 | .addDef(SubToRegDef) | 
|  | 2764 | .addImm(0) | 
|  | 2765 | .addUse(I.getOperand(1).getReg()) | 
|  | 2766 | .addImm(AArch64::sub_32); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2767 | Register SubToRegDef2 = MRI.createVirtualRegister(DstRC); | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2768 | // Need to anyext the second scalar before we can use bfm | 
|  | 2769 | MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(), | 
|  | 2770 | TII.get(TargetOpcode::SUBREG_TO_REG)) | 
|  | 2771 | .addDef(SubToRegDef2) | 
|  | 2772 | .addImm(0) | 
|  | 2773 | .addUse(I.getOperand(2).getReg()) | 
|  | 2774 | .addImm(AArch64::sub_32); | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2775 | MachineInstr &BFM = | 
|  | 2776 | *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri)) | 
| Amara Emerson | 321bfb2 | 2018-12-20 03:27:42 +0000 | [diff] [blame] | 2777 | .addDef(I.getOperand(0).getReg()) | 
| Amara Emerson | 8cb186c | 2018-12-20 01:11:04 +0000 | [diff] [blame] | 2778 | .addUse(SubToRegDef) | 
|  | 2779 | .addUse(SubToRegDef2) | 
|  | 2780 | .addImm(32) | 
|  | 2781 | .addImm(31); | 
|  | 2782 | constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI); | 
|  | 2783 | constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI); | 
|  | 2784 | constrainSelectedInstRegOperands(BFM, TII, TRI, RBI); | 
|  | 2785 | I.eraseFromParent(); | 
|  | 2786 | return true; | 
|  | 2787 | } | 
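|  |  |  | 
|  |  | // Illustrative sketch of the GPR path above, for | 
|  |  | // %dst:gpr(s64) = G_MERGE_VALUES %lo(s32), %hi(s32) (register names are | 
|  |  | // made up): | 
|  |  | //   %a:gpr64 = SUBREG_TO_REG 0, %lo, %subreg.sub_32 | 
|  |  | //   %b:gpr64 = SUBREG_TO_REG 0, %hi, %subreg.sub_32 | 
|  |  | //   %dst:gpr64 = BFMXri %a, %b, 32, 31 ; BFI: insert %b[31:0] at bit 32 | 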
|  | 2788 |  | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2789 | static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg, | 
|  | 2790 | const unsigned EltSize) { | 
|  | 2791 | // Choose a lane copy opcode and subregister based on the size of the | 
|  | 2792 | // vector's elements. | 
|  | 2793 | switch (EltSize) { | 
|  | 2794 | case 16: | 
|  | 2795 | CopyOpc = AArch64::CPYi16; | 
|  | 2796 | ExtractSubReg = AArch64::hsub; | 
|  | 2797 | break; | 
|  | 2798 | case 32: | 
|  | 2799 | CopyOpc = AArch64::CPYi32; | 
|  | 2800 | ExtractSubReg = AArch64::ssub; | 
|  | 2801 | break; | 
|  | 2802 | case 64: | 
|  | 2803 | CopyOpc = AArch64::CPYi64; | 
|  | 2804 | ExtractSubReg = AArch64::dsub; | 
|  | 2805 | break; | 
|  | 2806 | default: | 
|  | 2807 | // Unknown size, bail out. | 
|  | 2808 | LLVM_DEBUG(dbgs() << "Elt size '" << EltSize << "' unsupported.\n"); | 
|  | 2809 | return false; | 
|  | 2810 | } | 
|  | 2811 | return true; | 
|  | 2812 | } | 
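|  |  |  | 
|  |  | // Hypothetical usage, e.g. for 32-bit vector elements: | 
|  |  | //   unsigned Opc = 0, SubReg = 0; | 
|  |  | //   if (getLaneCopyOpcode(Opc, SubReg, 32)) { | 
|  |  | //     // Opc == AArch64::CPYi32 && SubReg == AArch64::ssub | 
|  |  | //   } | 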
|  | 2813 |  | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2814 | MachineInstr *AArch64InstructionSelector::emitExtractVectorElt( | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2815 | Optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy, | 
|  | 2816 | Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const { | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2817 | MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); | 
|  | 2818 | unsigned CopyOpc = 0; | 
|  | 2819 | unsigned ExtractSubReg = 0; | 
|  | 2820 | if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, ScalarTy.getSizeInBits())) { | 
|  | 2821 | LLVM_DEBUG( | 
|  | 2822 | dbgs() << "Couldn't determine lane copy opcode for instruction.\n"); | 
|  | 2823 | return nullptr; | 
|  | 2824 | } | 
|  | 2825 |  | 
|  | 2826 | const TargetRegisterClass *DstRC = | 
|  | 2827 | getRegClassForTypeOnBank(ScalarTy, DstRB, RBI, true); | 
|  | 2828 | if (!DstRC) { | 
|  | 2829 | LLVM_DEBUG(dbgs() << "Could not determine destination register class.\n"); | 
|  | 2830 | return nullptr; | 
|  | 2831 | } | 
|  | 2832 |  | 
|  | 2833 | const RegisterBank &VecRB = *RBI.getRegBank(VecReg, MRI, TRI); | 
|  | 2834 | const LLT &VecTy = MRI.getType(VecReg); | 
|  | 2835 | const TargetRegisterClass *VecRC = | 
|  | 2836 | getRegClassForTypeOnBank(VecTy, VecRB, RBI, true); | 
|  | 2837 | if (!VecRC) { | 
|  | 2838 | LLVM_DEBUG(dbgs() << "Could not determine source register class.\n"); | 
|  | 2839 | return nullptr; | 
|  | 2840 | } | 
|  | 2841 |  | 
|  | 2842 | // The register that we're going to copy into. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2843 | Register InsertReg = VecReg; | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2844 | if (!DstReg) | 
|  | 2845 | DstReg = MRI.createVirtualRegister(DstRC); | 
|  | 2846 | // If the lane index is 0, we just use a subregister COPY. | 
|  | 2847 | if (LaneIdx == 0) { | 
| Amara Emerson | 8627178 | 2019-03-18 19:20:10 +0000 | [diff] [blame] | 2848 | auto Copy = MIRBuilder.buildInstr(TargetOpcode::COPY, {*DstReg}, {}) | 
|  | 2849 | .addReg(VecReg, 0, ExtractSubReg); | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2850 | RBI.constrainGenericRegister(*DstReg, *DstRC, MRI); | 
| Amara Emerson | 3739a20 | 2019-03-15 21:59:50 +0000 | [diff] [blame] | 2851 | return &*Copy; | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2852 | } | 
|  | 2853 |  | 
|  | 2854 | // Lane copies require 128-bit wide registers. If we're dealing with an | 
|  | 2855 | // unpacked vector, then we need to move up to that width. Insert an implicit | 
|  | 2856 | // def and a subregister insert to get us there. | 
|  | 2857 | if (VecTy.getSizeInBits() != 128) { | 
|  | 2858 | MachineInstr *ScalarToVector = emitScalarToVector( | 
|  | 2859 | VecTy.getSizeInBits(), &AArch64::FPR128RegClass, VecReg, MIRBuilder); | 
|  | 2860 | if (!ScalarToVector) | 
|  | 2861 | return nullptr; | 
|  | 2862 | InsertReg = ScalarToVector->getOperand(0).getReg(); | 
|  | 2863 | } | 
|  | 2864 |  | 
|  | 2865 | MachineInstr *LaneCopyMI = | 
|  | 2866 | MIRBuilder.buildInstr(CopyOpc, {*DstReg}, {InsertReg}).addImm(LaneIdx); | 
|  | 2867 | constrainSelectedInstRegOperands(*LaneCopyMI, TII, TRI, RBI); | 
|  | 2868 |  | 
|  | 2869 | // Make sure that we actually constrain the initial copy. | 
|  | 2870 | RBI.constrainGenericRegister(*DstReg, *DstRC, MRI); | 
|  | 2871 | return LaneCopyMI; | 
|  | 2872 | } | 
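|  |  |  | 
|  |  | // Sketch of the two paths above for an s32 element (register names are | 
|  |  | // illustrative): | 
|  |  | //   lane 0: %dst:fpr32 = COPY %vec.ssub | 
|  |  | //   lane N: %dst:fpr32 = CPYi32 %vec:fpr128, N | 
|  |  | // preceded by an IMPLICIT_DEF + INSERT_SUBREG widening when %vec is not | 
|  |  | // already 128 bits wide. | 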
|  | 2873 |  | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2874 | bool AArch64InstructionSelector::selectExtractElt( | 
|  | 2875 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 2876 | assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT && | 
|  | 2877 | "unexpected opcode!"); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2878 | Register DstReg = I.getOperand(0).getReg(); | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2879 | const LLT NarrowTy = MRI.getType(DstReg); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2880 | const Register SrcReg = I.getOperand(1).getReg(); | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2881 | const LLT WideTy = MRI.getType(SrcReg); | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2882 | (void)WideTy; | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2883 | assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() && | 
|  | 2884 | "source register size too small!"); | 
|  | 2885 | assert(NarrowTy.isScalar() && "cannot extract vector into vector!"); | 
|  | 2886 |  | 
|  | 2887 | // Need the lane index to determine the correct copy opcode. | 
|  | 2888 | MachineOperand &LaneIdxOp = I.getOperand(2); | 
|  | 2889 | assert(LaneIdxOp.isReg() && "Lane index operand was not a register?"); | 
|  | 2890 |  | 
|  | 2891 | if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) { | 
|  | 2892 | LLVM_DEBUG(dbgs() << "Cannot extract into GPR.\n"); | 
|  | 2893 | return false; | 
|  | 2894 | } | 
|  | 2895 |  | 
| Jessica Paquette | bb1aced | 2019-03-13 21:19:29 +0000 | [diff] [blame] | 2896 | // Find the index to extract from. | 
| Jessica Paquette | 76f64b6 | 2019-04-26 21:53:13 +0000 | [diff] [blame] | 2897 | auto VRegAndVal = getConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI); | 
|  | 2898 | if (!VRegAndVal) | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2899 | return false; | 
| Jessica Paquette | 76f64b6 | 2019-04-26 21:53:13 +0000 | [diff] [blame] | 2900 | unsigned LaneIdx = VRegAndVal->Value; | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2901 |  | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2902 | MachineIRBuilder MIRBuilder(I); | 
|  | 2903 |  | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2904 | const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI); | 
|  | 2905 | MachineInstr *Extract = emitExtractVectorElt(DstReg, DstRB, NarrowTy, SrcReg, | 
|  | 2906 | LaneIdx, MIRBuilder); | 
|  | 2907 | if (!Extract) | 
|  | 2908 | return false; | 
|  | 2909 |  | 
|  | 2910 | I.eraseFromParent(); | 
|  | 2911 | return true; | 
|  | 2912 | } | 
|  | 2913 |  | 
|  | 2914 | bool AArch64InstructionSelector::selectSplitVectorUnmerge( | 
|  | 2915 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 2916 | unsigned NumElts = I.getNumOperands() - 1; | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2917 | Register SrcReg = I.getOperand(NumElts).getReg(); | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2918 | const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 2919 | const LLT SrcTy = MRI.getType(SrcReg); | 
|  | 2920 |  | 
|  | 2921 | assert(NarrowTy.isVector() && "Expected an unmerge into vectors"); | 
|  | 2922 | if (SrcTy.getSizeInBits() > 128) { | 
|  | 2923 | LLVM_DEBUG(dbgs() << "Unexpected vector type for vec split unmerge"); | 
|  | 2924 | return false; | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2925 | } | 
|  | 2926 |  | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2927 | MachineIRBuilder MIB(I); | 
|  | 2928 |  | 
|  | 2929 | // We implement a split vector operation by treating the sub-vectors as | 
|  | 2930 | // scalars and extracting them. | 
|  | 2931 | const RegisterBank &DstRB = | 
|  | 2932 | *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI); | 
|  | 2933 | for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) { | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2934 | Register Dst = I.getOperand(OpIdx).getReg(); | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2935 | MachineInstr *Extract = | 
|  | 2936 | emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB); | 
|  | 2937 | if (!Extract) | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2938 | return false; | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2939 | } | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2940 | I.eraseFromParent(); | 
|  | 2941 | return true; | 
|  | 2942 | } | 
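|  |  |  | 
|  |  | // E.g. unmerging %a(<2 x s32>), %b(<2 x s32>) from %src(<4 x s32>) treats | 
|  |  | // each half as a 64-bit "element", so it roughly becomes (illustrative): | 
|  |  | //   %a = COPY %src.dsub | 
|  |  | //   %b = CPYi64 %src, 1 | 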
|  | 2943 |  | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2944 | bool AArch64InstructionSelector::selectUnmergeValues( | 
|  | 2945 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 2946 | assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && | 
|  | 2947 | "unexpected opcode"); | 
|  | 2948 |  | 
|  | 2949 | // TODO: Handle unmerging into GPRs and from scalars to scalars. | 
|  | 2950 | if (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() != | 
|  | 2951 | AArch64::FPRRegBankID || | 
|  | 2952 | RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() != | 
|  | 2953 | AArch64::FPRRegBankID) { | 
|  | 2954 | LLVM_DEBUG(dbgs() << "Unmerging vector-to-gpr and scalar-to-scalar " | 
|  | 2955 | "currently unsupported.\n"); | 
|  | 2956 | return false; | 
|  | 2957 | } | 
|  | 2958 |  | 
|  | 2959 | // The last operand is the vector source register, and every other operand is | 
|  | 2960 | // a register to unpack into. | 
|  | 2961 | unsigned NumElts = I.getNumOperands() - 1; | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2962 | Register SrcReg = I.getOperand(NumElts).getReg(); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2963 | const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 2964 | const LLT WideTy = MRI.getType(SrcReg); | 
| Benjamin Kramer | 653020d | 2019-01-24 23:45:07 +0000 | [diff] [blame] | 2965 | (void)WideTy; | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2966 | assert(WideTy.isVector() && "can only unmerge from vector types!"); | 
|  | 2967 | assert(WideTy.getSizeInBits() > NarrowTy.getSizeInBits() && | 
|  | 2968 | "source register size too small!"); | 
|  | 2969 |  | 
| Amara Emerson | d61b89b | 2019-03-14 22:48:18 +0000 | [diff] [blame] | 2970 | if (!NarrowTy.isScalar()) | 
|  | 2971 | return selectSplitVectorUnmerge(I, MRI); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2972 |  | 
| Amara Emerson | 3739a20 | 2019-03-15 21:59:50 +0000 | [diff] [blame] | 2973 | MachineIRBuilder MIB(I); | 
|  | 2974 |  | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2975 | // Choose a lane copy opcode and subregister based on the size of the | 
|  | 2976 | // vector's elements. | 
|  | 2977 | unsigned CopyOpc = 0; | 
|  | 2978 | unsigned ExtractSubReg = 0; | 
| Jessica Paquette | 607774c | 2019-03-11 22:18:01 +0000 | [diff] [blame] | 2979 | if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, NarrowTy.getSizeInBits())) | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2980 | return false; | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2981 |  | 
|  | 2982 | // Set up for the lane copies. | 
|  | 2983 | MachineBasicBlock &MBB = *I.getParent(); | 
|  | 2984 |  | 
|  | 2985 | // Stores the registers we'll be copying from. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2986 | SmallVector<Register, 4> InsertRegs; | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2987 |  | 
|  | 2988 | // We'll use the first register twice, so we only need NumElts-1 registers. | 
|  | 2989 | unsigned NumInsertRegs = NumElts - 1; | 
|  | 2990 |  | 
|  | 2991 | // If our elements fit into exactly 128 bits, then we can copy from the source | 
|  | 2992 | // directly. Otherwise, we need to do a bit of setup with some subregister | 
|  | 2993 | // inserts. | 
|  | 2994 | if (NarrowTy.getSizeInBits() * NumElts == 128) { | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 2995 | InsertRegs = SmallVector<Register, 4>(NumInsertRegs, SrcReg); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 2996 | } else { | 
|  | 2997 | // Otherwise, we have to perform subregister inserts. For each insert, create | 
|  | 2998 | // an implicit def and a subregister insert, and save the register we create. | 
|  | 2999 | for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) { | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3000 | Register ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3001 | MachineInstr &ImpDefMI = | 
|  | 3002 | *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF), | 
|  | 3003 | ImpDefReg); | 
|  | 3004 |  | 
|  | 3005 | // Now, create the subregister insert from SrcReg. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3006 | Register InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3007 | MachineInstr &InsMI = | 
|  | 3008 | *BuildMI(MBB, I, I.getDebugLoc(), | 
|  | 3009 | TII.get(TargetOpcode::INSERT_SUBREG), InsertReg) | 
|  | 3010 | .addUse(ImpDefReg) | 
|  | 3011 | .addUse(SrcReg) | 
|  | 3012 | .addImm(AArch64::dsub); | 
|  | 3013 |  | 
|  | 3014 | constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI); | 
|  | 3015 | constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI); | 
|  | 3016 |  | 
|  | 3017 | // Save the register so that we can copy from it after. | 
|  | 3018 | InsertRegs.push_back(InsertReg); | 
|  | 3019 | } | 
|  | 3020 | } | 
|  | 3021 |  | 
|  | 3022 | // Now that we've created any necessary subregister inserts, we can | 
|  | 3023 | // create the copies. | 
|  | 3024 | // | 
|  | 3025 | // Perform the first copy separately as a subregister copy. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3026 | Register CopyTo = I.getOperand(0).getReg(); | 
| Amara Emerson | 8627178 | 2019-03-18 19:20:10 +0000 | [diff] [blame] | 3027 | auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {}) | 
|  | 3028 | .addReg(InsertRegs[0], 0, ExtractSubReg); | 
| Amara Emerson | 3739a20 | 2019-03-15 21:59:50 +0000 | [diff] [blame] | 3029 | constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3030 |  | 
|  | 3031 | // Now, perform the remaining copies as vector lane copies. | 
|  | 3032 | unsigned LaneIdx = 1; | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3033 | for (Register InsReg : InsertRegs) { | 
|  | 3034 | Register CopyTo = I.getOperand(LaneIdx).getReg(); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3035 | MachineInstr &CopyInst = | 
|  | 3036 | *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo) | 
|  | 3037 | .addUse(InsReg) | 
|  | 3038 | .addImm(LaneIdx); | 
|  | 3039 | constrainSelectedInstRegOperands(CopyInst, TII, TRI, RBI); | 
|  | 3040 | ++LaneIdx; | 
|  | 3041 | } | 
|  | 3042 |  | 
|  | 3043 | // Separately constrain the first copy's destination. Because of the | 
|  | 3044 | // limitation in constrainOperandRegClass, we can't guarantee that this will | 
|  | 3045 | // actually be constrained. So, do it ourselves using the second operand. | 
|  | 3046 | const TargetRegisterClass *RC = | 
|  | 3047 | MRI.getRegClassOrNull(I.getOperand(1).getReg()); | 
|  | 3048 | if (!RC) { | 
|  | 3049 | LLVM_DEBUG(dbgs() << "Couldn't constrain copy destination.\n"); | 
|  | 3050 | return false; | 
|  | 3051 | } | 
|  | 3052 |  | 
|  | 3053 | RBI.constrainGenericRegister(CopyTo, *RC, MRI); | 
|  | 3054 | I.eraseFromParent(); | 
|  | 3055 | return true; | 
|  | 3056 | } | 
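|  |  |  | 
|  |  | // Putting the above together for | 
|  |  | // %w, %x, %y, %z = G_UNMERGE_VALUES %src(<4 x s32>), the emitted sequence | 
|  |  | // is roughly (illustrative): | 
|  |  | //   %w = COPY %src.ssub | 
|  |  | //   %x = CPYi32 %src, 1 | 
|  |  | //   %y = CPYi32 %src, 2 | 
|  |  | //   %z = CPYi32 %src, 3 | 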
|  | 3057 |  | 
| Amara Emerson | 2ff2298 | 2019-03-14 22:48:15 +0000 | [diff] [blame] | 3058 | bool AArch64InstructionSelector::selectConcatVectors( | 
|  | 3059 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 3060 | assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS && | 
|  | 3061 | "Unexpected opcode"); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3062 | Register Dst = I.getOperand(0).getReg(); | 
|  | 3063 | Register Op1 = I.getOperand(1).getReg(); | 
|  | 3064 | Register Op2 = I.getOperand(2).getReg(); | 
| Amara Emerson | 2ff2298 | 2019-03-14 22:48:15 +0000 | [diff] [blame] | 3065 | MachineIRBuilder MIRBuilder(I); | 
|  | 3066 | MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIRBuilder); | 
|  | 3067 | if (!ConcatMI) | 
|  | 3068 | return false; | 
|  | 3069 | I.eraseFromParent(); | 
|  | 3070 | return true; | 
|  | 3071 | } | 
|  | 3072 |  | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3073 | unsigned | 
|  | 3074 | AArch64InstructionSelector::emitConstantPoolEntry(Constant *CPVal, | 
|  | 3075 | MachineFunction &MF) const { | 
| Hans Wennborg | 5d5ee4a | 2019-04-26 08:31:00 +0000 | [diff] [blame] | 3076 | Type *CPTy = CPVal->getType(); | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3077 | unsigned Align = MF.getDataLayout().getPrefTypeAlignment(CPTy); | 
|  | 3078 | if (Align == 0) | 
|  | 3079 | Align = MF.getDataLayout().getTypeAllocSize(CPTy); | 
|  | 3080 |  | 
|  | 3081 | MachineConstantPool *MCP = MF.getConstantPool(); | 
|  | 3082 | return MCP->getConstantPoolIndex(CPVal, Align); | 
|  | 3083 | } | 
|  | 3084 |  | 
|  | 3085 | MachineInstr *AArch64InstructionSelector::emitLoadFromConstantPool( | 
|  | 3086 | Constant *CPVal, MachineIRBuilder &MIRBuilder) const { | 
|  | 3087 | unsigned CPIdx = emitConstantPoolEntry(CPVal, MIRBuilder.getMF()); | 
|  | 3088 |  | 
|  | 3089 | auto Adrp = | 
|  | 3090 | MIRBuilder.buildInstr(AArch64::ADRP, {&AArch64::GPR64RegClass}, {}) | 
|  | 3091 | .addConstantPoolIndex(CPIdx, 0, AArch64II::MO_PAGE); | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3092 |  | 
|  | 3093 | MachineInstr *LoadMI = nullptr; | 
|  | 3094 | switch (MIRBuilder.getDataLayout().getTypeStoreSize(CPVal->getType())) { | 
|  | 3095 | case 16: | 
|  | 3096 | LoadMI = | 
|  | 3097 | &*MIRBuilder | 
|  | 3098 | .buildInstr(AArch64::LDRQui, {&AArch64::FPR128RegClass}, {Adrp}) | 
|  | 3099 | .addConstantPoolIndex(CPIdx, 0, | 
|  | 3100 | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | 
|  | 3101 | break; | 
|  | 3102 | case 8: | 
|  | 3103 | LoadMI = &*MIRBuilder | 
|  | 3104 | .buildInstr(AArch64::LDRDui, {&AArch64::FPR64RegClass}, {Adrp}) | 
|  | 3105 | .addConstantPoolIndex( | 
|  | 3106 | CPIdx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC); | 
|  | 3107 | break; | 
|  | 3108 | default: | 
|  | 3109 | LLVM_DEBUG(dbgs() << "Could not load from constant pool of type " | 
|  | 3110 | << *CPVal->getType() << '\n'); | 
|  | 3111 | return nullptr; | 
|  | 3112 | } | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3113 | constrainSelectedInstRegOperands(*Adrp, TII, TRI, RBI); | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3114 | constrainSelectedInstRegOperands(*LoadMI, TII, TRI, RBI); | 
|  | 3115 | return LoadMI; | 
|  | 3116 | } | 
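|  |  |  | 
|  |  | // The emitted sequence is the usual ADRP + page-offset load, e.g. for a | 
|  |  | // 16-byte constant (a sketch; the MIR below is illustrative): | 
|  |  | //   %page:gpr64 = ADRP target-flags(aarch64-page) %const.N | 
|  |  | //   %val:fpr128 = LDRQui %page, target-flags(aarch64-pageoff, aarch64-nc) %const.N | 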
|  | 3117 |  | 
|  | 3118 | /// Return an <Opcode, SubregIndex> pair to do a vector elt insert of a given | 
|  | 3119 | /// size and RB. | 
|  | 3120 | static std::pair<unsigned, unsigned> | 
|  | 3121 | getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) { | 
|  | 3122 | unsigned Opc, SubregIdx; | 
|  | 3123 | if (RB.getID() == AArch64::GPRRegBankID) { | 
|  | 3124 | if (EltSize == 32) { | 
|  | 3125 | Opc = AArch64::INSvi32gpr; | 
|  | 3126 | SubregIdx = AArch64::ssub; | 
|  | 3127 | } else if (EltSize == 64) { | 
|  | 3128 | Opc = AArch64::INSvi64gpr; | 
|  | 3129 | SubregIdx = AArch64::dsub; | 
|  | 3130 | } else { | 
|  | 3131 | llvm_unreachable("invalid elt size!"); | 
|  | 3132 | } | 
|  | 3133 | } else { | 
|  | 3134 | if (EltSize == 8) { | 
|  | 3135 | Opc = AArch64::INSvi8lane; | 
|  | 3136 | SubregIdx = AArch64::bsub; | 
|  | 3137 | } else if (EltSize == 16) { | 
|  | 3138 | Opc = AArch64::INSvi16lane; | 
|  | 3139 | SubregIdx = AArch64::hsub; | 
|  | 3140 | } else if (EltSize == 32) { | 
|  | 3141 | Opc = AArch64::INSvi32lane; | 
|  | 3142 | SubregIdx = AArch64::ssub; | 
|  | 3143 | } else if (EltSize == 64) { | 
|  | 3144 | Opc = AArch64::INSvi64lane; | 
|  | 3145 | SubregIdx = AArch64::dsub; | 
|  | 3146 | } else { | 
|  | 3147 | llvm_unreachable("invalid elt size!"); | 
|  | 3148 | } | 
|  | 3149 | } | 
|  | 3150 | return std::make_pair(Opc, SubregIdx); | 
|  | 3151 | } | 
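|  |  |  | 
|  |  | // Hypothetical usage: inserting a 64-bit element from a GPR picks | 
|  |  | // INSvi64gpr/dsub: | 
|  |  | //   unsigned Opc, SubIdx; | 
|  |  | //   std::tie(Opc, SubIdx) = getInsertVecEltOpInfo(GPRBank, 64); | 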
|  | 3152 |  | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3153 | MachineInstr * | 
| Jessica Paquette | 728b18f | 2019-07-24 23:11:01 +0000 | [diff] [blame] | 3154 | AArch64InstructionSelector::emitADD(Register DefReg, MachineOperand &LHS, | 
|  | 3155 | MachineOperand &RHS, | 
|  | 3156 | MachineIRBuilder &MIRBuilder) const { | 
|  | 3157 | assert(LHS.isReg() && RHS.isReg() && "Expected LHS and RHS to be registers!"); | 
|  | 3158 | MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo(); | 
|  | 3159 | static const unsigned OpcTable[2][2]{{AArch64::ADDXrr, AArch64::ADDXri}, | 
|  | 3160 | {AArch64::ADDWrr, AArch64::ADDWri}}; | 
|  | 3161 | bool Is32Bit = MRI.getType(LHS.getReg()).getSizeInBits() == 32; | 
|  | 3162 | auto ImmFns = selectArithImmed(RHS); | 
|  | 3163 | unsigned Opc = OpcTable[Is32Bit][ImmFns.hasValue()]; | 
|  | 3164 | auto AddMI = MIRBuilder.buildInstr(Opc, {DefReg}, {LHS.getReg()}); | 
|  | 3165 |  | 
|  | 3166 | // If we matched a valid constant immediate, add those operands. | 
|  | 3167 | if (ImmFns) { | 
|  | 3168 | for (auto &RenderFn : *ImmFns) | 
|  | 3169 | RenderFn(AddMI); | 
|  | 3170 | } else { | 
|  | 3171 | AddMI.addUse(RHS.getReg()); | 
|  | 3172 | } | 
|  | 3173 |  | 
|  | 3174 | constrainSelectedInstRegOperands(*AddMI, TII, TRI, RBI); | 
|  | 3175 | return &*AddMI; | 
|  | 3176 | } | 
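|  |  |  | 
|  |  | // E.g. (illustrative): with an RHS constant that selectArithImmed can | 
|  |  | // match, such as 16, this emits | 
|  |  | //   %dst:gpr64 = ADDXri %lhs, 16, 0 | 
|  |  | // and otherwise falls back to the register-register form ADDXrr. | 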
|  | 3177 |  | 
|  | 3178 | MachineInstr * | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3179 | AArch64InstructionSelector::emitCMN(MachineOperand &LHS, MachineOperand &RHS, | 
|  | 3180 | MachineIRBuilder &MIRBuilder) const { | 
|  | 3181 | assert(LHS.isReg() && RHS.isReg() && "Expected LHS and RHS to be registers!"); | 
|  | 3182 | MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo(); | 
|  | 3183 | static const unsigned OpcTable[2][2]{{AArch64::ADDSXrr, AArch64::ADDSXri}, | 
|  | 3184 | {AArch64::ADDSWrr, AArch64::ADDSWri}}; | 
|  | 3185 | bool Is32Bit = (MRI.getType(LHS.getReg()).getSizeInBits() == 32); | 
|  | 3186 | auto ImmFns = selectArithImmed(RHS); | 
|  | 3187 | unsigned Opc = OpcTable[Is32Bit][ImmFns.hasValue()]; | 
|  | 3188 | Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR; | 
|  | 3189 |  | 
|  | 3190 | auto CmpMI = MIRBuilder.buildInstr(Opc, {ZReg}, {LHS.getReg()}); | 
|  | 3191 |  | 
|  | 3192 | // If we matched a valid constant immediate, add those operands. | 
|  | 3193 | if (ImmFns) { | 
|  | 3194 | for (auto &RenderFn : *ImmFns) | 
|  | 3195 | RenderFn(CmpMI); | 
|  | 3196 | } else { | 
|  | 3197 | CmpMI.addUse(RHS.getReg()); | 
|  | 3198 | } | 
|  | 3199 |  | 
|  | 3200 | constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI); | 
|  | 3201 | return &*CmpMI; | 
|  | 3202 | } | 
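|  |  |  | 
|  |  | // A CMN is just an ADDS whose result is discarded into the zero register, | 
|  |  | // so a 32-bit case is roughly (illustrative): | 
|  |  | //   $wzr = ADDSWrr %lhs, %rhs, implicit-def $nzcv ; i.e. "cmn %lhs, %rhs" | 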
|  | 3203 |  | 
| Jessica Paquette | 55d1924 | 2019-07-08 22:58:36 +0000 | [diff] [blame] | 3204 | MachineInstr * | 
|  | 3205 | AArch64InstructionSelector::emitTST(const Register &LHS, const Register &RHS, | 
|  | 3206 | MachineIRBuilder &MIRBuilder) const { | 
|  | 3207 | MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo(); | 
|  | 3208 | unsigned RegSize = MRI.getType(LHS).getSizeInBits(); | 
|  | 3209 | bool Is32Bit = (RegSize == 32); | 
|  | 3210 | static const unsigned OpcTable[2][2]{{AArch64::ANDSXrr, AArch64::ANDSXri}, | 
|  | 3211 | {AArch64::ANDSWrr, AArch64::ANDSWri}}; | 
|  | 3212 | Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR; | 
|  | 3213 |  | 
|  | 3214 | // We might be able to fold an immediate into the TST. We need to make sure | 
|  | 3215 | // it's a logical immediate though, since ANDS requires that. | 
|  | 3216 | auto ValAndVReg = getConstantVRegValWithLookThrough(RHS, MRI); | 
|  | 3217 | bool IsImmForm = ValAndVReg.hasValue() && | 
|  | 3218 | AArch64_AM::isLogicalImmediate(ValAndVReg->Value, RegSize); | 
|  | 3219 | unsigned Opc = OpcTable[Is32Bit][IsImmForm]; | 
|  | 3220 | auto TstMI = MIRBuilder.buildInstr(Opc, {ZReg}, {LHS}); | 
|  | 3221 |  | 
|  | 3222 | if (IsImmForm) | 
|  | 3223 | TstMI.addImm( | 
|  | 3224 | AArch64_AM::encodeLogicalImmediate(ValAndVReg->Value, RegSize)); | 
|  | 3225 | else | 
|  | 3226 | TstMI.addUse(RHS); | 
|  | 3227 |  | 
|  | 3228 | constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI); | 
|  | 3229 | return &*TstMI; | 
|  | 3230 | } | 
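|  |  |  | 
|  |  | // Likewise, a TST is an ANDS into the zero register. With a foldable | 
|  |  | // logical immediate this is roughly (illustrative): | 
|  |  | //   $wzr = ANDSWri %lhs, <encoded imm>, implicit-def $nzcv ; "tst %lhs, #imm" | 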
|  | 3231 |  | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3232 | MachineInstr *AArch64InstructionSelector::emitIntegerCompare( | 
|  | 3233 | MachineOperand &LHS, MachineOperand &RHS, MachineOperand &Predicate, | 
|  | 3234 | MachineIRBuilder &MIRBuilder) const { | 
|  | 3235 | assert(LHS.isReg() && RHS.isReg() && "Expected LHS and RHS to be registers!"); | 
|  | 3236 | MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo(); | 
|  | 3237 |  | 
| Jessica Paquette | 55d1924 | 2019-07-08 22:58:36 +0000 | [diff] [blame] | 3238 | // Fold the compare if possible. | 
|  | 3239 | MachineInstr *FoldCmp = | 
|  | 3240 | tryFoldIntegerCompare(LHS, RHS, Predicate, MIRBuilder); | 
|  | 3241 | if (FoldCmp) | 
|  | 3242 | return FoldCmp; | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3243 |  | 
|  | 3244 | // Couldn't fold the compare into a CMN or TST. Just emit a normal compare. | 
|  | 3245 | unsigned CmpOpc = 0; | 
|  | 3246 | Register ZReg; | 
|  | 3247 |  | 
|  | 3248 | LLT CmpTy = MRI.getType(LHS.getReg()); | 
| Jessica Paquette | 6584109 | 2019-07-03 18:30:01 +0000 | [diff] [blame] | 3249 | assert((CmpTy.isScalar() || CmpTy.isPointer()) && | 
|  | 3250 | "Expected scalar or pointer"); | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3251 | if (CmpTy == LLT::scalar(32)) { | 
|  | 3252 | CmpOpc = AArch64::SUBSWrr; | 
|  | 3253 | ZReg = AArch64::WZR; | 
|  | 3254 | } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) { | 
|  | 3255 | CmpOpc = AArch64::SUBSXrr; | 
|  | 3256 | ZReg = AArch64::XZR; | 
|  | 3257 | } else { | 
|  | 3258 | return nullptr; | 
|  | 3259 | } | 
|  | 3260 |  | 
|  | 3261 | // Try to match immediate forms. | 
|  | 3262 | auto ImmFns = selectArithImmed(RHS); | 
|  | 3263 | if (ImmFns) | 
|  | 3264 | CmpOpc = CmpOpc == AArch64::SUBSWrr ? AArch64::SUBSWri : AArch64::SUBSXri; | 
|  | 3265 |  | 
|  | 3266 | auto CmpMI = MIRBuilder.buildInstr(CmpOpc).addDef(ZReg).addUse(LHS.getReg()); | 
|  | 3267 | // If we matched a valid constant immediate, add those operands. | 
|  | 3268 | if (ImmFns) { | 
|  | 3269 | for (auto &RenderFn : *ImmFns) | 
|  | 3270 | RenderFn(CmpMI); | 
|  | 3271 | } else { | 
|  | 3272 | CmpMI.addUse(RHS.getReg()); | 
|  | 3273 | } | 
|  | 3274 |  | 
|  | 3275 | // Make sure that we can constrain the compare that we emitted. | 
|  | 3276 | constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI); | 
|  | 3277 | return &*CmpMI; | 
|  | 3278 | } | 
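|  |  |  | 
|  |  | // E.g. (illustrative): an s64 compare with a non-foldable RHS becomes | 
|  |  | //   $xzr = SUBSXrr %lhs, %rhs, implicit-def $nzcv ; i.e. "cmp %lhs, %rhs" | 
|  |  | // and the predicate is consumed later by whatever reads NZCV. | 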
|  | 3279 |  | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3280 | MachineInstr *AArch64InstructionSelector::emitVectorConcat( | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3281 | Optional<Register> Dst, Register Op1, Register Op2, | 
| Amara Emerson | 2ff2298 | 2019-03-14 22:48:15 +0000 | [diff] [blame] | 3282 | MachineIRBuilder &MIRBuilder) const { | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3283 | // We implement a vector concat by: | 
|  | 3284 | // 1. Using scalar_to_vector to insert the lower vector into the larger dest | 
|  | 3285 | // 2. Inserting the upper vector into the destination's upper element | 
|  | 3286 | // TODO: some of this code is common with G_BUILD_VECTOR handling. | 
|  | 3287 | MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo(); | 
|  | 3288 |  | 
|  | 3289 | const LLT Op1Ty = MRI.getType(Op1); | 
|  | 3290 | const LLT Op2Ty = MRI.getType(Op2); | 
|  | 3291 |  | 
|  | 3292 | if (Op1Ty != Op2Ty) { | 
|  | 3293 | LLVM_DEBUG(dbgs() << "Could not do vector concat of differing vector tys"); | 
|  | 3294 | return nullptr; | 
|  | 3295 | } | 
|  | 3296 | assert(Op1Ty.isVector() && "Expected a vector for vector concat"); | 
|  | 3297 |  | 
|  | 3298 | if (Op1Ty.getSizeInBits() >= 128) { | 
|  | 3299 | LLVM_DEBUG(dbgs() << "Vector concat not supported for full size vectors"); | 
|  | 3300 | return nullptr; | 
|  | 3301 | } | 
|  | 3302 |  | 
|  | 3303 | // At the moment we just support 64 bit vector concats. | 
|  | 3304 | if (Op1Ty.getSizeInBits() != 64) { | 
|  | 3305 | LLVM_DEBUG(dbgs() << "Vector concat supported for 64b vectors"); | 
|  | 3306 | return nullptr; | 
|  | 3307 | } | 
|  | 3308 |  | 
|  | 3309 | const LLT ScalarTy = LLT::scalar(Op1Ty.getSizeInBits()); | 
|  | 3310 | const RegisterBank &FPRBank = *RBI.getRegBank(Op1, MRI, TRI); | 
|  | 3311 | const TargetRegisterClass *DstRC = | 
|  | 3312 | getMinClassForRegBank(FPRBank, Op1Ty.getSizeInBits() * 2); | 
|  | 3313 |  | 
|  | 3314 | MachineInstr *WidenedOp1 = | 
|  | 3315 | emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op1, MIRBuilder); | 
|  | 3316 | MachineInstr *WidenedOp2 = | 
|  | 3317 | emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op2, MIRBuilder); | 
|  | 3318 | if (!WidenedOp1 || !WidenedOp2) { | 
|  | 3319 | LLVM_DEBUG(dbgs() << "Could not emit a vector from scalar value"); | 
|  | 3320 | return nullptr; | 
|  | 3321 | } | 
|  | 3322 |  | 
|  | 3323 | // Now do the insert of the upper element. | 
|  | 3324 | unsigned InsertOpc, InsSubRegIdx; | 
|  | 3325 | std::tie(InsertOpc, InsSubRegIdx) = | 
|  | 3326 | getInsertVecEltOpInfo(FPRBank, ScalarTy.getSizeInBits()); | 
|  | 3327 |  | 
| Amara Emerson | 2ff2298 | 2019-03-14 22:48:15 +0000 | [diff] [blame] | 3328 | if (!Dst) | 
|  | 3329 | Dst = MRI.createVirtualRegister(DstRC); | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3330 | auto InsElt = | 
|  | 3331 | MIRBuilder | 
| Amara Emerson | 2ff2298 | 2019-03-14 22:48:15 +0000 | [diff] [blame] | 3332 | .buildInstr(InsertOpc, {*Dst}, {WidenedOp1->getOperand(0).getReg()}) | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3333 | .addImm(1) /* Lane index */ | 
|  | 3334 | .addUse(WidenedOp2->getOperand(0).getReg()) | 
|  | 3335 | .addImm(0); | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3336 | constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI); | 
|  | 3337 | return &*InsElt; | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3338 | } | 
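|  |  |  | 
|  |  | // Sketch for concatenating two <2 x s32> operands (illustrative names): | 
|  |  | //   %w1:fpr128 = INSERT_SUBREG (IMPLICIT_DEF), %op1, %subreg.dsub | 
|  |  | //   %w2:fpr128 = INSERT_SUBREG (IMPLICIT_DEF), %op2, %subreg.dsub | 
|  |  | //   %dst:fpr128 = INSvi64lane %w1, 1, %w2, 0 | 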
|  | 3339 |  | 
| Jessica Paquette | a3843fe | 2019-05-01 22:39:43 +0000 | [diff] [blame] | 3340 | MachineInstr *AArch64InstructionSelector::emitFMovForFConstant( | 
|  | 3341 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 3342 | assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && | 
|  | 3343 | "Expected a G_FCONSTANT!"); | 
|  | 3344 | MachineOperand &ImmOp = I.getOperand(1); | 
|  | 3345 | unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits(); | 
|  | 3346 |  | 
|  | 3347 | // Only handle 32 and 64 bit defs for now. | 
|  | 3348 | if (DefSize != 32 && DefSize != 64) | 
|  | 3349 | return nullptr; | 
|  | 3350 |  | 
|  | 3351 | // Don't handle null values using FMOV. | 
|  | 3352 | if (ImmOp.getFPImm()->isNullValue()) | 
|  | 3353 | return nullptr; | 
|  | 3354 |  | 
|  | 3355 | // Get the immediate representation for the FMOV. | 
|  | 3356 | const APFloat &ImmValAPF = ImmOp.getFPImm()->getValueAPF(); | 
|  | 3357 | int Imm = DefSize == 32 ? AArch64_AM::getFP32Imm(ImmValAPF) | 
|  | 3358 | : AArch64_AM::getFP64Imm(ImmValAPF); | 
|  | 3359 |  | 
|  | 3360 | // If this is -1, it means the immediate can't be represented as the requested | 
|  | 3361 | // floating point value. Bail. | 
|  | 3362 | if (Imm == -1) | 
|  | 3363 | return nullptr; | 
|  | 3364 |  | 
|  | 3365 | // Update MI to represent the new FMOV instruction, constrain it, and return. | 
|  | 3366 | ImmOp.ChangeToImmediate(Imm); | 
|  | 3367 | unsigned MovOpc = DefSize == 32 ? AArch64::FMOVSi : AArch64::FMOVDi; | 
|  | 3368 | I.setDesc(TII.get(MovOpc)); | 
|  | 3369 | constrainSelectedInstRegOperands(I, TII, TRI, RBI); | 
|  | 3370 | return &I; | 
|  | 3371 | } | 
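|  |  |  | 
|  |  | // E.g. (illustrative): a G_FCONSTANT of double 1.0 is representable as an | 
|  |  | // 8-bit FMOV immediate, so the instruction is rewritten in place to | 
|  |  | //   %dst:fpr64 = FMOVDi <imm8 encoding of 1.0> | 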
|  | 3372 |  | 
| Jessica Paquette | 49537bb | 2019-06-17 18:40:06 +0000 | [diff] [blame] | 3373 | MachineInstr * | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3374 | AArch64InstructionSelector::emitCSetForICMP(Register DefReg, unsigned Pred, | 
| Jessica Paquette | 49537bb | 2019-06-17 18:40:06 +0000 | [diff] [blame] | 3375 | MachineIRBuilder &MIRBuilder) const { | 
|  | 3376 | // CSINC increments the result when the predicate is false. Invert it. | 
|  | 3377 | const AArch64CC::CondCode InvCC = changeICMPPredToAArch64CC( | 
|  | 3378 | CmpInst::getInversePredicate((CmpInst::Predicate)Pred)); | 
|  | 3379 | auto I = | 
|  | 3380 | MIRBuilder | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3381 | .buildInstr(AArch64::CSINCWr, {DefReg}, {Register(AArch64::WZR), Register(AArch64::WZR)}) | 
| Jessica Paquette | 49537bb | 2019-06-17 18:40:06 +0000 | [diff] [blame] | 3382 | .addImm(InvCC); | 
|  | 3383 | constrainSelectedInstRegOperands(*I, TII, TRI, RBI); | 
|  | 3384 | return &*I; | 
|  | 3385 | } | 
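|  |  |  | 
|  |  | // This is the standard "cset" expansion. E.g. for an EQ predicate | 
|  |  | // (illustrative): | 
|  |  | //   %def:gpr32 = CSINCWr $wzr, $wzr, NE ; %def = (NZCV satisfies EQ) ? 1 : 0 | 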
|  | 3386 |  | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 3387 | bool AArch64InstructionSelector::tryOptSelect(MachineInstr &I) const { | 
|  | 3388 | MachineIRBuilder MIB(I); | 
|  | 3389 | MachineRegisterInfo &MRI = *MIB.getMRI(); | 
|  | 3390 | const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); | 
|  | 3391 |  | 
|  | 3392 | // We want to recognize this pattern: | 
|  | 3393 | // | 
|  | 3394 | // $z = G_FCMP pred, $x, $y | 
|  | 3395 | // ... | 
|  | 3396 | // $w = G_SELECT $z, $a, $b | 
|  | 3397 | // | 
|  | 3398 | // Where the value of $z is *only* ever used by the G_SELECT (possibly with | 
|  | 3399 | // some copies/truncs in between.) | 
|  | 3400 | // | 
|  | 3401 | // If we see this, then we can emit something like this: | 
|  | 3402 | // | 
|  | 3403 | // fcmp $x, $y | 
|  | 3404 | // fcsel $w, $a, $b, pred | 
|  | 3405 | // | 
|  | 3406 | // Rather than emitting both of the rather long sequences in the standard | 
|  | 3407 | // G_FCMP/G_SELECT select methods. | 
|  | 3408 |  | 
|  | 3409 | // First, check if the condition is defined by a compare. | 
|  | 3410 | MachineInstr *CondDef = MRI.getVRegDef(I.getOperand(1).getReg()); | 
|  | 3411 | while (CondDef) { | 
|  | 3412 | // We can only fold if all of the defs have one use. | 
|  | 3413 | if (!MRI.hasOneUse(CondDef->getOperand(0).getReg())) | 
|  | 3414 | return false; | 
|  | 3415 |  | 
|  | 3416 | // We can skip over G_TRUNC since the condition is 1-bit. | 
|  | 3417 | // Truncating/extending has no impact on the value. | 
|  | 3418 | unsigned Opc = CondDef->getOpcode(); | 
|  | 3419 | if (Opc != TargetOpcode::COPY && Opc != TargetOpcode::G_TRUNC) | 
|  | 3420 | break; | 
|  | 3421 |  | 
| Amara Emerson | d940e20 | 2019-06-06 07:33:47 +0000 | [diff] [blame] | 3422 | // Can't see past copies from physregs. | 
|  | 3423 | if (Opc == TargetOpcode::COPY && | 
| Daniel Sanders | 2bea69b | 2019-08-01 23:27:28 +0000 | [diff] [blame] | 3424 | Register::isPhysicalRegister(CondDef->getOperand(1).getReg())) | 
| Amara Emerson | d940e20 | 2019-06-06 07:33:47 +0000 | [diff] [blame] | 3425 | return false; | 
|  | 3426 |  | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 3427 | CondDef = MRI.getVRegDef(CondDef->getOperand(1).getReg()); | 
|  | 3428 | } | 
|  | 3429 |  | 
|  | 3430 | // Is the condition defined by a compare? | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3431 | if (!CondDef) | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 3432 | return false; | 
|  | 3433 |  | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3434 | unsigned CondOpc = CondDef->getOpcode(); | 
|  | 3435 | if (CondOpc != TargetOpcode::G_ICMP && CondOpc != TargetOpcode::G_FCMP) | 
|  | 3436 | return false; | 
|  | 3437 |  | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 3438 | AArch64CC::CondCode CondCode; | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3439 | if (CondOpc == TargetOpcode::G_ICMP) { | 
|  | 3440 | CondCode = changeICMPPredToAArch64CC( | 
|  | 3441 | (CmpInst::Predicate)CondDef->getOperand(1).getPredicate()); | 
|  | 3442 | if (!emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3), | 
|  | 3443 | CondDef->getOperand(1), MIB)) { | 
|  | 3444 | LLVM_DEBUG(dbgs() << "Couldn't emit compare for select!\n"); | 
|  | 3445 | return false; | 
|  | 3446 | } | 
|  | 3447 | } else { | 
|  | 3448 | // Get the condition code for the select. | 
|  | 3449 | AArch64CC::CondCode CondCode2; | 
|  | 3450 | changeFCMPPredToAArch64CC( | 
|  | 3451 | (CmpInst::Predicate)CondDef->getOperand(1).getPredicate(), CondCode, | 
|  | 3452 | CondCode2); | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 3453 |  | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3454 | // changeFCMPPredToAArch64CC sets CondCode2 to AL when we require two | 
|  | 3455 | // instructions to emit the comparison. | 
|  | 3456 | // TODO: Handle FCMP_UEQ and FCMP_ONE. After that, this check will be | 
|  | 3457 | // unnecessary. | 
|  | 3458 | if (CondCode2 != AArch64CC::AL) | 
|  | 3459 | return false; | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 3460 |  | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3461 | // Make sure we'll be able to select the compare. | 
|  | 3462 | unsigned CmpOpc = selectFCMPOpc(*CondDef, MRI); | 
|  | 3463 | if (!CmpOpc) | 
|  | 3464 | return false; | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 3465 |  | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3466 | // Emit a new compare. | 
|  | 3467 | auto Cmp = MIB.buildInstr(CmpOpc, {}, {CondDef->getOperand(2).getReg()}); | 
|  | 3468 | if (CmpOpc != AArch64::FCMPSri && CmpOpc != AArch64::FCMPDri) | 
|  | 3469 | Cmp.addUse(CondDef->getOperand(3).getReg()); | 
|  | 3470 | constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI); | 
|  | 3471 | } | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 3472 |  | 
|  | 3473 | // Emit the select. | 
|  | 3474 | unsigned CSelOpc = selectSelectOpc(I, MRI, RBI); | 
|  | 3475 | auto CSel = | 
|  | 3476 | MIB.buildInstr(CSelOpc, {I.getOperand(0).getReg()}, | 
|  | 3477 | {I.getOperand(2).getReg(), I.getOperand(3).getReg()}) | 
|  | 3478 | .addImm(CondCode); | 
| Amara Emerson | c37ff0d | 2019-06-05 23:46:16 +0000 | [diff] [blame] | 3479 | constrainSelectedInstRegOperands(*CSel, TII, TRI, RBI); | 
|  | 3480 | I.eraseFromParent(); | 
|  | 3481 | return true; | 
|  | 3482 | } | 
|  | 3483 |  | 
| Jessica Paquette | 55d1924 | 2019-07-08 22:58:36 +0000 | [diff] [blame] | 3484 | MachineInstr *AArch64InstructionSelector::tryFoldIntegerCompare( | 
|  | 3485 | MachineOperand &LHS, MachineOperand &RHS, MachineOperand &Predicate, | 
|  | 3486 | MachineIRBuilder &MIRBuilder) const { | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3487 | assert(LHS.isReg() && RHS.isReg() && Predicate.isPredicate() && | 
|  | 3488 | "Unexpected MachineOperand"); | 
| Jessica Paquette | 49537bb | 2019-06-17 18:40:06 +0000 | [diff] [blame] | 3489 | MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); | 
|  | 3490 | // We want to find this sort of thing: | 
|  | 3491 | // x = G_SUB 0, y | 
|  | 3492 | // G_ICMP z, x | 
|  | 3493 | // | 
|  | 3494 | // In this case, we can fold the G_SUB into the G_ICMP using a CMN instead. | 
|  | 3495 | // e.g: | 
|  | 3496 | // | 
|  | 3497 | // cmn z, y | 
|  | 3498 |  | 
| Jessica Paquette | 49537bb | 2019-06-17 18:40:06 +0000 | [diff] [blame] | 3499 | // Helper lambda to detect the subtract followed by the compare. | 
|  | 3500 | // Takes in the def of the LHS or RHS, and checks if it's a subtract from 0. | 
|  | 3501 | auto IsCMN = [&](MachineInstr *DefMI, const AArch64CC::CondCode &CC) { | 
|  | 3502 | if (!DefMI || DefMI->getOpcode() != TargetOpcode::G_SUB) | 
|  | 3503 | return false; | 
|  | 3504 |  | 
|  | 3505 | // Need to make sure NZCV is the same at the end of the transformation. | 
|  | 3506 | if (CC != AArch64CC::EQ && CC != AArch64CC::NE) | 
|  | 3507 | return false; | 
|  | 3512 |  | 
|  | 3513 | // Make sure that we're getting | 
|  | 3514 | // x = G_SUB 0, y | 
|  | 3515 | auto ValAndVReg = | 
|  | 3516 | getConstantVRegValWithLookThrough(DefMI->getOperand(1).getReg(), MRI); | 
|  | 3517 | if (!ValAndVReg || ValAndVReg->Value != 0) | 
|  | 3518 | return false; | 
|  | 3519 |  | 
|  | 3520 | // This can safely be represented as a CMN. | 
|  | 3521 | return true; | 
|  | 3522 | }; | 
|  | 3523 |  | 
|  | 3524 | // Check if the RHS or LHS of the G_ICMP is defined by a SUB | 
| Jessica Paquette | 3132968 | 2019-07-10 18:44:57 +0000 | [diff] [blame] | 3525 | MachineInstr *LHSDef = getDefIgnoringCopies(LHS.getReg(), MRI); | 
|  | 3526 | MachineInstr *RHSDef = getDefIgnoringCopies(RHS.getReg(), MRI); | 
| Jessica Paquette | 55d1924 | 2019-07-08 22:58:36 +0000 | [diff] [blame] | 3527 | CmpInst::Predicate P = (CmpInst::Predicate)Predicate.getPredicate(); | 
|  | 3528 | const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(P); | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3529 |  | 
| Jessica Paquette | 55d1924 | 2019-07-08 22:58:36 +0000 | [diff] [blame] | 3530 | // Given this: | 
|  | 3531 | // | 
|  | 3532 | // x = G_SUB 0, y | 
|  | 3533 | // G_ICMP x, z | 
|  | 3534 | // | 
|  | 3535 | // Produce this: | 
|  | 3536 | // | 
|  | 3537 | // cmn y, z | 
|  | 3538 | if (IsCMN(LHSDef, CC)) | 
|  | 3539 | return emitCMN(LHSDef->getOperand(2), RHS, MIRBuilder); | 
|  | 3540 |  | 
|  | 3541 | // Same idea here, but with the RHS of the compare instead: | 
|  | 3542 | // | 
|  | 3543 | // Given this: | 
|  | 3544 | // | 
|  | 3545 | // x = G_SUB 0, y | 
|  | 3546 | // G_ICMP z, x | 
|  | 3547 | // | 
|  | 3548 | // Produce this: | 
|  | 3549 | // | 
|  | 3550 | // cmn z, y | 
|  | 3551 | if (IsCMN(RHSDef, CC)) | 
|  | 3552 | return emitCMN(LHS, RHSDef->getOperand(2), MIRBuilder); | 
|  | 3553 |  | 
|  | 3554 | // Given this: | 
|  | 3555 | // | 
|  | 3556 | // z = G_AND x, y | 
|  | 3557 | // G_ICMP z, 0 | 
|  | 3558 | // | 
|  | 3559 | // Produce this if the compare is signed: | 
|  | 3560 | // | 
|  | 3561 | // tst x, y | 
|  | 3562 | if (!isUnsignedICMPPred(P) && LHSDef && | 
|  | 3563 | LHSDef->getOpcode() == TargetOpcode::G_AND) { | 
|  | 3564 | // Make sure that the RHS is 0. | 
|  | 3565 | auto ValAndVReg = getConstantVRegValWithLookThrough(RHS.getReg(), MRI); | 
|  | 3566 | if (!ValAndVReg || ValAndVReg->Value != 0) | 
|  | 3567 | return nullptr; | 
|  | 3568 |  | 
|  | 3569 | return emitTST(LHSDef->getOperand(1).getReg(), | 
|  | 3570 | LHSDef->getOperand(2).getReg(), MIRBuilder); | 
| Jessica Paquette | 49537bb | 2019-06-17 18:40:06 +0000 | [diff] [blame] | 3571 | } | 
|  | 3572 |  | 
| Jessica Paquette | 9931604 | 2019-07-02 19:44:16 +0000 | [diff] [blame] | 3573 | return nullptr; | 
| Jessica Paquette | 49537bb | 2019-06-17 18:40:06 +0000 | [diff] [blame] | 3574 | } | 
|  | 3575 |  | 
| Amara Emerson | 761ca2e | 2019-03-19 21:43:05 +0000 | [diff] [blame] | 3576 | bool AArch64InstructionSelector::tryOptVectorDup(MachineInstr &I) const { | 
|  | 3577 | // Try to match a vector splat operation into a dup instruction. | 
|  | 3578 | // We're looking for this pattern: | 
|  | 3579 | //    %scalar:gpr(s64) = COPY $x0 | 
|  | 3580 | //    %undef:fpr(<2 x s64>) = G_IMPLICIT_DEF | 
|  | 3581 | //    %cst0:gpr(s32) = G_CONSTANT i32 0 | 
|  | 3582 | //    %zerovec:fpr(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32) | 
|  | 3583 | //    %ins:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %undef, %scalar(s64), %cst0(s32) | 
|  | 3584 | //    %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef, | 
|  | 3585 | //                                             %zerovec(<2 x s32>) | 
|  | 3586 | // | 
|  | 3587 | // ...into: | 
|  | 3588 | // %splat = DUP %scalar | 
|  | 3589 | // We use the regbank of the scalar to determine which kind of dup to use. | 
|  | 3590 | MachineIRBuilder MIB(I); | 
|  | 3591 | MachineRegisterInfo &MRI = *MIB.getMRI(); | 
|  | 3592 | const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); | 
|  | 3593 | using namespace TargetOpcode; | 
|  | 3594 | using namespace MIPatternMatch; | 
|  | 3595 |  | 
|  | 3596 | // Begin matching the insert. | 
|  | 3597 | auto *InsMI = | 
| Jessica Paquette | 7c95925 | 2019-07-10 18:46:56 +0000 | [diff] [blame] | 3598 | getOpcodeDef(G_INSERT_VECTOR_ELT, I.getOperand(1).getReg(), MRI); | 
| Amara Emerson | 761ca2e | 2019-03-19 21:43:05 +0000 | [diff] [blame] | 3599 | if (!InsMI) | 
|  | 3600 | return false; | 
|  | 3601 | // Match the undef vector operand. | 
|  | 3602 | auto *UndefMI = | 
| Jessica Paquette | 7c95925 | 2019-07-10 18:46:56 +0000 | [diff] [blame] | 3603 | getOpcodeDef(G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(), MRI); | 
| Amara Emerson | 761ca2e | 2019-03-19 21:43:05 +0000 | [diff] [blame] | 3604 | if (!UndefMI) | 
|  | 3605 | return false; | 
|  | 3606 | // Match the scalar being splatted. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3607 | Register ScalarReg = InsMI->getOperand(2).getReg(); | 
| Amara Emerson | 761ca2e | 2019-03-19 21:43:05 +0000 | [diff] [blame] | 3608 | const RegisterBank *ScalarRB = RBI.getRegBank(ScalarReg, MRI, TRI); | 
|  | 3609 | // Match the index constant 0. | 
|  | 3610 | int64_t Index = 0; | 
|  | 3611 | if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ICst(Index)) || Index) | 
|  | 3612 | return false; | 
|  | 3613 |  | 
|  | 3614 | // The shuffle's second operand doesn't matter if the mask is all zero. | 
| Matt Arsenault | 5af9cf0 | 2019-08-13 15:34:38 +0000 | [diff] [blame] | 3615 | const Constant *Mask = I.getOperand(3).getShuffleMask(); | 
|  | 3616 | if (!isa<ConstantAggregateZero>(Mask)) | 
| Amara Emerson | 761ca2e | 2019-03-19 21:43:05 +0000 | [diff] [blame] | 3617 | return false; | 
| Amara Emerson | 761ca2e | 2019-03-19 21:43:05 +0000 | [diff] [blame] | 3618 |  | 
|  | 3619 | // We're done, now find out what kind of splat we need. | 
|  | 3620 | LLT VecTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 3621 | LLT EltTy = VecTy.getElementType(); | 
|  | 3622 | if (VecTy.getSizeInBits() != 128 || EltTy.getSizeInBits() < 32) { | 
|  | 3623 | LLVM_DEBUG(dbgs() << "Could not optimize splat pattern < 128b yet"); | 
|  | 3624 | return false; | 
|  | 3625 | } | 
|  | 3626 | bool IsFP = ScalarRB->getID() == AArch64::FPRRegBankID; | 
|  | 3627 | static const unsigned OpcTable[2][2] = { | 
|  | 3628 | {AArch64::DUPv4i32gpr, AArch64::DUPv2i64gpr}, | 
|  | 3629 | {AArch64::DUPv4i32lane, AArch64::DUPv2i64lane}}; | 
|  | 3630 | unsigned Opc = OpcTable[IsFP][EltTy.getSizeInBits() == 64]; | 
|  | 3631 |  | 
|  | 3632 | // For FP splats, we need to widen the scalar reg via undef too. | 
|  | 3633 | if (IsFP) { | 
|  | 3634 | MachineInstr *Widen = emitScalarToVector( | 
|  | 3635 | EltTy.getSizeInBits(), &AArch64::FPR128RegClass, ScalarReg, MIB); | 
|  | 3636 | if (!Widen) | 
|  | 3637 | return false; | 
|  | 3638 | ScalarReg = Widen->getOperand(0).getReg(); | 
|  | 3639 | } | 
|  | 3640 | auto Dup = MIB.buildInstr(Opc, {I.getOperand(0).getReg()}, {ScalarReg}); | 
|  | 3641 | if (IsFP) | 
|  | 3642 | Dup.addImm(0); | 
|  | 3643 | constrainSelectedInstRegOperands(*Dup, TII, TRI, RBI); | 
|  | 3644 | I.eraseFromParent(); | 
|  | 3645 | return true; | 
|  | 3646 | } | 
|  | 3647 |  | 
|  | 3648 | bool AArch64InstructionSelector::tryOptVectorShuffle(MachineInstr &I) const { | 
|  | 3649 | if (TM.getOptLevel() == CodeGenOpt::None) | 
|  | 3650 | return false; | 
|  | 3651 | if (tryOptVectorDup(I)) | 
|  | 3652 | return true; | 
|  | 3653 | return false; | 
|  | 3654 | } | 
|  | 3655 |  | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3656 | bool AArch64InstructionSelector::selectShuffleVector( | 
|  | 3657 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
| Amara Emerson | 761ca2e | 2019-03-19 21:43:05 +0000 | [diff] [blame] | 3658 | if (tryOptVectorShuffle(I)) | 
|  | 3659 | return true; | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3660 | const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3661 | Register Src1Reg = I.getOperand(1).getReg(); | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3662 | const LLT Src1Ty = MRI.getType(Src1Reg); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3663 | Register Src2Reg = I.getOperand(2).getReg(); | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3664 | const LLT Src2Ty = MRI.getType(Src2Reg); | 
| Matt Arsenault | 5af9cf0 | 2019-08-13 15:34:38 +0000 | [diff] [blame] | 3665 | const Constant *ShuffleMask = I.getOperand(3).getShuffleMask(); | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3666 |  | 
|  | 3667 | MachineBasicBlock &MBB = *I.getParent(); | 
|  | 3668 | MachineFunction &MF = *MBB.getParent(); | 
|  | 3669 | LLVMContext &Ctx = MF.getFunction().getContext(); | 
|  | 3670 |  | 
| Matt Arsenault | 5af9cf0 | 2019-08-13 15:34:38 +0000 | [diff] [blame] | 3671 | SmallVector<int, 8> Mask; | 
|  | 3672 | ShuffleVectorInst::getShuffleMask(ShuffleMask, Mask); | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3673 |  | 
|  | 3674 | // G_SHUFFLE_VECTOR is weird in that the source operands can be scalars, if | 
|  | 3675 | // they originated from a <1 x T> type. Those should have been lowered into | 
|  | 3676 | // G_BUILD_VECTOR earlier. | 
|  | 3677 | if (!Src1Ty.isVector() || !Src2Ty.isVector()) { | 
|  | 3678 | LLVM_DEBUG(dbgs() << "Could not select a \"scalar\" G_SHUFFLE_VECTOR\n"); | 
|  | 3679 | return false; | 
|  | 3680 | } | 
|  | 3681 |  | 
|  | 3682 | unsigned BytesPerElt = DstTy.getElementType().getSizeInBits() / 8; | 
|  | 3683 |  | 
|  | 3684 | SmallVector<Constant *, 64> CstIdxs; | 
| Matt Arsenault | 5af9cf0 | 2019-08-13 15:34:38 +0000 | [diff] [blame] | 3685 | for (int Val : Mask) { | 
| Amara Emerson | 2806fd0 | 2019-04-12 21:31:21 +0000 | [diff] [blame] | 3686 | // For now, we'll just assume any undef indexes are 0. This should be | 
|  | 3687 | // optimized in the future, e.g. to select DUP etc. | 
| Matt Arsenault | 5af9cf0 | 2019-08-13 15:34:38 +0000 | [diff] [blame] | 3688 | Val = Val < 0 ? 0 : Val; | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3689 | for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) { | 
|  | 3690 | unsigned Offset = Byte + Val * BytesPerElt; | 
|  | 3691 | CstIdxs.emplace_back(ConstantInt::get(Type::getInt8Ty(Ctx), Offset)); | 
|  | 3692 | } | 
|  | 3693 | } | 
|  | 3694 |  | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3695 | MachineIRBuilder MIRBuilder(I); | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3696 |  | 
|  | 3697 | // Use a constant pool to load the index vector for TBL. | 
|  | 3698 | Constant *CPVal = ConstantVector::get(CstIdxs); | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3699 | MachineInstr *IndexLoad = emitLoadFromConstantPool(CPVal, MIRBuilder); | 
|  | 3700 | if (!IndexLoad) { | 
|  | 3701 | LLVM_DEBUG(dbgs() << "Could not load from a constant pool"); | 
|  | 3702 | return false; | 
|  | 3703 | } | 
|  | 3704 |  | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3705 | if (DstTy.getSizeInBits() != 128) { | 
|  | 3706 | assert(DstTy.getSizeInBits() == 64 && "Unexpected shuffle result ty"); | 
|  | 3707 | // This case can be done with TBL1. | 
| Amara Emerson | 2ff2298 | 2019-03-14 22:48:15 +0000 | [diff] [blame] | 3708 | MachineInstr *Concat = emitVectorConcat(None, Src1Reg, Src2Reg, MIRBuilder); | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3709 | if (!Concat) { | 
|  | 3710 | LLVM_DEBUG(dbgs() << "Could not do vector concat for tbl1\n"); | 
|  | 3711 | return false; | 
|  | 3712 | } | 
|  | 3713 |  | 
|  | 3714 | // The constant pool load will be 64 bits, so we need to convert to an FPR128 reg. | 
|  | 3715 | IndexLoad = | 
|  | 3716 | emitScalarToVector(64, &AArch64::FPR128RegClass, | 
|  | 3717 | IndexLoad->getOperand(0).getReg(), MIRBuilder); | 
|  | 3718 |  | 
|  | 3719 | auto TBL1 = MIRBuilder.buildInstr( | 
|  | 3720 | AArch64::TBLv16i8One, {&AArch64::FPR128RegClass}, | 
|  | 3721 | {Concat->getOperand(0).getReg(), IndexLoad->getOperand(0).getReg()}); | 
|  | 3722 | constrainSelectedInstRegOperands(*TBL1, TII, TRI, RBI); | 
|  | 3723 |  | 
| Amara Emerson | 3739a20 | 2019-03-15 21:59:50 +0000 | [diff] [blame] | 3724 | auto Copy = | 
| Amara Emerson | 8627178 | 2019-03-18 19:20:10 +0000 | [diff] [blame] | 3725 | MIRBuilder | 
|  | 3726 | .buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {}) | 
|  | 3727 | .addReg(TBL1.getReg(0), 0, AArch64::dsub); | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3728 | RBI.constrainGenericRegister(Copy.getReg(0), AArch64::FPR64RegClass, MRI); | 
|  | 3729 | I.eraseFromParent(); | 
|  | 3730 | return true; | 
|  | 3731 | } | 
|  | 3732 |  | 
| Amara Emerson | 1abe05c | 2019-02-21 20:20:16 +0000 | [diff] [blame] | 3733 | // For TBL2 we need to emit a REG_SEQUENCE to tie together two consecutive | 
|  | 3734 | // Q registers for regalloc. | 
|  | 3735 | auto RegSeq = MIRBuilder | 
|  | 3736 | .buildInstr(TargetOpcode::REG_SEQUENCE, | 
|  | 3737 | {&AArch64::QQRegClass}, {Src1Reg}) | 
|  | 3738 | .addImm(AArch64::qsub0) | 
|  | 3739 | .addUse(Src2Reg) | 
|  | 3740 | .addImm(AArch64::qsub1); | 
|  | 3741 |  | 
|  | 3742 | auto TBL2 = | 
|  | 3743 | MIRBuilder.buildInstr(AArch64::TBLv16i8Two, {I.getOperand(0).getReg()}, | 
|  | 3744 | {RegSeq, IndexLoad->getOperand(0).getReg()}); | 
|  | 3745 | constrainSelectedInstRegOperands(*RegSeq, TII, TRI, RBI); | 
|  | 3746 | constrainSelectedInstRegOperands(*TBL2, TII, TRI, RBI); | 
|  | 3747 | I.eraseFromParent(); | 
|  | 3748 | return true; | 
|  | 3749 | } | 
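|  |  | // Rough sketch of the TBL2 path above (illustrative, not from a test): a | 
|  |  | // 128-bit G_SHUFFLE_VECTOR becomes a constant-pool load of the byte-index | 
|  |  | // vector, a REG_SEQUENCE tying the two sources into a consecutive Q-register | 
|  |  | // pair, and a TBLv16i8Two reading both. | 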
|  | 3750 |  | 
| Jessica Paquette | 16d67a3 | 2019-03-13 23:22:23 +0000 | [diff] [blame] | 3751 | MachineInstr *AArch64InstructionSelector::emitLaneInsert( | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3752 | Optional<Register> DstReg, Register SrcReg, Register EltReg, | 
| Jessica Paquette | 16d67a3 | 2019-03-13 23:22:23 +0000 | [diff] [blame] | 3753 | unsigned LaneIdx, const RegisterBank &RB, | 
|  | 3754 | MachineIRBuilder &MIRBuilder) const { | 
|  | 3755 | MachineInstr *InsElt = nullptr; | 
|  | 3756 | const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass; | 
|  | 3757 | MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); | 
|  | 3758 |  | 
|  | 3759 | // Create a register to define with the insert if one wasn't passed in. | 
|  | 3760 | if (!DstReg) | 
|  | 3761 | DstReg = MRI.createVirtualRegister(DstRC); | 
|  | 3762 |  | 
|  | 3763 | unsigned EltSize = MRI.getType(EltReg).getSizeInBits(); | 
|  | 3764 | unsigned Opc = getInsertVecEltOpInfo(RB, EltSize).first; | 
|  | 3765 |  | 
|  | 3766 | if (RB.getID() == AArch64::FPRRegBankID) { | 
|  | 3767 | auto InsSub = emitScalarToVector(EltSize, DstRC, EltReg, MIRBuilder); | 
|  | 3768 | InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg}) | 
|  | 3769 | .addImm(LaneIdx) | 
|  | 3770 | .addUse(InsSub->getOperand(0).getReg()) | 
|  | 3771 | .addImm(0); | 
|  | 3772 | } else { | 
|  | 3773 | InsElt = MIRBuilder.buildInstr(Opc, {*DstReg}, {SrcReg}) | 
|  | 3774 | .addImm(LaneIdx) | 
|  | 3775 | .addUse(EltReg); | 
|  | 3776 | } | 
|  | 3777 |  | 
|  | 3778 | constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI); | 
|  | 3779 | return InsElt; | 
|  | 3780 | } | 
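|  |  | // For example (assuming the opcodes reported by getInsertVecEltOpInfo): a | 
|  |  | // 32-bit element on the FPR bank is first widened to a 128-bit vector and | 
|  |  | // inserted with INSvi32lane, while a 32-bit GPR element is inserted | 
|  |  | // directly with INSvi32gpr. | 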
|  | 3781 |  | 
| Jessica Paquette | 5aff1f4 | 2019-03-14 18:01:30 +0000 | [diff] [blame] | 3782 | bool AArch64InstructionSelector::selectInsertElt( | 
|  | 3783 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 3784 | assert(I.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT); | 
|  | 3785 |  | 
|  | 3786 | // Get information on the destination. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3787 | Register DstReg = I.getOperand(0).getReg(); | 
| Jessica Paquette | 5aff1f4 | 2019-03-14 18:01:30 +0000 | [diff] [blame] | 3788 | const LLT DstTy = MRI.getType(DstReg); | 
| Jessica Paquette | d3ffd47 | 2019-03-29 21:39:36 +0000 | [diff] [blame] | 3789 | unsigned VecSize = DstTy.getSizeInBits(); | 
| Jessica Paquette | 5aff1f4 | 2019-03-14 18:01:30 +0000 | [diff] [blame] | 3790 |  | 
|  | 3791 | // Get information on the element we want to insert into the destination. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3792 | Register EltReg = I.getOperand(2).getReg(); | 
| Jessica Paquette | 5aff1f4 | 2019-03-14 18:01:30 +0000 | [diff] [blame] | 3793 | const LLT EltTy = MRI.getType(EltReg); | 
|  | 3794 | unsigned EltSize = EltTy.getSizeInBits(); | 
|  | 3795 | if (EltSize < 16 || EltSize > 64) | 
|  | 3796 | return false; // Don't support all element types yet. | 
|  | 3797 |  | 
|  | 3798 | // Find the definition of the index. Bail out if it's not defined by a | 
|  | 3799 | // G_CONSTANT. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3800 | Register IdxReg = I.getOperand(3).getReg(); | 
| Jessica Paquette | 76f64b6 | 2019-04-26 21:53:13 +0000 | [diff] [blame] | 3801 | auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI); | 
|  | 3802 | if (!VRegAndVal) | 
| Jessica Paquette | 5aff1f4 | 2019-03-14 18:01:30 +0000 | [diff] [blame] | 3803 | return false; | 
| Jessica Paquette | 76f64b6 | 2019-04-26 21:53:13 +0000 | [diff] [blame] | 3804 | unsigned LaneIdx = VRegAndVal->Value; | 
| Jessica Paquette | 5aff1f4 | 2019-03-14 18:01:30 +0000 | [diff] [blame] | 3805 |  | 
|  | 3806 | // Perform the lane insert. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3807 | Register SrcReg = I.getOperand(1).getReg(); | 
| Jessica Paquette | 5aff1f4 | 2019-03-14 18:01:30 +0000 | [diff] [blame] | 3808 | const RegisterBank &EltRB = *RBI.getRegBank(EltReg, MRI, TRI); | 
|  | 3809 | MachineIRBuilder MIRBuilder(I); | 
| Jessica Paquette | d3ffd47 | 2019-03-29 21:39:36 +0000 | [diff] [blame] | 3810 |  | 
|  | 3811 | if (VecSize < 128) { | 
|  | 3812 | // If the vector we're inserting into is smaller than 128 bits, widen it | 
|  | 3813 | // to 128 to do the insert. | 
|  | 3814 | MachineInstr *ScalarToVec = emitScalarToVector( | 
|  | 3815 | VecSize, &AArch64::FPR128RegClass, SrcReg, MIRBuilder); | 
|  | 3816 | if (!ScalarToVec) | 
|  | 3817 | return false; | 
|  | 3818 | SrcReg = ScalarToVec->getOperand(0).getReg(); | 
|  | 3819 | } | 
|  | 3820 |  | 
|  | 3821 | // Create an insert into a new FPR128 register. | 
|  | 3822 | // Note that if our vector is already 128 bits, we end up emitting an extra | 
|  | 3823 | // register. | 
|  | 3824 | MachineInstr *InsMI = | 
|  | 3825 | emitLaneInsert(None, SrcReg, EltReg, LaneIdx, EltRB, MIRBuilder); | 
|  | 3826 |  | 
|  | 3827 | if (VecSize < 128) { | 
|  | 3828 | // If we had to widen to perform the insert, then we have to demote back to | 
|  | 3829 | // the original size to get the result we want. | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3830 | Register DemoteVec = InsMI->getOperand(0).getReg(); | 
| Jessica Paquette | d3ffd47 | 2019-03-29 21:39:36 +0000 | [diff] [blame] | 3831 | const TargetRegisterClass *RC = | 
|  | 3832 | getMinClassForRegBank(*RBI.getRegBank(DemoteVec, MRI, TRI), VecSize); | 
|  | 3833 | if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) { | 
|  | 3834 | LLVM_DEBUG(dbgs() << "Unsupported register class!\n"); | 
|  | 3835 | return false; | 
|  | 3836 | } | 
|  | 3837 | unsigned SubReg = 0; | 
|  | 3838 | if (!getSubRegForClass(RC, TRI, SubReg)) | 
|  | 3839 | return false; | 
|  | 3840 | if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) { | 
|  | 3841 | LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << VecSize | 
|  | 3842 | << ")\n"); | 
|  | 3843 | return false; | 
|  | 3844 | } | 
|  | 3845 | MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {}) | 
|  | 3846 | .addReg(DemoteVec, 0, SubReg); | 
|  | 3847 | RBI.constrainGenericRegister(DstReg, *RC, MRI); | 
|  | 3848 | } else { | 
|  | 3849 | // No widening needed. | 
|  | 3850 | InsMI->getOperand(0).setReg(DstReg); | 
|  | 3851 | constrainSelectedInstRegOperands(*InsMI, TII, TRI, RBI); | 
|  | 3852 | } | 
|  | 3853 |  | 
| Jessica Paquette | 5aff1f4 | 2019-03-14 18:01:30 +0000 | [diff] [blame] | 3854 | I.eraseFromParent(); | 
|  | 3855 | return true; | 
|  | 3856 | } | 
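|  |  | // Worked example (illustrative): inserting into a <2 x s32> (64-bit) vector | 
|  |  | // widens the source to an FPR128, performs the lane insert there, and then | 
|  |  | // copies the dsub subregister back out to form the 64-bit result. | 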
|  | 3857 |  | 
| Amara Emerson | 5ec1460 | 2018-12-10 18:44:58 +0000 | [diff] [blame] | 3858 | bool AArch64InstructionSelector::selectBuildVector( | 
|  | 3859 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 3860 | assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR); | 
|  | 3861 | // Until we port more of the optimized selections, just use a vector insert | 
|  | 3862 | // sequence for now. | 
|  | 3863 | const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); | 
|  | 3864 | const LLT EltTy = MRI.getType(I.getOperand(1).getReg()); | 
|  | 3865 | unsigned EltSize = EltTy.getSizeInBits(); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3866 | if (EltSize < 16 || EltSize > 64) | 
| Amara Emerson | 5ec1460 | 2018-12-10 18:44:58 +0000 | [diff] [blame] | 3867 | return false; // Don't support all element types yet. | 
|  | 3868 | const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI); | 
| Amara Emerson | 6bcfa1c | 2019-02-25 18:52:54 +0000 | [diff] [blame] | 3869 | MachineIRBuilder MIRBuilder(I); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3870 |  | 
|  | 3871 | const TargetRegisterClass *DstRC = &AArch64::FPR128RegClass; | 
| Amara Emerson | 6bcfa1c | 2019-02-25 18:52:54 +0000 | [diff] [blame] | 3872 | MachineInstr *ScalarToVec = | 
| Amara Emerson | 8acb0d9 | 2019-03-04 19:16:00 +0000 | [diff] [blame] | 3873 | emitScalarToVector(DstTy.getElementType().getSizeInBits(), DstRC, | 
|  | 3874 | I.getOperand(1).getReg(), MIRBuilder); | 
| Amara Emerson | 6bcfa1c | 2019-02-25 18:52:54 +0000 | [diff] [blame] | 3875 | if (!ScalarToVec) | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3876 | return false; | 
|  | 3877 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3878 | Register DstVec = ScalarToVec->getOperand(0).getReg(); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3879 | unsigned DstSize = DstTy.getSizeInBits(); | 
|  | 3880 |  | 
|  | 3881 | // Keep track of the last MI we inserted. Later on, we might be able to save | 
|  | 3882 | // a copy using it. | 
|  | 3883 | MachineInstr *PrevMI = nullptr; | 
|  | 3884 | for (unsigned i = 2, e = DstSize / EltSize + 1; i < e; ++i) { | 
| Jessica Paquette | 16d67a3 | 2019-03-13 23:22:23 +0000 | [diff] [blame] | 3885 | // Note that if we don't do a subregister copy, we can end up making an | 
|  | 3886 | // extra register. | 
|  | 3887 | PrevMI = &*emitLaneInsert(None, DstVec, I.getOperand(i).getReg(), i - 1, RB, | 
|  | 3888 | MIRBuilder); | 
|  | 3889 | DstVec = PrevMI->getOperand(0).getReg(); | 
| Amara Emerson | 5ec1460 | 2018-12-10 18:44:58 +0000 | [diff] [blame] | 3890 | } | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3891 |  | 
|  | 3892 | // If DstTy's size in bits is less than 128, then emit a subregister copy | 
|  | 3893 | // from DstVec to the last register we've defined. | 
|  | 3894 | if (DstSize < 128) { | 
| Jessica Paquette | 85ace62 | 2019-03-13 23:29:54 +0000 | [diff] [blame] | 3895 | // Force this to be FPR using the destination vector. | 
|  | 3896 | const TargetRegisterClass *RC = | 
|  | 3897 | getMinClassForRegBank(*RBI.getRegBank(DstVec, MRI, TRI), DstSize); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3898 | if (!RC) | 
|  | 3899 | return false; | 
| Jessica Paquette | 85ace62 | 2019-03-13 23:29:54 +0000 | [diff] [blame] | 3900 | if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) { | 
|  | 3901 | LLVM_DEBUG(dbgs() << "Unsupported register class!\n"); | 
|  | 3902 | return false; | 
|  | 3903 | } | 
|  | 3904 |  | 
|  | 3905 | unsigned SubReg = 0; | 
|  | 3906 | if (!getSubRegForClass(RC, TRI, SubReg)) | 
|  | 3907 | return false; | 
|  | 3908 | if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) { | 
|  | 3909 | LLVM_DEBUG(dbgs() << "Unsupported destination size! (" << DstSize | 
|  | 3910 | << ")\n"); | 
|  | 3911 | return false; | 
|  | 3912 | } | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3913 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3914 | Register Reg = MRI.createVirtualRegister(RC); | 
|  | 3915 | Register DstReg = I.getOperand(0).getReg(); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3916 |  | 
| Amara Emerson | 8627178 | 2019-03-18 19:20:10 +0000 | [diff] [blame] | 3917 | MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {}) | 
|  | 3918 | .addReg(DstVec, 0, SubReg); | 
| Jessica Paquette | 245047d | 2019-01-24 22:00:41 +0000 | [diff] [blame] | 3919 | MachineOperand &RegOp = I.getOperand(1); | 
|  | 3920 | RegOp.setReg(Reg); | 
|  | 3921 | RBI.constrainGenericRegister(DstReg, *RC, MRI); | 
|  | 3922 | } else { | 
|  | 3923 | // We don't need a subregister copy. Save a copy by re-using the | 
|  | 3924 | // destination register on the final insert. | 
|  | 3925 | assert(PrevMI && "PrevMI was null?"); | 
|  | 3926 | PrevMI->getOperand(0).setReg(I.getOperand(0).getReg()); | 
|  | 3927 | constrainSelectedInstRegOperands(*PrevMI, TII, TRI, RBI); | 
|  | 3928 | } | 
|  | 3929 |  | 
| Amara Emerson | 5ec1460 | 2018-12-10 18:44:58 +0000 | [diff] [blame] | 3930 | I.eraseFromParent(); | 
|  | 3931 | return true; | 
|  | 3932 | } | 
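|  |  | // Illustrative example: a G_BUILD_VECTOR of four 32-bit elements becomes a | 
|  |  | // scalar-to-vector insert of element 0 followed by three lane inserts; a | 
|  |  | // 64-bit destination like <2 x s32> would additionally copy the result out | 
|  |  | // of the dsub subregister of the 128-bit scratch vector. | 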
|  | 3933 |  | 
| Jessica Paquette | 7f6fe7c | 2019-04-29 20:58:17 +0000 | [diff] [blame] | 3934 | /// Helper function to find an intrinsic ID on a MachineInstr. Returns the | 
|  | 3935 | /// ID if it exists, and 0 otherwise. | 
|  | 3936 | static unsigned findIntrinsicID(MachineInstr &I) { | 
|  | 3937 | auto IntrinOp = find_if(I.operands(), [&](const MachineOperand &Op) { | 
|  | 3938 | return Op.isIntrinsicID(); | 
|  | 3939 | }); | 
|  | 3940 | if (IntrinOp == I.operands_end()) | 
|  | 3941 | return 0; | 
|  | 3942 | return IntrinOp->getIntrinsicID(); | 
|  | 3943 | } | 
|  | 3944 |  | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 3945 | /// Helper function to emit the correct opcode for a llvm.aarch64.stlxr | 
|  | 3946 | /// intrinsic. | 
|  | 3947 | static unsigned getStlxrOpcode(unsigned NumBytesToStore) { | 
|  | 3948 | switch (NumBytesToStore) { | 
| Jessica Paquette | aa8b999 | 2019-07-26 23:28:53 +0000 | [diff] [blame] | 3949 | // TODO: 1 and 2 byte stores | 
|  | 3950 | case 4: | 
|  | 3951 | return AArch64::STLXRW; | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 3952 | case 8: | 
|  | 3953 | return AArch64::STLXRX; | 
|  | 3954 | default: | 
|  | 3955 | LLVM_DEBUG(dbgs() << "Unexpected number of bytes to store! (" | 
|  | 3956 | << NumBytesToStore << ")\n"); | 
|  | 3957 | break; | 
|  | 3958 | } | 
|  | 3959 | return 0; | 
|  | 3960 | } | 
|  | 3961 |  | 
|  | 3962 | bool AArch64InstructionSelector::selectIntrinsicWithSideEffects( | 
|  | 3963 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 3964 | // Find the intrinsic ID. | 
| Jessica Paquette | 7f6fe7c | 2019-04-29 20:58:17 +0000 | [diff] [blame] | 3965 | unsigned IntrinID = findIntrinsicID(I); | 
|  | 3966 | if (!IntrinID) | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 3967 | return false; | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 3968 | MachineIRBuilder MIRBuilder(I); | 
|  | 3969 |  | 
|  | 3970 | // Select the instruction. | 
|  | 3971 | switch (IntrinID) { | 
|  | 3972 | default: | 
|  | 3973 | return false; | 
|  | 3974 | case Intrinsic::trap: | 
|  | 3975 | MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(1); | 
|  | 3976 | break; | 
| Tom Tan | 7ecb514 | 2019-06-21 23:38:05 +0000 | [diff] [blame] | 3977 | case Intrinsic::debugtrap: | 
|  | 3978 | if (!STI.isTargetWindows()) | 
|  | 3979 | return false; | 
|  | 3980 | MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(0xF000); | 
|  | 3981 | break; | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 3982 | case Intrinsic::aarch64_stlxr: | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3983 | Register StatReg = I.getOperand(0).getReg(); | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 3984 | assert(RBI.getSizeInBits(StatReg, MRI, TRI) == 32 && | 
|  | 3985 | "Status register must be 32 bits!"); | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3986 | Register SrcReg = I.getOperand(2).getReg(); | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 3987 |  | 
|  | 3988 | if (RBI.getSizeInBits(SrcReg, MRI, TRI) != 64) { | 
|  | 3989 | LLVM_DEBUG(dbgs() << "Only support 64-bit sources right now.\n"); | 
|  | 3990 | return false; | 
|  | 3991 | } | 
|  | 3992 |  | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 3993 | Register PtrReg = I.getOperand(3).getReg(); | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 3994 | assert(MRI.getType(PtrReg).isPointer() && "Expected pointer operand"); | 
|  | 3995 |  | 
|  | 3996 | // Expect only one memory operand. | 
|  | 3997 | if (!I.hasOneMemOperand()) | 
|  | 3998 | return false; | 
|  | 3999 |  | 
|  | 4000 | const MachineMemOperand *MemOp = *I.memoperands_begin(); | 
|  | 4001 | unsigned NumBytesToStore = MemOp->getSize(); | 
|  | 4002 | unsigned Opc = getStlxrOpcode(NumBytesToStore); | 
|  | 4003 | if (!Opc) | 
|  | 4004 | return false; | 
| Jessica Paquette | aa8b999 | 2019-07-26 23:28:53 +0000 | [diff] [blame] | 4005 | unsigned NumBitsToStore = NumBytesToStore * 8; | 
|  | 4006 | if (NumBitsToStore != 64) { | 
|  | 4007 | // The intrinsic always has a 64-bit source, but we might actually want | 
|  | 4008 | // a differently-sized source for the instruction. Try to get it. | 
|  | 4009 | // TODO: For 1 and 2-byte stores, this will have a G_AND. For now, let's | 
|  | 4010 | // just handle 4-byte stores. | 
|  | 4011 | // TODO: If we don't find a G_ZEXT, we'll have to truncate the value down | 
|  | 4012 | // to the right size for the STLXR. | 
|  | 4013 | MachineInstr *Zext = getOpcodeDef(TargetOpcode::G_ZEXT, SrcReg, MRI); | 
|  | 4014 | if (!Zext) | 
|  | 4015 | return false; | 
|  | 4016 | SrcReg = Zext->getOperand(1).getReg(); | 
|  | 4017 | // We should get an appropriately-sized register here. | 
|  | 4018 | if (RBI.getSizeInBits(SrcReg, MRI, TRI) != NumBitsToStore) | 
|  | 4019 | return false; | 
|  | 4020 | } | 
|  | 4021 | auto StoreMI = MIRBuilder.buildInstr(Opc, {StatReg}, {SrcReg, PtrReg}) | 
|  | 4022 | .addMemOperand(*I.memoperands_begin()); | 
| Jessica Paquette | 22c6215 | 2019-04-02 19:57:26 +0000 | [diff] [blame] | 4023 | constrainSelectedInstRegOperands(*StoreMI, TII, TRI, RBI); | 
|  | 4024 | } | 
|  | 4025 |  | 
|  | 4026 | I.eraseFromParent(); | 
|  | 4027 | return true; | 
|  | 4028 | } | 
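|  |  | // Sketch of the stlxr path above (the IR shape is an assumption based on | 
|  |  | // the usual intrinsic pattern): something like | 
|  |  | //   %res = call i32 @llvm.aarch64.stlxr.p0i32(i64 %ext, i32* %addr) | 
|  |  | // with a 4-byte memory operand selects STLXRW, taking its 32-bit source | 
|  |  | // from the G_ZEXT feeding the intrinsic's 64-bit value operand. | 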
|  | 4029 |  | 
| Jessica Paquette | 7f6fe7c | 2019-04-29 20:58:17 +0000 | [diff] [blame] | 4030 | bool AArch64InstructionSelector::selectIntrinsic( | 
|  | 4031 | MachineInstr &I, MachineRegisterInfo &MRI) const { | 
|  | 4032 | unsigned IntrinID = findIntrinsicID(I); | 
|  | 4033 | if (!IntrinID) | 
|  | 4034 | return false; | 
|  | 4035 | MachineIRBuilder MIRBuilder(I); | 
|  | 4036 |  | 
|  | 4037 | switch (IntrinID) { | 
|  | 4038 | default: | 
|  | 4039 | break; | 
|  | 4040 | case Intrinsic::aarch64_crypto_sha1h: | 
| Matt Arsenault | faeaedf | 2019-06-24 16:16:12 +0000 | [diff] [blame] | 4041 | Register DstReg = I.getOperand(0).getReg(); | 
|  | 4042 | Register SrcReg = I.getOperand(2).getReg(); | 
| Jessica Paquette | 7f6fe7c | 2019-04-29 20:58:17 +0000 | [diff] [blame] | 4043 |  | 
|  | 4044 | // FIXME: Should this be an assert? | 
|  | 4045 | if (MRI.getType(DstReg).getSizeInBits() != 32 || | 
|  | 4046 | MRI.getType(SrcReg).getSizeInBits() != 32) | 
|  | 4047 | return false; | 
|  | 4048 |  | 
|  | 4049 | // The operation has to happen on FPRs. Set up some new FPR registers for | 
|  | 4050 | // the source and destination if they are on GPRs. | 
|  | 4051 | if (RBI.getRegBank(SrcReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) { | 
|  | 4052 | SrcReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass); | 
|  | 4053 | MIRBuilder.buildCopy({SrcReg}, {I.getOperand(2)}); | 
|  | 4054 |  | 
|  | 4055 | // Make sure the copy ends up getting constrained properly. | 
|  | 4056 | RBI.constrainGenericRegister(I.getOperand(2).getReg(), | 
|  | 4057 | AArch64::GPR32RegClass, MRI); | 
|  | 4058 | } | 
|  | 4059 |  | 
|  | 4060 | if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) | 
|  | 4061 | DstReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass); | 
|  | 4062 |  | 
|  | 4063 | // Actually insert the instruction. | 
|  | 4064 | auto SHA1Inst = MIRBuilder.buildInstr(AArch64::SHA1Hrr, {DstReg}, {SrcReg}); | 
|  | 4065 | constrainSelectedInstRegOperands(*SHA1Inst, TII, TRI, RBI); | 
|  | 4066 |  | 
|  | 4067 | // Did we create a new register for the destination? | 
|  | 4068 | if (DstReg != I.getOperand(0).getReg()) { | 
|  | 4069 | // Yep. Copy the result of the instruction back into the original | 
|  | 4070 | // destination. | 
|  | 4071 | MIRBuilder.buildCopy({I.getOperand(0)}, {DstReg}); | 
|  | 4072 | RBI.constrainGenericRegister(I.getOperand(0).getReg(), | 
|  | 4073 | AArch64::GPR32RegClass, MRI); | 
|  | 4074 | } | 
|  | 4075 |  | 
|  | 4076 | I.eraseFromParent(); | 
|  | 4077 | return true; | 
|  | 4078 | } | 
|  | 4079 | return false; | 
|  | 4080 | } | 
|  | 4081 |  | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 4082 | static Optional<uint64_t> getImmedFromMO(const MachineOperand &Root) { | 
|  | 4083 | auto &MI = *Root.getParent(); | 
|  | 4084 | auto &MBB = *MI.getParent(); | 
|  | 4085 | auto &MF = *MBB.getParent(); | 
|  | 4086 | auto &MRI = MF.getRegInfo(); | 
| Daniel Sanders | 8a4bae9 | 2017-03-14 21:32:08 +0000 | [diff] [blame] | 4087 | uint64_t Immed; | 
|  | 4088 | if (Root.isImm()) | 
|  | 4089 | Immed = Root.getImm(); | 
|  | 4090 | else if (Root.isCImm()) | 
|  | 4091 | Immed = Root.getCImm()->getZExtValue(); | 
|  | 4092 | else if (Root.isReg()) { | 
| Jessica Paquette | a99cfee | 2019-07-03 17:46:23 +0000 | [diff] [blame] | 4093 | auto ValAndVReg = | 
|  | 4094 | getConstantVRegValWithLookThrough(Root.getReg(), MRI, true); | 
|  | 4095 | if (!ValAndVReg) | 
| Daniel Sanders | df39cba | 2017-10-15 18:22:54 +0000 | [diff] [blame] | 4096 | return None; | 
| Jessica Paquette | a99cfee | 2019-07-03 17:46:23 +0000 | [diff] [blame] | 4097 | Immed = ValAndVReg->Value; | 
| Daniel Sanders | 8a4bae9 | 2017-03-14 21:32:08 +0000 | [diff] [blame] | 4098 | } else | 
| Daniel Sanders | df39cba | 2017-10-15 18:22:54 +0000 | [diff] [blame] | 4099 | return None; | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 4100 | return Immed; | 
|  | 4101 | } | 
| Daniel Sanders | 8a4bae9 | 2017-03-14 21:32:08 +0000 | [diff] [blame] | 4102 |  | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 4103 | InstructionSelector::ComplexRendererFns | 
|  | 4104 | AArch64InstructionSelector::selectShiftA_32(const MachineOperand &Root) const { | 
|  | 4105 | auto MaybeImmed = getImmedFromMO(Root); | 
|  | 4106 | if (MaybeImmed == None || *MaybeImmed > 31) | 
|  | 4107 | return None; | 
|  | 4108 | uint64_t Enc = (32 - *MaybeImmed) & 0x1f; | 
|  | 4109 | return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}}; | 
|  | 4110 | } | 
|  | 4111 |  | 
|  | 4112 | InstructionSelector::ComplexRendererFns | 
|  | 4113 | AArch64InstructionSelector::selectShiftB_32(const MachineOperand &Root) const { | 
|  | 4114 | auto MaybeImmed = getImmedFromMO(Root); | 
|  | 4115 | if (MaybeImmed == None || *MaybeImmed > 31) | 
|  | 4116 | return None; | 
|  | 4117 | uint64_t Enc = 31 - *MaybeImmed; | 
|  | 4118 | return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}}; | 
|  | 4119 | } | 
|  | 4120 |  | 
|  | 4121 | InstructionSelector::ComplexRendererFns | 
|  | 4122 | AArch64InstructionSelector::selectShiftA_64(const MachineOperand &Root) const { | 
|  | 4123 | auto MaybeImmed = getImmedFromMO(Root); | 
|  | 4124 | if (MaybeImmed == None || *MaybeImmed > 63) | 
|  | 4125 | return None; | 
|  | 4126 | uint64_t Enc = (64 - *MaybeImmed) & 0x3f; | 
|  | 4127 | return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}}; | 
|  | 4128 | } | 
|  | 4129 |  | 
|  | 4130 | InstructionSelector::ComplexRendererFns | 
|  | 4131 | AArch64InstructionSelector::selectShiftB_64(const MachineOperand &Root) const { | 
|  | 4132 | auto MaybeImmed = getImmedFromMO(Root); | 
|  | 4133 | if (MaybeImmed == None || *MaybeImmed > 63) | 
|  | 4134 | return None; | 
|  | 4135 | uint64_t Enc = 63 - *MaybeImmed; | 
|  | 4136 | return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}}; | 
|  | 4137 | } | 
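|  |  | // A sketch of what these encode (mirroring the i32shift_a/i32shift_b style | 
|  |  | // transforms used by SelectionDAG): a 32-bit left shift by Imm is selected | 
|  |  | // as UBFM with immr = (32 - Imm) & 0x1f and imms = 31 - Imm, and the 64-bit | 
|  |  | // variants use 0x3f and 63, which is what the A/B renderer pairs compute. | 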
|  | 4138 |  | 
| Jessica Paquette | e4c46c3 | 2019-08-02 18:12:53 +0000 | [diff] [blame] | 4139 | /// Helper to select an immediate value that can be represented as a 12-bit | 
|  | 4140 | /// value shifted left by either 0 or 12. If it is possible to do so, return | 
|  | 4141 | /// the immediate and shift value. If not, return None. | 
|  | 4142 | /// | 
|  | 4143 | /// Used by selectArithImmed and selectNegArithImmed. | 
| Amara Emerson | cac1151 | 2019-07-03 01:49:06 +0000 | [diff] [blame] | 4144 | InstructionSelector::ComplexRendererFns | 
| Jessica Paquette | e4c46c3 | 2019-08-02 18:12:53 +0000 | [diff] [blame] | 4145 | AArch64InstructionSelector::select12BitValueWithLeftShift( | 
|  | 4146 | uint64_t Immed) const { | 
| Daniel Sanders | 8a4bae9 | 2017-03-14 21:32:08 +0000 | [diff] [blame] | 4147 | unsigned ShiftAmt; | 
| Daniel Sanders | 8a4bae9 | 2017-03-14 21:32:08 +0000 | [diff] [blame] | 4148 | if (Immed >> 12 == 0) { | 
|  | 4149 | ShiftAmt = 0; | 
|  | 4150 | } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) { | 
|  | 4151 | ShiftAmt = 12; | 
|  | 4152 | Immed = Immed >> 12; | 
|  | 4153 | } else | 
| Daniel Sanders | df39cba | 2017-10-15 18:22:54 +0000 | [diff] [blame] | 4154 | return None; | 
| Daniel Sanders | 8a4bae9 | 2017-03-14 21:32:08 +0000 | [diff] [blame] | 4155 |  | 
|  | 4156 | unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt); | 
| Daniel Sanders | df39cba | 2017-10-15 18:22:54 +0000 | [diff] [blame] | 4157 | return {{ | 
|  | 4158 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); }, | 
|  | 4159 | [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); }, | 
|  | 4160 | }}; | 
| Daniel Sanders | 8a4bae9 | 2017-03-14 21:32:08 +0000 | [diff] [blame] | 4161 | } | 
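|  |  | // Example for the helper above: 0x123000 is accepted as 0x123 with LSL #12, | 
|  |  | // and 0xabc is accepted as-is with LSL #0; 0x123456 has nonzero bits in | 
|  |  | // both halves and is rejected with None. | 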
| Daniel Sanders | 0b5293f | 2017-04-06 09:49:34 +0000 | [diff] [blame] | 4162 |  | 
| Jessica Paquette | e4c46c3 | 2019-08-02 18:12:53 +0000 | [diff] [blame] | 4163 | /// SelectArithImmed - Select an immediate value that can be represented as | 
|  | 4164 | /// a 12-bit value shifted left by either 0 or 12. If so, return renderers | 
|  | 4165 | /// for the 12-bit value and the shifter operand; otherwise return None. | 
|  | 4166 | InstructionSelector::ComplexRendererFns | 
|  | 4167 | AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const { | 
|  | 4168 | // This function is called from the addsub_shifted_imm ComplexPattern, | 
|  | 4169 | // which lists [imm] as the list of opcodes it's interested in. However, | 
|  | 4170 | // we still need to check whether the operand is actually an immediate | 
|  | 4171 | // here because the ComplexPattern opcode list is only used in | 
|  | 4172 | // root-level opcode matching. | 
|  | 4173 | auto MaybeImmed = getImmedFromMO(Root); | 
|  | 4174 | if (MaybeImmed == None) | 
|  | 4175 | return None; | 
|  | 4176 | return select12BitValueWithLeftShift(*MaybeImmed); | 
|  | 4177 | } | 
|  | 4178 |  | 
|  | 4179 | /// SelectNegArithImmed - As above, but negates the value before trying to | 
|  | 4180 | /// select it. | 
|  | 4181 | InstructionSelector::ComplexRendererFns | 
|  | 4182 | AArch64InstructionSelector::selectNegArithImmed(MachineOperand &Root) const { | 
|  | 4183 | // We need a register here, because we need to know if we have a 64 or 32 | 
|  | 4184 | // bit immediate. | 
|  | 4185 | if (!Root.isReg()) | 
|  | 4186 | return None; | 
|  | 4187 | auto MaybeImmed = getImmedFromMO(Root); | 
|  | 4188 | if (MaybeImmed == None) | 
|  | 4189 | return None; | 
|  | 4190 | uint64_t Immed = *MaybeImmed; | 
|  | 4191 |  | 
|  | 4192 | // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0" | 
|  | 4193 | // have the opposite effect on the C flag, so this pattern mustn't match under | 
|  | 4194 | // those circumstances. | 
|  | 4195 | if (Immed == 0) | 
|  | 4196 | return None; | 
|  | 4197 |  | 
|  | 4198 | // Check if we're dealing with a 32-bit type on the root or a 64-bit type on | 
|  | 4199 | // the root. | 
|  | 4200 | MachineRegisterInfo &MRI = Root.getParent()->getMF()->getRegInfo(); | 
|  | 4201 | if (MRI.getType(Root.getReg()).getSizeInBits() == 32) | 
|  | 4202 | Immed = ~((uint32_t)Immed) + 1; | 
|  | 4203 | else | 
|  | 4204 | Immed = ~Immed + 1ULL; | 
|  | 4205 |  | 
|  | 4206 | if (Immed & 0xFFFFFFFFFF000000ULL) | 
|  | 4207 | return None; | 
|  | 4208 |  | 
|  | 4209 | Immed &= 0xFFFFFFULL; | 
|  | 4210 | return select12BitValueWithLeftShift(Immed); | 
|  | 4211 | } | 
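|  |  | // Example: "cmp w0, #-16" can be selected as "cmn w0, #16", since | 
|  |  | // subtracting -16 is the same as adding 16; Immed == 0 is excluded above | 
|  |  | // because cmp and cmn with #0 set the C flag differently. | 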
|  | 4212 |  | 
| Jessica Paquette | 2b404d0 | 2019-07-23 16:09:42 +0000 | [diff] [blame] | 4213 | /// Return true if it is worth folding MI into an extended register. That is, | 
|  | 4214 | /// if it's safe to pull it into the addressing mode of a load or store as a | 
|  | 4215 | /// shift. | 
|  | 4216 | bool AArch64InstructionSelector::isWorthFoldingIntoExtendedReg( | 
|  | 4217 | MachineInstr &MI, const MachineRegisterInfo &MRI) const { | 
|  | 4218 | // Always fold if there is one use, or if we're optimizing for size. | 
|  | 4219 | Register DefReg = MI.getOperand(0).getReg(); | 
|  | 4220 | if (MRI.hasOneUse(DefReg) || | 
|  | 4221 | MI.getParent()->getParent()->getFunction().hasMinSize()) | 
|  | 4222 | return true; | 
|  | 4223 |  | 
|  | 4224 | // It's better to avoid folding and recomputing shifts when we don't have a | 
|  | 4225 | // fastpath. | 
|  | 4226 | if (!STI.hasLSLFast()) | 
|  | 4227 | return false; | 
|  | 4228 |  | 
|  | 4229 | // We have a fastpath, so folding a shift in and potentially computing it | 
|  | 4230 | // many times may be beneficial. Check if this is only used in memory ops. | 
|  | 4231 | // If it is, then we should fold. | 
|  | 4232 | return all_of(MRI.use_instructions(DefReg), | 
|  | 4233 | [](MachineInstr &Use) { return Use.mayLoadOrStore(); }); | 
|  | 4234 | } | 
|  | 4235 |  | 
|  | 4236 | /// This is used for computing addresses like this: | 
|  | 4237 | /// | 
|  | 4238 | /// ldr x1, [x2, x3, lsl #3] | 
|  | 4239 | /// | 
|  | 4240 | /// Where x2 is the base register, and x3 is an offset register. The shift-left | 
|  | 4241 | /// is a constant value specific to this load instruction. That is, we'll never | 
|  | 4242 | /// see anything other than a 3 here (the log2 of the size of the element | 
|  | 4243 | /// being loaded). | 
|  | 4244 | InstructionSelector::ComplexRendererFns | 
|  | 4245 | AArch64InstructionSelector::selectAddrModeShiftedExtendXReg( | 
|  | 4246 | MachineOperand &Root, unsigned SizeInBytes) const { | 
|  | 4247 | if (!Root.isReg()) | 
|  | 4248 | return None; | 
|  | 4249 | MachineRegisterInfo &MRI = Root.getParent()->getMF()->getRegInfo(); | 
|  | 4250 |  | 
|  | 4251 | // Make sure that the memory op is a valid size. | 
|  | 4252 | int64_t LegalShiftVal = Log2_32(SizeInBytes); | 
|  | 4253 | if (LegalShiftVal == 0) | 
|  | 4254 | return None; | 
|  | 4255 |  | 
|  | 4256 | // We want to find something like this: | 
|  | 4257 | // | 
|  | 4258 | // val = G_CONSTANT LegalShiftVal | 
|  | 4259 | // shift = G_SHL off_reg val | 
|  | 4260 | // ptr = G_GEP base_reg shift | 
|  | 4261 | // x = G_LOAD ptr | 
|  | 4262 | // | 
|  | 4263 | // And fold it into this addressing mode: | 
|  | 4264 | // | 
|  | 4265 | // ldr x, [base_reg, off_reg, lsl #LegalShiftVal] | 
|  | 4266 |  | 
|  | 4267 | // Check if we can find the G_GEP. | 
|  | 4268 | MachineInstr *Gep = getOpcodeDef(TargetOpcode::G_GEP, Root.getReg(), MRI); | 
|  | 4269 | if (!Gep || !isWorthFoldingIntoExtendedReg(*Gep, MRI)) | 
|  | 4270 | return None; | 
|  | 4271 |  | 
| Jessica Paquette | 6849911 | 2019-07-24 22:49:42 +0000 | [diff] [blame] | 4272 | // Now, try to find an opcode which computes our specific offset. | 
|  | 4273 | // We want a G_SHL or a G_MUL. | 
|  | 4274 | MachineInstr *OffsetInst = | 
|  |  | getDefIgnoringCopies(Gep->getOperand(2).getReg(), MRI); | 
|  | 4275 | if (!OffsetInst) | 
| Jessica Paquette | 2b404d0 | 2019-07-23 16:09:42 +0000 | [diff] [blame] | 4276 | return None; | 
|  | 4277 |  | 
| Jessica Paquette | 6849911 | 2019-07-24 22:49:42 +0000 | [diff] [blame] | 4278 | unsigned OffsetOpc = OffsetInst->getOpcode(); | 
|  | 4279 | if (OffsetOpc != TargetOpcode::G_SHL && OffsetOpc != TargetOpcode::G_MUL) | 
| Jessica Paquette | 2b404d0 | 2019-07-23 16:09:42 +0000 | [diff] [blame] | 4280 | return None; | 
|  | 4281 |  | 
| Jessica Paquette | 6849911 | 2019-07-24 22:49:42 +0000 | [diff] [blame] | 4282 | if (!isWorthFoldingIntoExtendedReg(*OffsetInst, MRI)) | 
|  | 4283 | return None; | 
|  | 4284 |  | 
|  | 4285 | // Now, try to find the specific G_CONSTANT. Start by assuming that the | 
|  | 4286 | // register we will offset is the LHS, and the register containing the | 
|  | 4287 | // constant is the RHS. | 
|  | 4288 | Register OffsetReg = OffsetInst->getOperand(1).getReg(); | 
|  | 4289 | Register ConstantReg = OffsetInst->getOperand(2).getReg(); | 
|  | 4290 | auto ValAndVReg = getConstantVRegValWithLookThrough(ConstantReg, MRI); | 
|  | 4291 | if (!ValAndVReg) { | 
|  | 4292 | // We didn't get a constant on the RHS. If the opcode is a shift, then | 
|  | 4293 | // we're done. | 
|  | 4294 | if (OffsetOpc == TargetOpcode::G_SHL) | 
|  | 4295 | return None; | 
|  | 4296 |  | 
|  | 4297 | // If we have a G_MUL, we can use either register. Try looking at the RHS. | 
|  | 4298 | std::swap(OffsetReg, ConstantReg); | 
|  | 4299 | ValAndVReg = getConstantVRegValWithLookThrough(ConstantReg, MRI); | 
|  | 4300 | if (!ValAndVReg) | 
|  | 4301 | return None; | 
|  | 4302 | } | 
|  | 4303 |  | 
| Jessica Paquette | 2b404d0 | 2019-07-23 16:09:42 +0000 | [diff] [blame] | 4304 | // The value must fit into 3 bits, and must be positive. Make sure that is | 
|  | 4305 | // true. | 
|  | 4306 | int64_t ImmVal = ValAndVReg->Value; | 
| Jessica Paquette | 6849911 | 2019-07-24 22:49:42 +0000 | [diff] [blame] | 4307 |  | 
|  | 4308 | // Since we're going to pull this into a shift, the constant value must be | 
|  | 4309 | // a power of 2. If we got a multiply, then we need to check this. | 
|  | 4310 | if (OffsetOpc == TargetOpcode::G_MUL) { | 
|  | 4311 | if (!isPowerOf2_32(ImmVal)) | 
|  | 4312 | return None; | 
|  | 4313 |  | 
|  | 4314 | // Got a power of 2. So, the amount we'll shift is the log base-2 of that. | 
|  | 4315 | ImmVal = Log2_32(ImmVal); | 
|  | 4316 | } | 
|  | 4317 |  | 
| Jessica Paquette | 2b404d0 | 2019-07-23 16:09:42 +0000 | [diff] [blame] | 4318 | if ((ImmVal & 0x7) != ImmVal) | 
|  | 4319 | return None; | 
|  | 4320 |  | 
|  | 4321 | // We are only allowed to shift by LegalShiftVal. This shift value is built | 
|  | 4322 | // into the instruction, so we can't just use whatever we want. | 
|  | 4323 | if (ImmVal != LegalShiftVal) | 
|  | 4324 | return None; | 
|  | 4325 |  | 
|  | 4326 | // We can use the LHS of the GEP as the base, and OffsetReg (whichever | 
|  | 4327 | // operand of the shift or multiply isn't the constant) as the offset. | 
|  |  | // Signify that we are shifting by setting the shift flag to 1. | 
|  | 4328 | return {{ | 
|  | 4329 | [=](MachineInstrBuilder &MIB) { MIB.add(Gep->getOperand(1)); }, | 
| Jessica Paquette | 6849911 | 2019-07-24 22:49:42 +0000 | [diff] [blame] | 4330 | [=](MachineInstrBuilder &MIB) { MIB.addUse(OffsetReg); }, | 
| Jessica Paquette | 2b404d0 | 2019-07-23 16:09:42 +0000 | [diff] [blame] | 4331 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, | 
|  | 4332 | [=](MachineInstrBuilder &MIB) { MIB.addImm(1); }, | 
|  | 4333 | }}; | 
|  | 4334 | } | 
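|  |  | // Illustrative MIR for the fold above (example values): with an 8-byte | 
|  |  | // access, | 
|  |  | //   %c = G_CONSTANT i64 8 | 
|  |  | //   %off = G_MUL %idx, %c | 
|  |  | //   %addr = G_GEP %base, %off | 
|  |  | // folds to the addressing mode [%base, %idx, lsl #3], because 8 is a power | 
|  |  | // of two whose log2 equals LegalShiftVal. | 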
|  | 4335 |  | 
| Jessica Paquette | 7a1dcc5 | 2019-07-18 21:50:11 +0000 | [diff] [blame] | 4336 | /// This is used for computing addresses like this: | 
|  | 4337 | /// | 
|  | 4338 | /// ldr x1, [x2, x3] | 
|  | 4339 | /// | 
|  | 4340 | /// Where x2 is the base register, and x3 is an offset register. | 
|  | 4341 | /// | 
|  | 4342 | /// When possible (or profitable) to fold a G_GEP into the address calculation, | 
|  | 4343 | /// this will do so. Otherwise, it will return None. | 
|  | 4344 | InstructionSelector::ComplexRendererFns | 
|  | 4345 | AArch64InstructionSelector::selectAddrModeRegisterOffset( | 
|  | 4346 | MachineOperand &Root) const { | 
|  | 4347 | MachineRegisterInfo &MRI = Root.getParent()->getMF()->getRegInfo(); | 
|  | 4348 |  | 
| Jessica Paquette | 7a1dcc5 | 2019-07-18 21:50:11 +0000 | [diff] [blame] | 4349 | // We need a GEP. | 
|  | 4350 | MachineInstr *Gep = MRI.getVRegDef(Root.getReg()); | 
|  | 4351 | if (!Gep || Gep->getOpcode() != TargetOpcode::G_GEP) | 
|  | 4352 | return None; | 
|  | 4353 |  | 
|  | 4354 | // If this is used more than once, let's not bother folding. | 
|  | 4355 | // TODO: Check if they are memory ops. If they are, then we can still fold | 
|  | 4356 | // without having to recompute anything. | 
|  | 4357 | if (!MRI.hasOneUse(Gep->getOperand(0).getReg())) | 
|  | 4358 | return None; | 
|  | 4359 |  | 
|  | 4360 | // Base is the GEP's LHS, offset is its RHS. | 
|  | 4361 | return {{ | 
|  | 4362 | [=](MachineInstrBuilder &MIB) { MIB.add(Gep->getOperand(1)); }, | 
|  | 4363 | [=](MachineInstrBuilder &MIB) { MIB.add(Gep->getOperand(2)); }, | 
|  | 4364 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, | 
|  | 4365 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, | 
|  | 4366 | }}; | 
|  | 4367 | } | 
|  | 4368 |  | 
| Jessica Paquette | 2b404d0 | 2019-07-23 16:09:42 +0000 | [diff] [blame] | 4369 | /// This is intended to be equivalent to selectAddrModeXRO in | 
|  | 4370 | /// AArch64ISelDAGtoDAG. It's used for selecting X register offset loads. | 
|  | 4371 | InstructionSelector::ComplexRendererFns | 
|  | 4372 | AArch64InstructionSelector::selectAddrModeXRO(MachineOperand &Root, | 
|  | 4373 | unsigned SizeInBytes) const { | 
|  | 4374 | MachineRegisterInfo &MRI = Root.getParent()->getMF()->getRegInfo(); | 
|  | 4375 |  | 
|  | 4376 | // If we have a constant offset, then we probably don't want to match a | 
|  | 4377 | // register offset. | 
|  | 4378 | if (isBaseWithConstantOffset(Root, MRI)) | 
|  | 4379 | return None; | 
|  | 4380 |  | 
|  | 4381 | // Try to fold shifts into the addressing mode. | 
|  | 4382 | auto AddrModeFns = selectAddrModeShiftedExtendXReg(Root, SizeInBytes); | 
|  | 4383 | if (AddrModeFns) | 
|  | 4384 | return AddrModeFns; | 
|  | 4385 |  | 
|  | 4386 | // If that doesn't work, see if it's possible to fold in registers from | 
|  | 4387 | // a GEP. | 
|  | 4388 | return selectAddrModeRegisterOffset(Root); | 
|  | 4389 | } | 
|  | 4390 |  | 
| Daniel Sanders | ea8711b | 2017-10-16 03:36:29 +0000 | [diff] [blame] | 4391 | /// Select a "register plus unscaled signed 9-bit immediate" address.  This | 
|  | 4392 | /// should only match when there is an offset that is not valid for a scaled | 
|  | 4393 | /// immediate addressing mode.  The "Size" argument is the size in bytes of the | 
|  | 4394 | /// memory reference, which is needed here to know what is valid for a scaled | 
|  | 4395 | /// immediate. | 
| Daniel Sanders | 1e4569f | 2017-10-20 20:55:29 +0000 | [diff] [blame] | 4396 | InstructionSelector::ComplexRendererFns | 
| Daniel Sanders | ea8711b | 2017-10-16 03:36:29 +0000 | [diff] [blame] | 4397 | AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root, | 
|  | 4398 | unsigned Size) const { | 
|  | 4399 | MachineRegisterInfo &MRI = | 
|  | 4400 | Root.getParent()->getParent()->getParent()->getRegInfo(); | 
|  | 4401 |  | 
|  | 4402 | if (!Root.isReg()) | 
|  | 4403 | return None; | 
|  | 4404 |  | 
|  | 4405 | if (!isBaseWithConstantOffset(Root, MRI)) | 
|  | 4406 | return None; | 
|  | 4407 |  | 
|  | 4408 | MachineInstr *RootDef = MRI.getVRegDef(Root.getReg()); | 
|  | 4409 | if (!RootDef) | 
|  | 4410 | return None; | 
|  | 4411 |  | 
|  | 4412 | MachineOperand &OffImm = RootDef->getOperand(2); | 
|  | 4413 | if (!OffImm.isReg()) | 
|  | 4414 | return None; | 
|  | 4415 | MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg()); | 
|  | 4416 | if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT) | 
|  | 4417 | return None; | 
|  | 4418 | int64_t RHSC; | 
|  | 4419 | MachineOperand &RHSOp1 = RHS->getOperand(1); | 
|  | 4420 | if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64) | 
|  | 4421 | return None; | 
|  | 4422 | RHSC = RHSOp1.getCImm()->getSExtValue(); | 
|  | 4423 |  | 
|  | 4424 | // If the offset is valid as a scaled immediate, don't match here. | 
|  | 4425 | if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size))) | 
|  | 4426 | return None; | 
|  | 4427 | if (RHSC >= -256 && RHSC < 256) { | 
|  | 4428 | MachineOperand &Base = RootDef->getOperand(1); | 
|  | 4429 | return {{ | 
|  | 4430 | [=](MachineInstrBuilder &MIB) { MIB.add(Base); }, | 
|  | 4431 | [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }, | 
|  | 4432 | }}; | 
|  | 4433 | } | 
|  | 4434 | return None; | 
|  | 4435 | } | 
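|  |  | // Example: for a 4-byte load, an offset of 8 is valid as a scaled immediate | 
|  |  | // (LDRWui) and is rejected here, while offsets like -4 or 3 are only | 
|  |  | // representable unscaled (LDURWi) and match, provided they lie in | 
|  |  | // [-256, 256). | 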
|  | 4436 |  | 
|  | 4437 | /// Select a "register plus scaled unsigned 12-bit immediate" address.  The | 
|  | 4438 | /// "Size" argument is the size in bytes of the memory reference, which | 
|  | 4439 | /// determines the scale. | 
| Daniel Sanders | 1e4569f | 2017-10-20 20:55:29 +0000 | [diff] [blame] | 4440 | InstructionSelector::ComplexRendererFns | 
| Daniel Sanders | ea8711b | 2017-10-16 03:36:29 +0000 | [diff] [blame] | 4441 | AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root, | 
|  | 4442 | unsigned Size) const { | 
|  | 4443 | MachineRegisterInfo &MRI = | 
|  | 4444 | Root.getParent()->getParent()->getParent()->getRegInfo(); | 
|  | 4445 |  | 
|  | 4446 | if (!Root.isReg()) | 
|  | 4447 | return None; | 
|  | 4448 |  | 
|  | 4449 | MachineInstr *RootDef = MRI.getVRegDef(Root.getReg()); | 
|  | 4450 | if (!RootDef) | 
|  | 4451 | return None; | 
|  | 4452 |  | 
|  | 4453 | if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) { | 
|  | 4454 | return {{ | 
|  | 4455 | [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }, | 
|  | 4456 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, | 
|  | 4457 | }}; | 
|  | 4458 | } | 
|  | 4459 |  | 
|  | 4460 | if (isBaseWithConstantOffset(Root, MRI)) { | 
|  | 4461 | MachineOperand &LHS = RootDef->getOperand(1); | 
|  | 4462 | MachineOperand &RHS = RootDef->getOperand(2); | 
|  | 4463 | MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg()); | 
|  | 4464 | MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg()); | 
|  | 4465 | if (LHSDef && RHSDef) { | 
|  | 4466 | int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue(); | 
|  | 4467 | unsigned Scale = Log2_32(Size); | 
|  | 4468 | if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) { | 
|  | 4469 | if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) | 
| Daniel Sanders | 01805b6 | 2017-10-16 05:39:30 +0000 | [diff] [blame] | 4470 | return {{ | 
|  | 4471 | [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); }, | 
|  | 4472 | [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); }, | 
|  | 4473 | }}; | 
|  | 4474 |  | 
| Daniel Sanders | ea8711b | 2017-10-16 03:36:29 +0000 | [diff] [blame] | 4475 | return {{ | 
|  | 4476 | [=](MachineInstrBuilder &MIB) { MIB.add(LHS); }, | 
|  | 4477 | [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); }, | 
|  | 4478 | }}; | 
|  | 4479 | } | 
|  | 4480 | } | 
|  | 4481 | } | 
|  | 4482 |  | 
|  | 4483 | // Before falling back to our general case, check if the unscaled | 
|  | 4484 | // instructions can handle this. If so, that's preferable. | 
|  | 4485 | if (selectAddrModeUnscaled(Root, Size).hasValue()) | 
|  | 4486 | return None; | 
|  | 4487 |  | 
|  | 4488 | return {{ | 
|  | 4489 | [=](MachineInstrBuilder &MIB) { MIB.add(Root); }, | 
|  | 4490 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, | 
|  | 4491 | }}; | 
|  | 4492 | } | 
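|  |  | // Example: for an 8-byte access, a G_GEP with constant offset 24 renders as | 
|  |  | // the base plus immediate 3 (24 >> Log2_32(8)), i.e. the scaled form | 
|  |  | // [xN, #24] of LDRXui. | 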
|  | 4493 |  | 
| Volkan Keles | f7f2568 | 2018-01-16 18:44:05 +0000 | [diff] [blame] | 4494 | void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB, | 
|  | 4495 | const MachineInstr &MI) const { | 
|  | 4496 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); | 
|  | 4497 | assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT"); | 
|  | 4498 | Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI); | 
|  | 4499 | assert(CstVal && "Expected constant value"); | 
|  | 4500 | MIB.addImm(CstVal.getValue()); | 
|  | 4501 | } | 
|  | 4502 |  | 
| Daniel Sanders | 0b5293f | 2017-04-06 09:49:34 +0000 | [diff] [blame] | 4503 | namespace llvm { | 
|  | 4504 | InstructionSelector * | 
|  | 4505 | createAArch64InstructionSelector(const AArch64TargetMachine &TM, | 
|  | 4506 | AArch64Subtarget &Subtarget, | 
|  | 4507 | AArch64RegisterBankInfo &RBI) { | 
|  | 4508 | return new AArch64InstructionSelector(TM, Subtarget, RBI); | 
|  | 4509 | } | 
|  | 4510 | } // namespace llvm |