//=- AArch64VectorByElementOpt.cpp - AArch64 vector by element inst opt pass =//
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file contains a pass that performs optimization for vector by element |
| 11 | // SIMD instructions. |
| 12 | // |
| 13 | // Certain SIMD instructions with vector element operand are not efficient. |
| 14 | // Rewrite them into SIMD instructions with vector operands. This rewrite |
| 15 | // is driven by the latency of the instructions. |
| 16 | // |
| 17 | // Example: |
| 18 | // fmla v0.4s, v1.4s, v2.s[1] |
| 19 | // is rewritten into |
| 20 | // dup v3.4s, v2.s[1] |
| 21 | // fmla v0.4s, v1.4s, v3.4s |
| 22 | //===----------------------------------------------------------------------===// |
| 23 | |
| 24 | #include "AArch64InstrInfo.h" |
| 25 | #include "llvm/ADT/Statistic.h" |
| 26 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 27 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
| 28 | #include "llvm/CodeGen/TargetSchedule.h" |
| 29 | |
| 30 | using namespace llvm; |
| 31 | |
| 32 | #define DEBUG_TYPE "aarch64-vectorbyelement-opt" |
| 33 | |
| 34 | STATISTIC(NumModifiedInstr, |
| 35 | "Number of vector by element instructions modified"); |
| 36 | |
| 37 | #define AARCH64_VECTOR_BY_ELEMENT_OPT_NAME \ |
| 38 | "AArch64 vector by element instruction optimization pass" |
| 39 | |
namespace {

/// Machine-function pass that rewrites lane-indexed SIMD arithmetic
/// (fmla/fmls/fmul/fmulx "by element") into a DUP of the lane followed by the
/// plain vector form, when the scheduling model says the pair is cheaper.
struct AArch64VectorByElementOpt : public MachineFunctionPass {
  static char ID;
  AArch64VectorByElementOpt() : MachineFunctionPass(ID) {
    initializeAArch64VectorByElementOptPass(*PassRegistry::getPassRegistry());
  }

  // Cached per-function state, set up in runOnMachineFunction.
  const TargetInstrInfo *TII;
  MachineRegisterInfo *MRI;
  TargetSchedModel SchedModel;

  /// Based only on latency of instructions, determine if it is cost efficient
  /// to replace the instruction InstDesc by the two instructions InstDescRep1
  /// and InstDescRep2. Decisions are memoized per opcode in VecInstElemTable.
  /// Return true if replacement is recommended.
  bool
  shouldReplaceInstruction(MachineFunction *MF, const MCInstrDesc *InstDesc,
                           const MCInstrDesc *InstDescRep1,
                           const MCInstrDesc *InstDescRep2,
                           std::map<unsigned, bool> &VecInstElemTable) const;

  /// Determine if we need to exit the vector by element instruction
  /// optimization pass early. This makes sure that Targets with no need
  /// for this optimization do not spend any compile time on this pass.
  /// This check is done by comparing the latency of an indexed FMLA
  /// instruction to the latency of the DUP + the latency of a vector
  /// FMLA instruction. We do not check on other related instructions such
  /// as FMLS as we assume that if the situation shows up for one
  /// instruction, then it is likely to show up for the related ones.
  /// Return true if early exit of the pass is recommended.
  bool earlyExitVectElement(MachineFunction *MF);

  /// Check whether an equivalent DUP instruction has already been
  /// created or not (scanning backward within MI's basic block).
  /// Return true when the dup instruction already exists. In this case,
  /// DestReg will point to the destination of the already created DUP.
  bool reuseDUP(MachineInstr &MI, unsigned DupOpcode, unsigned SrcReg,
                unsigned LaneNumber, unsigned *DestReg) const;

  /// Certain SIMD instructions with vector element operand are not efficient.
  /// Rewrite them into SIMD instructions with vector operands. This rewrite
  /// is driven by the latency of the instructions.
  /// Return true if the SIMD instruction is modified.
  bool optimizeVectElement(MachineInstr &MI,
                           std::map<unsigned, bool> *VecInstElemTable) const;

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override {
    return AARCH64_VECTOR_BY_ELEMENT_OPT_NAME;
  }
};
char AArch64VectorByElementOpt::ID = 0;
} // namespace
| 95 | |
// Register the pass with the global PassRegistry under the
// "aarch64-vectorbyelement-opt" command-line name; it is neither
// CFG-only nor an analysis pass (the two trailing 'false' flags).
INITIALIZE_PASS(AArch64VectorByElementOpt, "aarch64-vectorbyelement-opt",
                AARCH64_VECTOR_BY_ELEMENT_OPT_NAME, false, false)
| 98 | |
| 99 | /// Based only on latency of instructions, determine if it is cost efficient |
| 100 | /// to replace the instruction InstDesc by the two instructions InstDescRep1 |
| 101 | /// and InstDescRep2. Note that it is assumed in this fuction that an |
| 102 | /// instruction of type InstDesc is always replaced by the same two |
| 103 | /// instructions as results are cached here. |
| 104 | /// Return true if replacement is recommended. |
| 105 | bool AArch64VectorByElementOpt::shouldReplaceInstruction( |
| 106 | MachineFunction *MF, const MCInstrDesc *InstDesc, |
| 107 | const MCInstrDesc *InstDescRep1, const MCInstrDesc *InstDescRep2, |
| 108 | std::map<unsigned, bool> &VecInstElemTable) const { |
| 109 | // Check if replacment decision is alredy available in the cached table. |
| 110 | // if so, return it. |
| 111 | if (!VecInstElemTable.empty() && |
| 112 | VecInstElemTable.find(InstDesc->getOpcode()) != VecInstElemTable.end()) |
| 113 | return VecInstElemTable[InstDesc->getOpcode()]; |
| 114 | |
| 115 | unsigned SCIdx = InstDesc->getSchedClass(); |
| 116 | unsigned SCIdxRep1 = InstDescRep1->getSchedClass(); |
| 117 | unsigned SCIdxRep2 = InstDescRep2->getSchedClass(); |
| 118 | const MCSchedClassDesc *SCDesc = |
| 119 | SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx); |
| 120 | const MCSchedClassDesc *SCDescRep1 = |
| 121 | SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdxRep1); |
| 122 | const MCSchedClassDesc *SCDescRep2 = |
| 123 | SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdxRep2); |
| 124 | |
| 125 | // If a subtarget does not define resources for any of the instructions |
| 126 | // of interest, then return false for no replacement. |
| 127 | if (!SCDesc->isValid() || SCDesc->isVariant() || !SCDescRep1->isValid() || |
| 128 | SCDescRep1->isVariant() || !SCDescRep2->isValid() || |
| 129 | SCDescRep2->isVariant()) { |
| 130 | VecInstElemTable[InstDesc->getOpcode()] = false; |
| 131 | return false; |
| 132 | } |
| 133 | |
| 134 | if (SchedModel.computeInstrLatency(InstDesc->getOpcode()) > |
| 135 | SchedModel.computeInstrLatency(InstDescRep1->getOpcode()) + |
| 136 | SchedModel.computeInstrLatency(InstDescRep2->getOpcode())) { |
| 137 | VecInstElemTable[InstDesc->getOpcode()] = true; |
| 138 | return true; |
| 139 | } |
| 140 | VecInstElemTable[InstDesc->getOpcode()] = false; |
| 141 | return false; |
| 142 | } |
| 143 | |
| 144 | /// Determine if we need to exit the vector by element instruction |
| 145 | /// optimization pass early. This makes sure that Targets with no need |
| 146 | /// for this optimization do not spent any compile time on this pass. |
| 147 | /// This check is done by comparing the latency of an indexed FMLA |
| 148 | /// instruction to the latency of the DUP + the latency of a vector |
| 149 | /// FMLA instruction. We do not check on other related instructions such |
| 150 | /// as FMLS as we assume that if the situation shows up for one |
| 151 | /// instruction, then it is likely to show up for the related ones. |
| 152 | /// Return true if early exit of the pass is recommended. |
| 153 | bool AArch64VectorByElementOpt::earlyExitVectElement(MachineFunction *MF) { |
| 154 | std::map<unsigned, bool> VecInstElemTable; |
| 155 | const MCInstrDesc *IndexMulMCID = &TII->get(AArch64::FMLAv4i32_indexed); |
| 156 | const MCInstrDesc *DupMCID = &TII->get(AArch64::DUPv4i32lane); |
| 157 | const MCInstrDesc *MulMCID = &TII->get(AArch64::FMULv4f32); |
| 158 | |
| 159 | if (!shouldReplaceInstruction(MF, IndexMulMCID, DupMCID, MulMCID, |
| 160 | VecInstElemTable)) |
| 161 | return true; |
| 162 | return false; |
| 163 | } |
| 164 | |
| 165 | /// Check whether an equivalent DUP instruction has already been |
| 166 | /// created or not. |
| 167 | /// Return true when the dup instruction already exists. In this case, |
| 168 | /// DestReg will point to the destination of the already created DUP. |
| 169 | bool AArch64VectorByElementOpt::reuseDUP(MachineInstr &MI, unsigned DupOpcode, |
| 170 | unsigned SrcReg, unsigned LaneNumber, |
| 171 | unsigned *DestReg) const { |
| 172 | for (MachineBasicBlock::iterator MII = MI, MIE = MI.getParent()->begin(); |
| 173 | MII != MIE;) { |
| 174 | MII--; |
| 175 | MachineInstr *CurrentMI = &*MII; |
| 176 | |
| 177 | if (CurrentMI->getOpcode() == DupOpcode && |
| 178 | CurrentMI->getNumOperands() == 3 && |
| 179 | CurrentMI->getOperand(1).getReg() == SrcReg && |
| 180 | CurrentMI->getOperand(2).getImm() == LaneNumber) { |
| 181 | *DestReg = CurrentMI->getOperand(0).getReg(); |
| 182 | return true; |
| 183 | } |
| 184 | } |
| 185 | |
| 186 | return false; |
| 187 | } |
| 188 | |
/// Certain SIMD instructions with vector element operand are not efficient.
/// Rewrite them into SIMD instructions with vector operands. This rewrite
/// is driven by the latency of the instructions.
/// The instructions of concern are for the time being fmla, fmls, fmul,
/// and fmulx and hence they are hardcoded.
///
/// Example:
///  fmla v0.4s, v1.4s, v2.s[1]
///    is rewritten into
///  dup v3.4s, v2.s[1]           // dup not necessary if redundant
///  fmla v0.4s, v1.4s, v3.4s
/// Return true if the SIMD instruction is modified. The original MI is NOT
/// erased here; the caller collects and erases replaced instructions.
bool AArch64VectorByElementOpt::optimizeVectElement(
    MachineInstr &MI, std::map<unsigned, bool> *VecInstElemTable) const {
  const MCInstrDesc *MulMCID, *DupMCID;
  // Q-register class by default; narrowed to FPR64 for the 64-bit (2x32)
  // forms below. Used for the virtual register holding the DUP result.
  const TargetRegisterClass *RC = &AArch64::FPR128RegClass;

  // Select the DUP opcode and the vector-operand replacement opcode for the
  // handled indexed opcodes; anything else is left untouched.
  switch (MI.getOpcode()) {
  default:
    return false;

  // 4X32 instructions
  case AArch64::FMLAv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMLAv4f32);
    break;
  case AArch64::FMLSv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMLSv4f32);
    break;
  case AArch64::FMULXv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMULXv4f32);
    break;
  case AArch64::FMULv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMULv4f32);
    break;

  // 2X64 instructions
  case AArch64::FMLAv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMLAv2f64);
    break;
  case AArch64::FMLSv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMLSv2f64);
    break;
  case AArch64::FMULXv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMULXv2f64);
    break;
  case AArch64::FMULv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMULv2f64);
    break;

  // 2X32 instructions: results are 64-bit, so the DUP destination lives in
  // FPR64 rather than the default FPR128.
  case AArch64::FMLAv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMLAv2f32);
    break;
  case AArch64::FMLSv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMLSv2f32);
    break;
  case AArch64::FMULXv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMULXv2f32);
    break;
  case AArch64::FMULv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMULv2f32);
    break;
  }

  // Bail out unless the scheduling model says DUP + vector op beats the
  // indexed form (decision memoized in *VecInstElemTable).
  if (!shouldReplaceInstruction(MI.getParent()->getParent(),
                                &TII->get(MI.getOpcode()), DupMCID, MulMCID,
                                *VecInstElemTable))
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Get the operands of the current SIMD arithmetic instruction.
  unsigned MulDest = MI.getOperand(0).getReg();
  unsigned SrcReg0 = MI.getOperand(1).getReg();
  unsigned Src0IsKill = getKillRegState(MI.getOperand(1).isKill());
  unsigned SrcReg1 = MI.getOperand(2).getReg();
  unsigned Src1IsKill = getKillRegState(MI.getOperand(2).isKill());
  unsigned DupDest;

  // Instructions of interest have either 4 or 5 operands: the accumulating
  // forms (fmla/fmls) carry an extra tied accumulator source, so the indexed
  // register and lane sit one operand later.
  if (MI.getNumOperands() == 5) {
    unsigned SrcReg2 = MI.getOperand(3).getReg();
    unsigned Src2IsKill = getKillRegState(MI.getOperand(3).isKill());
    unsigned LaneNumber = MI.getOperand(4).getImm();

    // Create a new DUP instruction. Note that if an equivalent DUP instruction
    // has already been created before, then use that one instead of creating
    // a new one.
    if (!reuseDUP(MI, DupMCID->getOpcode(), SrcReg2, LaneNumber, &DupDest)) {
      DupDest = MRI.createVirtualRegister(RC);
      BuildMI(MBB, MI, DL, *DupMCID, DupDest)
          .addReg(SrcReg2, Src2IsKill)
          .addImm(LaneNumber);
    }
    // NOTE(review): the original lane operand's kill state is applied to
    // DupDest here — also when DupDest came from a reused DUP; confirm this
    // is the intended kill-flag propagation.
    BuildMI(MBB, MI, DL, *MulMCID, MulDest)
        .addReg(SrcReg0, Src0IsKill)
        .addReg(SrcReg1, Src1IsKill)
        .addReg(DupDest, Src2IsKill);
  } else if (MI.getNumOperands() == 4) {
    // Non-accumulating forms (fmul/fmulx): operand 2 is the indexed register,
    // operand 3 the lane.
    unsigned LaneNumber = MI.getOperand(3).getImm();
    if (!reuseDUP(MI, DupMCID->getOpcode(), SrcReg1, LaneNumber, &DupDest)) {
      DupDest = MRI.createVirtualRegister(RC);
      BuildMI(MBB, MI, DL, *DupMCID, DupDest)
          .addReg(SrcReg1, Src1IsKill)
          .addImm(LaneNumber);
    }
    BuildMI(MBB, MI, DL, *MulMCID, MulDest)
        .addReg(SrcReg0, Src0IsKill)
        .addReg(DupDest, Src1IsKill);
  } else {
    // Unexpected operand count for a handled opcode: leave MI alone.
    return false;
  }

  ++NumModifiedInstr;
  return true;
}
| 323 | |
| 324 | bool AArch64VectorByElementOpt::runOnMachineFunction(MachineFunction &MF) { |
| 325 | if (skipFunction(*MF.getFunction())) |
| 326 | return false; |
| 327 | |
| 328 | TII = MF.getSubtarget().getInstrInfo(); |
| 329 | MRI = &MF.getRegInfo(); |
| 330 | const TargetSubtargetInfo &ST = MF.getSubtarget(); |
| 331 | const AArch64InstrInfo *AAII = |
| 332 | static_cast<const AArch64InstrInfo *>(ST.getInstrInfo()); |
| 333 | if (!AAII) |
| 334 | return false; |
| 335 | SchedModel.init(ST.getSchedModel(), &ST, AAII); |
| 336 | if (!SchedModel.hasInstrSchedModel()) |
| 337 | return false; |
| 338 | |
| 339 | // A simple check to exit this pass early for targets that do not need it. |
| 340 | if (earlyExitVectElement(&MF)) |
| 341 | return false; |
| 342 | |
| 343 | bool Changed = false; |
| 344 | std::map<unsigned, bool> VecInstElemTable; |
| 345 | SmallVector<MachineInstr *, 8> RemoveMIs; |
| 346 | |
| 347 | for (MachineBasicBlock &MBB : MF) { |
| 348 | for (MachineBasicBlock::iterator MII = MBB.begin(), MIE = MBB.end(); |
| 349 | MII != MIE;) { |
| 350 | MachineInstr &MI = *MII; |
| 351 | if (optimizeVectElement(MI, &VecInstElemTable)) { |
| 352 | // Add MI to the list of instructions to be removed given that it has |
| 353 | // been replaced. |
| 354 | RemoveMIs.push_back(&MI); |
| 355 | Changed = true; |
| 356 | } |
| 357 | ++MII; |
| 358 | } |
| 359 | } |
| 360 | |
| 361 | for (MachineInstr *MI : RemoveMIs) |
| 362 | MI->eraseFromParent(); |
| 363 | |
| 364 | return Changed; |
| 365 | } |
| 366 | |
| 367 | /// createAArch64VectorByElementOptPass - returns an instance of the |
| 368 | /// vector by element optimization pass. |
| 369 | FunctionPass *llvm::createAArch64VectorByElementOptPass() { |
| 370 | return new AArch64VectorByElementOpt(); |
| 371 | } |