//===-- AArch64AdvSIMDScalar.cpp - AdvSIMD scalar unit usage --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// When profitable, replace GPR-targeting i64 instructions with their
// AdvSIMD scalar equivalents. Generally speaking, "profitable" is defined
// as minimizing the number of cross-class register copies.
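//
// As a purely illustrative example (register numbers are arbitrary, and the
// exact copy instructions vary), a GPR sequence such as
//
//   fmov x0, d0        // FPR64 -> GPR64 cross-class copy
//   add  x2, x0, x1
//   fmov d2, x2        // GPR64 -> FPR64 cross-class copy
//
// can instead keep the value in the FP/SIMD register bank as
//
//   add  d2, d0, d1    // the AdvSIMD scalar (ADDv1i64) form
//
// removing both cross-class copies, provided x1 is likewise available (or
// cheaply made available) as an FPR64.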
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// TODO: Graph based predicate heuristics.
// Walking the instruction list linearly will get many, perhaps most, of
// the cases, but to do a truly thorough job of this, we need a more
// holistic approach.
//
// This optimization is very similar in spirit to the register allocator's
// spill placement, only here we're determining where to place cross-class
// register copies rather than spills. As such, a similar approach is
// called for.
//
// We want to build up a set of graphs of all instructions which are candidates
// for transformation along with instructions which generate their inputs and
// consume their outputs. For each edge in the graph, we assign a weight
// based on whether there is a copy required there (weight zero if not) and
// the block frequency of the block containing the defining or using
// instruction, whichever is less. Our optimization is then a graph problem
// to minimize the total weight of all the graphs, then transform instructions
// and add or remove copy instructions as called for to implement the
// solution.
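//
// A rough sketch of that cost model (not what the code below implements):
//
//   weight(def -> use) = copyRequired(def, use)
//                          ? min(blockFreq(MBB(def)), blockFreq(MBB(use)))
//                          : 0
//
// with the goal of choosing register banks for the candidate instructions so
// that the sum of edge weights over all of the graphs is minimized.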
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-simd-scalar"

// Allow forcing all i64 operations with equivalent SIMD instructions to use
// them. For stress-testing the transformation function.
static cl::opt<bool>
TransformAll("aarch64-simd-scalar-force-all",
             cl::desc("Force use of AdvSIMD scalar instructions everywhere"),
             cl::init(false), cl::Hidden);

STATISTIC(NumScalarInsnsUsed, "Number of scalar instructions used");
STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted");
STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted");

#define AARCH64_ADVSIMD_NAME "AdvSIMD Scalar Operation Optimization"

namespace {
class AArch64AdvSIMDScalar : public MachineFunctionPass {
  MachineRegisterInfo *MRI;
  const TargetInstrInfo *TII;

private:
  // isProfitableToTransform - Predicate function to determine whether an
  // instruction should be transformed to its equivalent AdvSIMD scalar
  // instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
  bool isProfitableToTransform(const MachineInstr &MI) const;

  // transformInstruction - Perform the transformation of an instruction
  // to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
  // to be the correct register class, minimizing cross-class copies.
  void transformInstruction(MachineInstr &MI);

  // processMachineBasicBlock - Main optimization loop.
  bool processMachineBasicBlock(MachineBasicBlock *MBB);

public:
  static char ID; // Pass identification, replacement for typeid.
  explicit AArch64AdvSIMDScalar() : MachineFunctionPass(ID) {
    initializeAArch64AdvSIMDScalarPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &F) override;

  StringRef getPassName() const override { return AARCH64_ADVSIMD_NAME; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
char AArch64AdvSIMDScalar::ID = 0;
} // end anonymous namespace

INITIALIZE_PASS(AArch64AdvSIMDScalar, "aarch64-simd-scalar",
                AARCH64_ADVSIMD_NAME, false, false)

static bool isGPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  if (SubReg)
    return false;
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass);
  return AArch64::GPR64RegClass.contains(Reg);
}

static bool isFPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) &&
            SubReg == 0) ||
           (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) &&
            SubReg == AArch64::dsub);
  // Physical register references just check the register class directly.
  return (AArch64::FPR64RegClass.contains(Reg) && SubReg == 0) ||
         (AArch64::FPR128RegClass.contains(Reg) && SubReg == AArch64::dsub);
}

// getSrcFromCopy - Get the original source register for a GPR64 <--> FPR64
// copy instruction. Return nullptr if the instruction is not such a copy.
static MachineOperand *getSrcFromCopy(MachineInstr *MI,
                                      const MachineRegisterInfo *MRI,
                                      unsigned &SubReg) {
  SubReg = 0;
  // The "FMOV Xd, Dn" instruction is the typical form.
  if (MI->getOpcode() == AArch64::FMOVDXr ||
      MI->getOpcode() == AArch64::FMOVXDr)
    return &MI->getOperand(1);
  // A lane zero extract "UMOV.d Xd, Vn[0]" is equivalent. We shouldn't see
  // these at this stage, but it's easy to check for.
  if (MI->getOpcode() == AArch64::UMOVvi64 && MI->getOperand(2).getImm() == 0) {
    SubReg = AArch64::dsub;
    return &MI->getOperand(1);
  }
  // Or just a plain COPY instruction. This can be directly to/from FPR64,
  // or it can be a dsub subreg reference to an FPR128.
  if (MI->getOpcode() == AArch64::COPY) {
    if (isFPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
                MRI) &&
        isGPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(), MRI))
      return &MI->getOperand(1);
    if (isGPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
                MRI) &&
        isFPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(),
                MRI)) {
      SubReg = MI->getOperand(1).getSubReg();
      return &MI->getOperand(1);
    }
  }

  // Otherwise, this is some other kind of instruction.
  return nullptr;
}

// getTransformOpcode - For any opcode for which there is an AdvSIMD equivalent
// that we're considering transforming to, return that AdvSIMD opcode. For all
// others, return the original opcode.
static unsigned getTransformOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    break;
  // FIXME: Lots more possibilities.
  case AArch64::ADDXrr:
    return AArch64::ADDv1i64;
  case AArch64::SUBXrr:
    return AArch64::SUBv1i64;
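  // The logical operations below map to the 64-bit vector (v8i8) forms, as
  // AdvSIMD has no v1i64 encoding for AND/EOR/ORR; a bitwise operation on the
  // full D register is equivalent either way.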
  case AArch64::ANDXrr:
    return AArch64::ANDv8i8;
  case AArch64::EORXrr:
    return AArch64::EORv8i8;
  case AArch64::ORRXrr:
    return AArch64::ORRv8i8;
  }
  // No AdvSIMD equivalent, so just return the original opcode.
  return Opc;
}

static bool isTransformable(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return Opc != getTransformOpcode(Opc);
}

// isProfitableToTransform - Predicate function to determine whether an
// instruction should be transformed to its equivalent AdvSIMD scalar
// instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
bool AArch64AdvSIMDScalar::isProfitableToTransform(
    const MachineInstr &MI) const {
  // If this instruction isn't eligible to be transformed (no SIMD equivalent),
  // early exit since that's the common case.
  if (!isTransformable(MI))
    return false;

  // Count the number of copies we'll need to add and approximate the number
  // of copies that a transform will enable us to remove.
  unsigned NumNewCopies = 3;
  unsigned NumRemovableCopies = 0;
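  // As a rough illustration of how these counters interact (not drawn from
  // any particular test case): NumNewCopies starts at 3, i.e. one potential
  // copy per source operand plus one for the result. If both sources already
  // arrive via cross-class copies that have no other users, and the only use
  // of the result is itself transformable, we end up with
  // NumNewCopies = 3 - 2 - 1 = 0 and NumRemovableCopies = 2 + 1 = 3, so the
  // transform is clearly profitable.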

  unsigned OrigSrc0 = MI.getOperand(1).getReg();
  unsigned OrigSrc1 = MI.getOperand(2).getReg();
  unsigned SubReg0;
  unsigned SubReg1;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If the source was from a copy, we don't need to insert a new copy.
    if (MOSrc0)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (MOSrc0 && MRI->hasOneNonDBGUse(OrigSrc0))
      ++NumRemovableCopies;
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    if (MOSrc1)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (MOSrc1 && MRI->hasOneNonDBGUse(OrigSrc1))
      ++NumRemovableCopies;
  }

  // If any of the uses of the original instruction is a cross-class copy,
  // that's a copy that will be removable if we transform. Likewise, if
  // any of the uses is a transformable instruction, it's likely the transforms
  // will chain, enabling us to save a copy there, too. This is an aggressive
  // heuristic that approximates the graph-based cost analysis described above.
  unsigned Dst = MI.getOperand(0).getReg();
  bool AllUsesAreCopies = true;
  for (MachineRegisterInfo::use_instr_nodbg_iterator
           Use = MRI->use_instr_nodbg_begin(Dst),
           E = MRI->use_instr_nodbg_end();
       Use != E; ++Use) {
    unsigned SubReg;
    if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(*Use))
      ++NumRemovableCopies;
    // If the use is an INSERT_SUBREG, that's still something that can
    // directly use the FPR64, so we don't invalidate AllUsesAreCopies. It's
    // preferable to have it use the FPR64 in most cases, as if the source
    // vector is an IMPLICIT_DEF, the INSERT_SUBREG just goes away entirely.
    // Ditto for a lane insert.
    else if (Use->getOpcode() == AArch64::INSERT_SUBREG ||
             Use->getOpcode() == AArch64::INSvi64gpr)
      ;
    else
      AllUsesAreCopies = false;
  }
  // If all of the uses of the original destination register are copies to
  // FPR64, then we won't end up having a new copy back to GPR64 either.
  if (AllUsesAreCopies)
    --NumNewCopies;

  // If a transform will not increase the number of cross-class copies required,
  // return true.
  if (NumNewCopies <= NumRemovableCopies)
    return true;

  // Finally, even if we otherwise wouldn't transform, check if we're forcing
  // transformation of everything.
  return TransformAll;
}

static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
                                unsigned Dst, unsigned Src, bool IsKill) {
  MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                    TII->get(AArch64::COPY), Dst)
                                .addReg(Src, getKillRegState(IsKill));
  DEBUG(dbgs() << "    adding copy: " << *MIB);
  ++NumCopiesInserted;
  return MIB;
}

// transformInstruction - Perform the transformation of an instruction
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
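//
// A schematic of the rewrite performed below (virtual register names are
// purely illustrative):
//
//   %gpr0 = COPY %fpr0            ; existing FPR64 -> GPR64 copy
//   %gprD = ADDXrr %gpr0, %gpr1
//
// becomes, roughly,
//
//   %fpr1 = COPY %gpr1            ; inserted only if %gpr1 has no FPR64 source
//   %fprD = ADDv1i64 %fpr0, %fpr1
//   %gprD = COPY %fprD            ; result copied back out to a GPR
//
// with the original %gpr0 = COPY erased once it has no other users.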
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
  DEBUG(dbgs() << "Scalar transform: " << MI);

  MachineBasicBlock *MBB = MI.getParent();
  unsigned OldOpc = MI.getOpcode();
  unsigned NewOpc = getTransformOpcode(OldOpc);
  assert(OldOpc != NewOpc && "transform an instruction to itself?!");

  // Check if we need a copy for the source registers.
  unsigned OrigSrc0 = MI.getOperand(1).getReg();
  unsigned OrigSrc1 = MI.getOperand(2).getReg();
  unsigned Src0 = 0, SubReg0;
  unsigned Src1 = 0, SubReg1;
  bool KillSrc0 = false, KillSrc1 = false;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (MOSrc0) {
      Src0 = MOSrc0->getReg();
      KillSrc0 = MOSrc0->isKill();
      // Src0 is going to be reused, thus it cannot be killed anymore.
      MOSrc0->setIsKill(false);
      if (MRI->hasOneNonDBGUse(OrigSrc0)) {
        assert(MOSrc0 && "Can't delete copy w/o a valid original source!");
        Def->eraseFromParent();
        ++NumCopiesDeleted;
      }
    }
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (MOSrc1) {
      Src1 = MOSrc1->getReg();
      KillSrc1 = MOSrc1->isKill();
      // Src1 is going to be reused, thus it cannot be killed anymore.
      MOSrc1->setIsKill(false);
      if (MRI->hasOneNonDBGUse(OrigSrc1)) {
        assert(MOSrc1 && "Can't delete copy w/o a valid original source!");
        Def->eraseFromParent();
        ++NumCopiesDeleted;
      }
    }
  }
  // If we weren't able to reference the original source directly, create a
  // copy.
  if (!Src0) {
    SubReg0 = 0;
    Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0);
    KillSrc0 = true;
  }
  if (!Src1) {
    SubReg1 = 0;
    Src1 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src1, OrigSrc1, KillSrc1);
    KillSrc1 = true;
  }

  // Create a vreg for the destination.
  // FIXME: No need to do this if the ultimate user expects an FPR64.
  // Check for that and avoid the copy if possible.
  unsigned Dst = MRI->createVirtualRegister(&AArch64::FPR64RegClass);

  // For now, all of the new instructions have the same simple three-register
  // form, so no need to special case based on what instruction we're
  // building.
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(NewOpc), Dst)
      .addReg(Src0, getKillRegState(KillSrc0), SubReg0)
      .addReg(Src1, getKillRegState(KillSrc1), SubReg1);

  // Now copy the result back out to a GPR.
  // FIXME: Try to avoid this if all uses could actually just use the FPR64
  // directly.
  insertCopy(TII, MI, MI.getOperand(0).getReg(), Dst, true);

  // Erase the old instruction.
  MI.eraseFromParent();

  ++NumScalarInsnsUsed;
}

// processMachineBasicBlock - Main optimization loop.
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
  bool Changed = false;
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
    MachineInstr &MI = *I++;
    if (isProfitableToTransform(MI)) {
      transformInstruction(MI);
      Changed = true;
    }
  }
  return Changed;
}

// runOnMachineFunction - Pass entry point from PassManager.
bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
  bool Changed = false;
  DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");

  if (skipFunction(*mf.getFunction()))
    return false;

  MRI = &mf.getRegInfo();
  TII = mf.getSubtarget().getInstrInfo();

  // Just check things on a one-block-at-a-time basis.
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I)
    if (processMachineBasicBlock(&*I))
      Changed = true;
  return Changed;
}

// createAArch64AdvSIMDScalar - Factory function used by AArch64TargetMachine
// to add the pass to the PassManager.
FunctionPass *llvm::createAArch64AdvSIMDScalar() {
  return new AArch64AdvSIMDScalar();
}