//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
#include <cstring> // strncmp/strlen, used by getInlineAsmLength below.
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for the comment string before counting, so a comment at the
    // start of an instruction is not mistaken for an instruction.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}
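
// Illustrative sketch (not part of the original file; assumed MCAsmInfo
// values): with MAI.getMaxInstLength() == 4 and getCommentString() == "#",
//   getInlineAsmLength("addl %eax, %ebx\n# a comment\nret", MAI)
// counts "addl" and "ret" as instructions, skips the comment line, and
// returns 2 * 4 = 8 bytes.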

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing the tail with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to NewDest.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}

// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its
    // own.
    return nullptr;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    assert(MI->isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated as well.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}
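
// Illustrative sketch (assumed opcode and virtual-register numbering, not
// from the original file): for a two-address instruction where source
// operand 1 is tied to the def,
//   %vreg0 = ADD %vreg0, %vreg1
// commuteInstruction swaps the two sources and retargets the tied def:
//   %vreg1 = ADD %vreg1, %vreg0
// With NewMI == true the rewrite is applied to a clone and the original
// instruction is left untouched.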

/// findCommutedOpIndices - If the specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
                           const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
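
// Illustrative sketch (ARM-style predication; the operand layout is an
// assumption, not taken from this file): a predicable ARM instruction carries
// a (condition-code immediate, CPSR register) operand pair, so passing
//   Pred = { <imm ARMCC::EQ>, <reg CPSR> }
// rewrites those two operands in place and makes the instruction execute
// only when the EQ condition holds.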

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const TargetMachine *TM) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  unsigned BitSize =
      TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset =
      TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!TM->getDataLayout()->isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}
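
// Worked example (hypothetical 16-byte register class; the subindex values
// are assumptions): for a sub-register covering bits [32, 96),
// getSubRegIdxSize returns 64 and getSubRegIdxOffset returns 32, so
// Size = 8 and Offset = 4 bytes. On a big-endian target the offset is
// mirrored; e.g. a low 4-byte subreg at byte offset 0 maps to offset
// 16 - (0 + 4) = 12 instead.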

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveReg))
    return RC->contains(LiveReg) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}

bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                           ArrayRef<unsigned> Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, metadata, or function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
                                         SpillOffset, &MF.getTarget());
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}
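
// Illustrative sketch (assumed operand numbering, not from the original
// file): folding operand 3 of
//   STACKMAP <id>, <nShadowBytes>, %vreg0, %vreg1
// against frame index FI leaves operands 0-2 untouched and rewrites the live
// value %vreg1 as the four-operand indirect tuple
//   IndirectMemRefOp, <spill size>, <FI>, <spill offset>
// which the stackmap emitter later records as a memory location.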

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
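
// Illustrative sketch (x86-flavored; the opcode and register assignment are
// assumptions): folding operand 0 (the virtual def) of
//   %vreg0 = COPY %EAX
// into stack slot FI turns the copy into a spill such as
//   MOV32mr <fi#FI>, %EAX
// via storeRegToStackSlot, while folding operand 1 of "%EAX = COPY %vreg0"
// instead emits a reload into %EAX with loadRegFromStackSlot.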

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr *LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  }

  if (!NewMI) return nullptr;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
    TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  int FrameSetupOpcode = getCallFrameSetupOpcode();
  int FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI->getOpcode() != FrameSetupOpcode &&
      MI->getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI->getOperand(0).getImm();

  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
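
// Worked example (x86-style downward-growing stack; the pseudo names are
// assumptions): for the call frame setup pseudo "ADJCALLSTACKDOWN 16",
// getSPAdjust returns 16, the number of bytes by which SP drops; for the
// matching destroy pseudo "ADJCALLSTACKUP 16" it returns -16, since on a
// downward-growing stack the destroy undoes the adjustment.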

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}
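
// Illustrative sketch (assumed scheduling-model values, not from this file):
// with SchedModel.LoadLatency == 4 and SchedModel.HighLatency == 25, a load
// def gets latency 4, an opcode the target reports via isHighLatencyDef
// (e.g. a divide) gets 25, a COPY or other transient instruction gets 0, and
// everything else defaults to 1 cycle.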

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData,
    const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI
/// may be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not
/// need to call getOperandLatency().
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}
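
// Illustrative sketch (assumed register and subindex names): for
//   %vreg3<def> = REG_SEQUENCE %vreg1, sub0, %vreg2, sub1
// getRegSequenceInputs(MI, 0, InputRegs) fills InputRegs with
//   { {%vreg1, 0, sub0}, {%vreg2, 0, sub1} }
// i.e. one (Reg, SubReg, SubIdx) triple per value/subindex operand pair.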

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
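
// Illustrative sketch (assumed register and subindex names): for
//   %vreg0<def> = INSERT_SUBREG %vreg1, %vreg2, sub0
// getInsertSubregInputs(MI, 0, BaseReg, InsertedReg) yields
//   BaseReg     = {%vreg1, 0}          (the value being inserted into)
//   InsertedReg = {%vreg2, 0, sub0}    (the value landing in lane sub0)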