//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

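// AddDefaultPred / AddDefaultCC append the implicit trailing operands that
// predicable ARM instructions carry: an "always" (ARMCC::AL) predicate with a
// null predicate register, and a null register for the optional CPSR-def
// (S bit) operand.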
static inline
const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
  return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
}

static inline
const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
  return MIB.addReg(0);
}

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)) {
}

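/// convertToThreeAddress - Split a pre- or post-indexed load/store into an
/// un-indexed memory access plus a separate ADD/SUB that updates the base
/// register. Disabled unless -enable-arm-3-addr-conv is passed.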
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      int SOImmVal = ARM_AM::getSOImmVal(Amt);
      if (SOImmVal == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBri) :
                             getOpcode(ARMII::ADDri)), WBReg)
        .addReg(BaseReg).addImm(SOImmVal)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBrs) :
                             getOpcode(ARMII::ADDrs)), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBrr) :
                             getOpcode(ARMII::ADDrr)), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // The immediate is 8 bits; it's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBri) :
                             getOpcode(ARMII::ADDri)), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBrr) :
                             getOpcode(ARMII::ADDrr)), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastOpc == getOpcode(ARMII::B)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (LastOpc == getOpcode(ARMII::Bcc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with an ARMII::Bcc followed by an ARMII::B, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if ((SecondLastOpc == getOpcode(ARMII::Bcc)) &&
      (LastOpc == getOpcode(ARMII::B))) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if ((SecondLastOpc == getOpcode(ARMII::B)) &&
      (LastOpc == getOpcode(ARMII::B))) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if (((SecondLastOpc == getOpcode(ARMII::BR_JTr)) ||
       (SecondLastOpc == getOpcode(ARMII::BR_JTm)) ||
       (SecondLastOpc == getOpcode(ARMII::BR_JTadd))) &&
      (LastOpc == getOpcode(ARMII::B))) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


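/// RemoveBranch - Erase the branch instructions at the end of MBB (an
/// unconditional branch and/or a conditional branch) and return how many
/// were removed (0, 1 or 2).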
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  int BOpc = getOpcode(ARMII::B);
  int BccOpc = getOpcode(ARMII::Bcc);

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (I->getOpcode() != BccOpc)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

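/// InsertBranch - Append a branch sequence to MBB: an unconditional B, a
/// conditional Bcc, or both for a two-way conditional branch. Returns the
/// number of instructions inserted.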
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME this should probably have a DebugLoc argument
  DebugLoc dl = DebugLoc::getUnknownLoc();
  int BOpc = getOpcode(ARMII::B);
  int BccOpc = getOpcode(ARMII::Bcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

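/// PredicateInstruction - Make MI execute under the given predicate, either
/// by converting an unconditional B into a Bcc or by overwriting an existing
/// predicate operand pair.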
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (Opc == getOpcode(ARMII::B)) {
    MI->setDesc(get(getOpcode(ARMII::Bcc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

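/// SubsumesPredicate - Return true if the first predicate subsumes the
/// second, e.g. AL subsumes every condition, GE subsumes GT, and LS subsumes
/// LO and EQ.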
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

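/// DefinesPredicate - Collect any CPSR operands of MI into Pred and return
/// true if at least one was found.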
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                        std::vector<MachineOperand> &Pred) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}


/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
    if (MI->isLabel())
      return 0;
    switch (MI->getOpcode()) {
    default:
      assert(0 && "Unknown or unset size field for instr!");
      break;
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8; // Arm instruction x 2.
  case ARMII::Size4Bytes: return 4; // Arm instruction.
  case ARMII::Size2Bytes: return 2; // Thumb instruction.
  case ARMII::SizeSpecial: {
    switch (MI->getOpcode()) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded as
      // operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_setjmp: return 12;
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::t2BR_JTr:
    case ARM::t2BR_JTm:
    case ARM::t2BR_JTadd:
    case ARM::tBR_JTr: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is the branch (4 bytes, or 2 for tBR_JTr) plus
      // 4 bytes per table entry.
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2-byte aligned, but JT entries are 4-byte
      // aligned. The assembler / linker may add 2 bytes of padding just
      // before the JT entries. The size does not include this padding;
      // the constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      return getNumJTEntries(JT, JTI) * 4 +
             ((MI->getOpcode() == ARM::tBR_JTr) ? 2 : 4);
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
                              unsigned &SrcReg, unsigned &DstReg,
                              unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers.

  unsigned oc = MI.getOpcode();
  if ((oc == getOpcode(ARMII::FCPYS)) ||
      (oc == getOpcode(ARMII::FCPYD)) ||
      (oc == getOpcode(ARMII::VMOVD)) ||
      (oc == getOpcode(ARMII::VMOVQ))) {
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  else if (oc == getOpcode(ARMII::MOVr)) {
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }

  return false;
}

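/// isLoadFromStackSlot - If MI is a direct load from a stack slot (an LDR,
/// FLDD or FLDS whose address is a plain frame index with no offset), set
/// FrameIndex and return the destination register; otherwise return 0.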
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  unsigned oc = MI->getOpcode();
  if (oc == getOpcode(ARMII::LDR)) {
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }
  else if ((oc == getOpcode(ARMII::FLDD)) ||
           (oc == getOpcode(ARMII::FLDS))) {
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }

  return 0;
}

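/// isStoreToStackSlot - If MI is a direct store to a stack slot (an STR,
/// FSTD or FSTS whose address is a plain frame index with no offset), set
/// FrameIndex and return the source register; otherwise return 0.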
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  unsigned oc = MI->getOpcode();
  if (oc == getOpcode(ARMII::STR)) {
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }
  else if ((oc == getOpcode(ARMII::FSTD)) ||
           (oc == getOpcode(ARMII::FSTS))) {
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }

  return 0;
}

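/// copyRegToReg - Emit a register-to-register copy using MOVr, FCPYS, FCPYD
/// or VMOVQ depending on the register class. Returns false for cross-class
/// or unsupported copies.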
bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               unsigned DestReg, unsigned SrcReg,
                               const TargetRegisterClass *DestRC,
                               const TargetRegisterClass *SrcRC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (DestRC != SrcRC) {
    // Not yet supported!
    return false;
  }

  if (DestRC == ARM::GPRRegisterClass)
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::MOVr)),
                                        DestReg).addReg(SrcReg)));
  else if (DestRC == ARM::SPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FCPYS)), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::DPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FCPYD)), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::QPRRegisterClass)
    BuildMI(MBB, I, DL, get(getOpcode(ARMII::VMOVQ)), DestReg).addReg(SrcReg);
  else
    return false;

  return true;
}

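/// storeRegToStackSlot - Spill SrcReg to the given frame index with STR,
/// FSTD or FSTS, chosen by register class.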
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::STR)))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FSTD)))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FSTS)))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  }
}

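/// storeRegToAddr - Build a predicated store of SrcReg to the address
/// described by Addr and append the new instruction to NewMIs.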
void
ARMBaseInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                 bool isKill,
                                 SmallVectorImpl<MachineOperand> &Addr,
                                 const TargetRegisterClass *RC,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  unsigned Opc = 0;
  if (RC == ARM::GPRRegisterClass) {
    Opc = getOpcode(ARMII::STR);
  } else if (RC == ARM::DPRRegisterClass) {
    Opc = getOpcode(ARMII::FSTD);
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    Opc = getOpcode(ARMII::FSTS);
  }

  MachineInstrBuilder MIB =
    BuildMI(MF, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);
  AddDefaultPred(MIB);
  NewMIs.push_back(MIB);
  return;
}

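/// loadRegFromStackSlot - Reload DestReg from the given frame index with LDR,
/// FLDD or FLDS, chosen by register class.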
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::LDR)), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FLDD)), DestReg)
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FLDS)), DestReg)
                   .addFrameIndex(FI).addImm(0));
  }
}

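/// loadRegFromAddr - Build a predicated load of DestReg from the address
/// described by Addr and append the new instruction to NewMIs.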
void ARMBaseInstrInfo::
loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                SmallVectorImpl<MachineOperand> &Addr,
                const TargetRegisterClass *RC,
                SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  unsigned Opc = 0;
  if (RC == ARM::GPRRegisterClass) {
    Opc = getOpcode(ARMII::LDR);
  } else if (RC == ARM::DPRRegisterClass) {
    Opc = getOpcode(ARMII::FLDD);
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    Opc = getOpcode(ARMII::FLDS);
  }

  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);
  AddDefaultPred(MIB);
  NewMIs.push_back(MIB);
  return;
}

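/// foldMemoryOperandImpl - Try to fold a register copy (MOVr, FCPYS or FCPYD)
/// into a load from or store to the given frame index. Returns the new
/// instruction, or null if the copy cannot be folded (e.g. a MOVr that
/// updates CPSR).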
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == getOpcode(ARMII::MOVr)) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() != ARM::CPSR) {
      unsigned Pred = MI->getOperand(2).getImm();
      unsigned PredReg = MI->getOperand(3).getReg();
      if (OpNum == 0) { // move -> store
        unsigned SrcReg = MI->getOperand(1).getReg();
        bool isKill = MI->getOperand(1).isKill();
        bool isUndef = MI->getOperand(1).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::STR)))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      } else { // move -> load
        unsigned DstReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::LDR)))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      }
    }
  }
  else if (Opc == getOpcode(ARMII::FCPYS)) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FSTS)))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FLDS)))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }
  else if (Opc == getOpcode(ARMII::FCPYD)) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FSTD)))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FLDD)))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }

  return NewMI;
}

MachineInstr *
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr *LoadMI) const {
  return 0;
}

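/// canFoldMemoryOperand - Return true if the copy in MI is one that
/// foldMemoryOperandImpl above knows how to fold into a stack load or store.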
bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned Opc = MI->getOpcode();
  if (Opc == getOpcode(ARMII::MOVr)) {
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR;
  }
  else if ((Opc == getOpcode(ARMII::FCPYS)) ||
           (Opc == getOpcode(ARMII::FCPYD))) {
    return true;
  }
  else if ((Opc == getOpcode(ARMII::VMOVD)) ||
           (Opc == getOpcode(ARMII::VMOVQ))) {
    return false; // FIXME
  }

  return false;
}