//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

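// These helpers append the implicit operands that most ARM instructions
// carry: AddDefaultPred adds an "always" (AL) condition code together with a
// null predicate register, and AddDefaultCC adds a null register for the
// optional CPSR-defining 's' bit.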
static inline
const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
  return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
}

static inline
const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
  return MIB.addReg(0);
}

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)) {
}

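// convertToThreeAddress - If the 2-addr to 3-addr conversion is enabled,
// split a pre- or post-indexed load/store into an un-indexed memory access
// plus a separate ADD/SUB that performs the base register update.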
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBri) :
                             getOpcode(ARMII::ADDri)), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBrs) :
                             getOpcode(ARMII::ADDrs)), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBrr) :
                             getOpcode(ARMII::ADDrr)), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBri) :
                             getOpcode(ARMII::ADDri)), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBrr) :
                             getOpcode(ARMII::ADDrr)), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastOpc == getOpcode(ARMII::B)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (LastOpc == getOpcode(ARMII::Bcc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with an ARMII::B and an ARMII::Bcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if ((SecondLastOpc == getOpcode(ARMII::Bcc)) &&
      (LastOpc == getOpcode(ARMII::B))) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if ((SecondLastOpc == getOpcode(ARMII::B)) &&
      (LastOpc == getOpcode(ARMII::B))) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them
  // for correctness of Thumb constant islands.
  if (((SecondLastOpc == getOpcode(ARMII::BR_JTr)) ||
       (SecondLastOpc == getOpcode(ARMII::BR_JTm)) ||
       (SecondLastOpc == getOpcode(ARMII::BR_JTadd))) &&
      (LastOpc == getOpcode(ARMII::B))) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  int BOpc = getOpcode(ARMII::B);
  int BccOpc = getOpcode(ARMII::Bcc);

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (I->getOpcode() != BccOpc)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME: this should probably have a DebugLoc argument.
  DebugLoc dl = DebugLoc::getUnknownLoc();
  int BOpc = getOpcode(ARMII::B);
  int BccOpc = getOpcode(ARMII::Bcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (Opc == getOpcode(ARMII::B)) {
    MI->setDesc(get(getOpcode(ARMII::Bcc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

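  // A condition code subsumes another if every CPSR state accepted by the
  // second is also accepted by the first, e.g. HS (unsigned >=) holds
  // whenever HI (unsigned >) does, and AL holds unconditionally.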
  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                        std::vector<MachineOperand> &Pred) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}


/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
    if (MI->isLabel())
      return 0;
    switch (MI->getOpcode()) {
    default:
      assert(0 && "Unknown or unset size field for instr!");
      break;
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8; // ARM instruction x 2.
  case ARMII::Size4Bytes: return 4; // ARM instruction.
  case ARMII::Size2Bytes: return 2; // Thumb instruction.
  case ARMII::SizeSpecial: {
    switch (MI->getOpcode()) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded
      // as operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_setjmp: return 12;
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::t2BR_JTr:
    case ARM::t2BR_JTm:
    case ARM::t2BR_JTadd:
    case ARM::tBR_JTr: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is 4 + 4 * number of entries.
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
      // aligned. The assembler / linker may add 2 byte padding just before
      // the JT entries. The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      return getNumJTEntries(JT, JTI) * 4 +
             ((MI->getOpcode() == ARM::tBR_JTr) ? 2 : 4);
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached.
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
                              unsigned &SrcReg, unsigned &DstReg,
                              unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers.

  unsigned oc = MI.getOpcode();
  if ((oc == getOpcode(ARMII::FCPYS)) ||
      (oc == getOpcode(ARMII::FCPYD)) ||
      (oc == getOpcode(ARMII::VMOVD)) ||
      (oc == getOpcode(ARMII::VMOVQ))) {
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  } else if (oc == getOpcode(ARMII::MOVr)) {
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }

  return false;
}

unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  unsigned oc = MI->getOpcode();
  if (oc == getOpcode(ARMII::LDR)) {
    // An LDR whose base is a frame index with no register offset and a zero
    // immediate is a direct stack-slot reload.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  } else if ((oc == getOpcode(ARMII::FLDD)) ||
             (oc == getOpcode(ARMII::FLDS))) {
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }

  return 0;
}

unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  unsigned oc = MI->getOpcode();
  if (oc == getOpcode(ARMII::STR)) {
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  } else if ((oc == getOpcode(ARMII::FSTD)) ||
             (oc == getOpcode(ARMII::FSTS))) {
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }

  return 0;
}

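// copyRegToReg - Emit a register-to-register copy, choosing the opcode by
// register class: MOVr for GPRs, FCPYS/FCPYD for VFP single/double registers,
// and VMOVQ for NEON quad registers. Copies across register classes are not
// supported here.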
bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               unsigned DestReg, unsigned SrcReg,
                               const TargetRegisterClass *DestRC,
                               const TargetRegisterClass *SrcRC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (DestRC != SrcRC) {
    // Not yet supported!
    return false;
  }

  if (DestRC == ARM::GPRRegisterClass)
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::MOVr)),
                                        DestReg).addReg(SrcReg)));
  else if (DestRC == ARM::SPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FCPYS)), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::DPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FCPYD)), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::QPRRegisterClass)
    BuildMI(MBB, I, DL, get(getOpcode(ARMII::VMOVQ)), DestReg).addReg(SrcReg);
  else
    return false;

  return true;
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::STR)))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FSTD)))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FSTS)))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  }
}

void
ARMBaseInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                 bool isKill,
                                 SmallVectorImpl<MachineOperand> &Addr,
                                 const TargetRegisterClass *RC,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  unsigned Opc = 0;
  if (RC == ARM::GPRRegisterClass) {
    Opc = getOpcode(ARMII::STR);
  } else if (RC == ARM::DPRRegisterClass) {
    Opc = getOpcode(ARMII::FSTD);
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    Opc = getOpcode(ARMII::FSTS);
  }

  MachineInstrBuilder MIB =
    BuildMI(MF, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);
  AddDefaultPred(MIB);
  NewMIs.push_back(MIB);
  return;
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::LDR)), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FLDD)), DestReg)
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FLDS)), DestReg)
                   .addFrameIndex(FI).addImm(0));
  }
}

void ARMBaseInstrInfo::
loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                SmallVectorImpl<MachineOperand> &Addr,
                const TargetRegisterClass *RC,
                SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  unsigned Opc = 0;
  if (RC == ARM::GPRRegisterClass) {
    Opc = getOpcode(ARMII::LDR);
  } else if (RC == ARM::DPRRegisterClass) {
    Opc = getOpcode(ARMII::FLDD);
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    Opc = getOpcode(ARMII::FLDS);
  }

  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);
  AddDefaultPred(MIB);
  NewMIs.push_back(MIB);
  return;
}

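// foldMemoryOperandImpl - Rewrite a register-to-register move whose source or
// destination operand is being spilled into a direct store to / load from the
// given stack slot (the "move -> store" and "move -> load" cases below).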
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == getOpcode(ARMII::MOVr)) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() != ARM::CPSR) {
      unsigned Pred = MI->getOperand(2).getImm();
      unsigned PredReg = MI->getOperand(3).getReg();
      if (OpNum == 0) { // move -> store
        unsigned SrcReg = MI->getOperand(1).getReg();
        bool isKill = MI->getOperand(1).isKill();
        bool isUndef = MI->getOperand(1).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::STR)))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      } else {          // move -> load
        unsigned DstReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::LDR)))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      }
    }
  } else if (Opc == getOpcode(ARMII::FCPYS)) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FSTS)))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FLDS)))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == getOpcode(ARMII::FCPYD)) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FSTD)))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FLDD)))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }

  return NewMI;
}

MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  return 0;
}

bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned Opc = MI->getOpcode();
  if (Opc == getOpcode(ARMII::MOVr)) {
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR;
  } else if ((Opc == getOpcode(ARMII::FCPYS)) ||
             (Opc == getOpcode(ARMII::FCPYD))) {
    return true;
  } else if ((Opc == getOpcode(ARMII::VMOVD)) ||
             (Opc == getOpcode(ARMII::VMOVQ))) {
    return false; // FIXME
  }

  return false;
}