//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this)
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

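// Copies between two 128-bit (vec4) registers are expanded into one MOV per
// 32-bit channel; all other register copies are lowered to a single MOV.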
void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {

    // We can't copy vec4 registers
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
                                    .setIsKill(KillSrc);
  }
}

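// Build (but do not insert) a MOV that loads an immediate through the
// ALU_LITERAL_X source register; the trailing zero register operand is the
// predicate bit.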
MachineInstr *R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                            unsigned DstReg, int64_t Imm) const {
  MachineInstr *MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
  case AMDGPU::RESERVE_REG:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::DOT4_r600_pseudo:
  case AMDGPU::DOT4_eg_pseudo:
    return true;
  }
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

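// An opcode is an ALU instruction if its TableGen description carries any of
// the OP1, OP2 or OP3 encoding flags.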
bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

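// Branches are expressed with a single JUMP opcode at this level: an
// unpredicated JUMP is an unconditional branch, and a JUMP predicated by a
// preceding PRED_X is a conditional branch.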
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (static_cast<MachineInstr *>(I)->getOpcode() != AMDGPU::JUMP) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      static_cast<MachineInstr *>(--I)->getOpcode() != AMDGPU::JUMP) {
    if (LastOpc == AMDGPU::JUMP) {
      if (!isPredicated(LastInst)) {
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        MachineInstr *predSet = I;
        while (!isPredicateSetter(predSet->getOpcode())) {
          predSet = --I;
        }
        TBB = LastInst->getOperand(0).getMBB();
        Cond.push_back(predSet->getOperand(1));
        Cond.push_back(predSet->getOperand(2));
        Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
        return false;
      }
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a predicated JUMP followed by an unpredicated
  // JUMP (a conditional branch plus its fallthrough branch), handle it.
  if (SecondLastOpc == AMDGPU::JUMP &&
      isPredicated(SecondLastInst) &&
      LastOpc == AMDGPU::JUMP &&
      !isPredicated(LastInst)) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB).addReg(0);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate!");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP))
           .addMBB(TBB)
           .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB).addReg(0);
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP:
    if (isPredicated(I)) {
      MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
      clearFlag(predSet, 0, MO_FLAG_PUSH);
    }
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP:
    if (isPredicated(I)) {
      MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
      clearFlag(predSet, 0, MO_FLAG_PUSH);
    }
    I->eraseFromParent();
    break;
  }
  return 2;
}

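// An instruction is considered predicated when its predicate operand selects
// one of the predicate registers (PRED_SEL_ONE, PRED_SEL_ZERO or
// PREDICATE_BIT).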
bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

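// Reversing a branch condition flips both the compare opcode stored as an
// immediate in Cond[1] and the PRED_SEL_* register in Cond[2]; returning true
// means the condition could not be reversed.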
bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

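// Predicate MI in place: rewrite its predicate-select operand to the register
// carried in Pred[2] and add an implicit use of PREDICATE_BIT.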
bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

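// Build an ALU instruction with every optional field (write mask, output
// modifier, clamp, source modifiers, relative addressing, predicate select and
// literal slot) set to its default value, so callers only have to supply the
// opcode and the register operands.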
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);            // $dst

  if (Src1Reg) {
    MIB.addImm(0)       // $update_exec_mask
       .addImm(0);      // $update_predicate
  }
  MIB.addImm(1)         // $write
     .addImm(0)         // $omod
     .addImm(0)         // $dst_rel
     .addImm(0)         // $dst_clamp
     .addReg(Src0Reg)   // $src0
     .addImm(0)         // $src0_neg
     .addImm(0)         // $src0_rel
     .addImm(0);        // $src0_abs

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0);      // $src1_abs
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)         // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0);        // $literal

  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, R600Operands::IMM, Imm);
  return MovImm;
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
                                 R600Operands::Ops Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

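// Map an abstract R600Operands value to a machine-operand index for the given
// opcode. A result of -1 means the encoding does not carry that operand.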
int R600InstrInfo::getOperandIdx(unsigned Opcode,
                                 R600Operands::Ops Op) const {
  // Each row maps the R600Operands::Ops order onto machine-operand indices:
  //   dst, update_exec_mask, update_predicate, write, omod, dst_rel, clamp,
  //   src0, src0_neg, src0_rel, src0_abs, src1, src1_neg, src1_rel, src1_abs,
  //   src2, src2_neg, src2_rel, last, pred_sel, literal.
  const static int OpTable[3][R600Operands::COUNT] = {
    {0,-1,-1, 1, 2, 3, 4, 5, 6, 7, 8,-1,-1,-1,-1,-1,-1,-1, 9,10,11},
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,-1,-1,-1,15,16,17},
    {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8,-1, 9,10,11,12,13,14}
  };
  unsigned TargetFlags = get(Opcode).TSFlags;
  unsigned OpTableIdx;

  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
    switch (Op) {
    case R600Operands::DST: return 0;
    case R600Operands::SRC0: return 1;
    case R600Operands::SRC1: return 2;
    case R600Operands::SRC2: return 3;
    default:
      assert(!"Unknown operand type for instruction");
      return -1;
    }
  }

  if (TargetFlags & R600_InstFlag::OP1) {
    OpTableIdx = 0;
  } else if (TargetFlags & R600_InstFlag::OP2) {
    OpTableIdx = 1;
  } else {
    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
           "for this instruction");
    OpTableIdx = 2;
  }

  return OpTable[OpTableIdx][Op];
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

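// Set flag 'Flag' on source operand 'Operand' of MI. With native operand
// encoding each flag is its own operand; otherwise all flags are packed into
// the single flag operand, NUM_MO_FLAGS bits per source operand.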
void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}