//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this)
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

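// Returns true for trigonometric instructions (TRIG bit set in TSFlags).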
bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

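// Returns true for vector instructions (VECTOR bit set in TSFlags).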
bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

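// Copies SrcReg to DestReg. 128-bit registers are copied one 32-bit channel
// at a time with a MOV per subregister; all other copies are a single MOV.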
void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {

    // We can't copy a vec4 register with a single MOV; the vec4 case was
    // handled above.
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
                                    .setIsKill(KillSrc);
  }
}

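// Creates (but does not insert into a block) a MOV of the immediate Imm into
// DstReg, using the ALU_LITERAL_X channel to carry the literal value.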
MachineInstr *R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                            unsigned DstReg, int64_t Imm) const {
  MachineInstr *MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder(MI).addReg(DstReg, RegState::Define);
  MachineInstrBuilder(MI).addReg(AMDGPU::ALU_LITERAL_X);
  MachineInstrBuilder(MI).addImm(Imm);
  MachineInstrBuilder(MI).addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

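// Returns true for MOV and its immediate-literal variants.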
bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
  case AMDGPU::RESERVE_REG:
    return true;
  }
}

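// Returns true for the DOT4 reduction pseudo-instructions.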
bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::DOT4_r600_pseudo:
  case AMDGPU::DOT4_eg_pseudo:
    return true;
  }
}

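// Returns true for CUBE instructions, in both their pseudo and real forms.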
bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

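// An instruction is an ALU instruction if any of the OP1, OP2, or OP3
// encoding bits are set in its TSFlags.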
bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

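// Builds the DFA packetizer used for VLIW scheduling from the subtarget's
// instruction itineraries.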
DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

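// Returns true if Opcode writes the predicate register; PRED_X is currently
// the only predicate setter.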
static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

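// Walks backwards from I and returns the closest preceding predicate setter
// in MBB, or NULL if there is none.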
static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

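// Analyzes the terminators of MBB following the usual TargetInstrInfo
// convention: returns false on success, with TBB/FBB and Cond describing the
// branch structure, and true if the terminators don't match a supported
// pattern.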
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (static_cast<MachineInstr *>(I)->getOpcode() != AMDGPU::JUMP) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      static_cast<MachineInstr *>(--I)->getOpcode() != AMDGPU::JUMP) {
    if (LastOpc == AMDGPU::JUMP) {
      if (!isPredicated(LastInst)) {
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        MachineInstr *predSet = I;
        while (!isPredicateSetter(predSet->getOpcode())) {
          predSet = --I;
        }
        TBB = LastInst->getOperand(0).getMBB();
        Cond.push_back(predSet->getOperand(1));
        Cond.push_back(predSet->getOperand(2));
        Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
        return false;
      }
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a predicated JUMP followed by an unpredicated
  // JUMP (a conditional/unconditional branch pair), handle it.
  if (SecondLastOpc == AMDGPU::JUMP &&
      isPredicated(SecondLastInst) &&
      LastOpc == AMDGPU::JUMP &&
      !isPredicated(LastInst)) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}

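// Emits the branch instructions for TBB/FBB/Cond at the end of MBB and
// returns how many instructions were inserted. For conditional branches, the
// preceding PRED_X is given the MO_FLAG_PUSH flag and the comparison opcode
// stored in Cond[1].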
unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB).addReg(0);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate!");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP))
           .addMBB(TBB)
           .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB).addReg(0);
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave the PRED* instructions in place.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP:
    if (isPredicated(I)) {
      MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
      clearFlag(predSet, 0, MO_FLAG_PUSH);
    }
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP:
    if (isPredicated(I)) {
      MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
      clearFlag(predSet, 0, MO_FLAG_PUSH);
    }
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}


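// The if-conversion profitability hooks below unconditionally report that
// predicating (and duplicating) blocks is worthwhile; only unpredication is
// reported as unprofitable.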
bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}


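// Reverses a branch condition produced by AnalyzeBranch by inverting the
// comparison opcode in Cond[1] and swapping PRED_SEL_ZERO/PRED_SEL_ONE in
// Cond[2]. Returns false on success, true if the condition can't be reversed.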
bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}


bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}


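// Predicates MI in place: rewrites its predicate operand to the register in
// Pred[2] and adds an implicit use of PREDICATE_BIT. Returns true on
// success, false if MI has no predicate operand.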
bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder(MI).addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

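// Builds an ALU instruction with every operand and modifier the native
// encoding expects ($write, $omod, source modifiers, $pred_sel, ...) filled
// in with default values, so callers only supply the opcode and registers.
// The $update_exec_mask and $update_predicate operands only exist on
// two-source instructions, which is why they are added only when Src1Reg is
// given.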
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)      // $update_exec_mask
       .addImm(0);     // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0);       // $src0_abs

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0);      // $src1_abs
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved
  // scheduling into the backend, we can change the default to 0.
  MIB.addImm(1)                    // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0);                   // $literal

  return MIB;
}

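// Emits a MOV from ALU_LITERAL_X into DstReg and stores Imm in the
// instruction's literal operand.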
MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, R600Operands::IMM, Imm);
  return MovImm;
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
                                 R600Operands::Ops Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

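// Maps an abstract R600Operands::Ops value to the operand index used by a
// particular opcode's encoding. Each OpTable row corresponds to one
// instruction group (OP1, OP2, OP3); -1 means the group's encoding does not
// carry that operand.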
int R600InstrInfo::getOperandIdx(unsigned Opcode,
                                 R600Operands::Ops Op) const {
  static const int OpTable[3][R600Operands::COUNT] = {
//                       W        C     S  S  S     S  S  S     S  S
//            R  O  D  L  S  R  R  R  S  R  R  R  S  R  R  L  P
//   D  U     I  M  R  A  R  C  C  C  R  C  C  C  R  C  C  A  R  I
//   S  E  U  T  O  E  M  C  0  0  0  C  1  1  1  C  2  2  S  E  M
//   T  M  P  E  D  L  P  0  N  R  A  1  N  R  A  2  N  R  T  D  M
    {0,-1,-1, 1, 2, 3, 4, 5, 6, 7, 8,-1,-1,-1,-1,-1,-1,-1, 9,10,11},
    {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,-1,-1,-1,15,16,17},
    {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8,-1, 9,10,11,12,13,14}
  };
  unsigned TargetFlags = get(Opcode).TSFlags;
  unsigned OpTableIdx;

  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
    switch (Op) {
    case R600Operands::DST: return 0;
    case R600Operands::SRC0: return 1;
    case R600Operands::SRC1: return 2;
    case R600Operands::SRC2: return 3;
    default:
      assert(!"Unknown operand type for instruction");
      return -1;
    }
  }

  if (TargetFlags & R600_InstFlag::OP1) {
    OpTableIdx = 0;
  } else if (TargetFlags & R600_InstFlag::OP2) {
    OpTableIdx = 1;
  } else {
    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
           "for this instruction");
    OpTableIdx = 2;
  }

  return OpTable[OpTableIdx][Op];
}

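// Sets the immediate for operand Op of MI to Imm. Asserts that the operand
// exists and is an immediate.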
void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

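// Returns the operand holding the requested flag. With the default Flag == 0
// this is the packed flag operand located via GET_FLAG_OPERAND_IDX; for
// native-encoded instructions, a nonzero Flag selects the matching modifier
// operand ($clamp, $write, $last, $src?_neg, or $src?_abs).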
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction that uses
    // native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
             "instructions.");
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

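// Sets Flag on MI. For native operands, the modifier operand itself is
// updated (MO_FLAG_NOT_LAST and MO_FLAG_MASK clear the corresponding operand
// instead of setting it); otherwise the flag bit is OR'd into the packed
// flag operand at Operand's position.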
void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

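// Clears Flag on MI, either by zeroing the native modifier operand or by
// masking the bit out of the packed flag operand.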
void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}