//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// i1 values are usually inserted by the CFG Structurize pass and they are
/// unique in that they can be copied from VALU to SALU registers.
/// This is not possible for any other value type. Since there are no
/// MOV instructions for i1, we need to use V_CMP_* and V_CNDMASK to move the i1.
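/// A copy from a 64-bit scalar condition (SGPR pair) into an i1 VGPR is
/// lowered to V_CNDMASK_B32, and a copy from an i1 VGPR back into an SGPR pair
/// is lowered to V_CMP_NE_I32; see runOnMachineFunction below.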
///
//===----------------------------------------------------------------------===//
//

#define DEBUG_TYPE "si-i1-copies"
#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

namespace {

class SILowerI1Copies : public MachineFunctionPass {
public:
  static char ID;

public:
  SILowerI1Copies() : MachineFunctionPass(ID) {
    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
  }

  virtual bool runOnMachineFunction(MachineFunction &MF) override;

  virtual const char *getPassName() const override {
    return "SI Lower i1 Copies";
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE,
                      "SI Lower i1 Copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE,
                    "SI Lower i1 Copies", false, false)

char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;

FunctionPass *llvm::createSILowerI1CopiesPass() {
  return new SILowerI1Copies();
}

bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
      MF.getTarget().getInstrInfo());
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
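  // i1 virtual registers defined by the instructions rewritten below; their
  // register class is widened to a 32-bit VGPR class once all copies in the
  // function have been lowered.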
  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

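      // The i1 pseudo operations map directly onto 32-bit VALU instructions,
      // so just rewrite the opcode in place and remember the defined register
      // for the class fixup at the end of the pass.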
      if (MI.getOpcode() == AMDGPU::V_MOV_I1) {
        I1Defs.push_back(MI.getOperand(0).getReg());
        MI.setDesc(TII->get(AMDGPU::V_MOV_B32_e32));
        continue;
      }

      if (MI.getOpcode() == AMDGPU::V_AND_I1) {
        I1Defs.push_back(MI.getOperand(0).getReg());
        MI.setDesc(TII->get(AMDGPU::V_AND_B32_e32));
        continue;
      }

      if (MI.getOpcode() == AMDGPU::V_OR_I1) {
        I1Defs.push_back(MI.getOperand(0).getReg());
        MI.setDesc(TII->get(AMDGPU::V_OR_B32_e32));
        continue;
      }

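      // Everything else of interest is a plain COPY between two virtual
      // registers whose classes tell us which direction the i1 value moves.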
      if (MI.getOpcode() != AMDGPU::COPY ||
          !TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()) ||
          !TargetRegisterInfo::isVirtualRegister(MI.getOperand(1).getReg()))
        continue;

      const TargetRegisterClass *DstRC =
          MRI.getRegClass(MI.getOperand(0).getReg());
      const TargetRegisterClass *SrcRC =
          MRI.getRegClass(MI.getOperand(1).getReg());

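      // Copying a scalar condition (64-bit SGPR pair) into an i1 VGPR:
      // V_CNDMASK_B32 writes -1 into every lane whose bit is set in the
      // scalar source and 0 into all other lanes.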
      if (DstRC == &AMDGPU::VReg_1RegClass &&
          TRI->getCommonSubClass(SrcRC, &AMDGPU::SGPR_64RegClass)) {
        I1Defs.push_back(MI.getOperand(0).getReg());
        BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CNDMASK_B32_e64))
                .addOperand(MI.getOperand(0))
                .addImm(0)
                .addImm(-1)
                .addOperand(MI.getOperand(1))
                .addImm(0)
                .addImm(0)
                .addImm(0)
                .addImm(0);
        MI.eraseFromParent();
      } else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) &&
                 SrcRC == &AMDGPU::VReg_1RegClass) {
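        // Copying an i1 VGPR back into a 64-bit SGPR pair: V_CMP_NE_I32 sets
        // the destination bit for every lane whose value differs from 0,
        // reconstructing the scalar condition mask.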
        BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CMP_NE_I32_e64))
                .addOperand(MI.getOperand(0))
                .addImm(0)
                .addOperand(MI.getOperand(1))
                .addImm(0)
                .addImm(0)
                .addImm(0)
                .addImm(0);
        MI.eraseFromParent();
      }
    }
  }

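  // The rewritten i1 defs now hold full 32-bit values, so give them a 32-bit
  // VGPR register class.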
  for (unsigned Reg : I1Defs)
    MRI.setRegClass(Reg, &AMDGPU::VReg_32RegClass);

  return false;
}