//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// i1 values are usually inserted by the CFG Structurize pass and they are
/// unique in that they can be copied from VALU to SALU registers.
/// This is not possible for any other value type. Since there are no
/// MOV instructions for i1, we need to use V_CMP_* and V_CNDMASK to move the
/// i1.
///
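/// As a rough illustration (the register names here are only illustrative), a
/// copy of an i1 out of SGPRs into an i1 "VGPR" is rewritten as
///   %vdst = V_CNDMASK_B32_e64 0, -1, %ssrc
/// and a copy of an i1 from a VGPR back into SGPRs is rewritten as
///   %sdst = V_CMP_NE_U32_e64 %vsrc, 0
///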
//===----------------------------------------------------------------------===//
//

#define DEBUG_TYPE "si-i1-copies"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

namespace {

class SILowerI1Copies : public MachineFunctionPass {
public:
  static char ID;

public:
  SILowerI1Copies() : MachineFunctionPass(ID) {
    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Lower i1 Copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SILowerI1Copies, DEBUG_TYPE,
                "SI Lower i1 Copies", false, false)

char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;

FunctionPass *llvm::createSILowerI1CopiesPass() {
  return new SILowerI1Copies();
}

bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const TargetRegisterInfo *TRI = &TII->getRegisterInfo();

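  // Virtual registers that end up holding a lowered i1 value; once all copies
  // have been rewritten they are given a 32-bit VGPR register class below.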
  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

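      // An IMPLICIT_DEF of an i1 needs no real copy; just give the register an
      // SGPR-pair class and leave the instruction in place.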
      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF) {
        unsigned Reg = MI.getOperand(0).getReg();
        const TargetRegisterClass *RC = MRI.getRegClass(Reg);
        if (RC == &AMDGPU::VReg_1RegClass)
          MRI.setRegClass(Reg, &AMDGPU::SReg_64RegClass);
        continue;
      }

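      // Only COPY instructions between virtual registers are rewritten below.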
      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      const MachineOperand &Dst = MI.getOperand(0);
      const MachineOperand &Src = MI.getOperand(1);

      if (!TargetRegisterInfo::isVirtualRegister(Src.getReg()) ||
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      const TargetRegisterClass *DstRC = MRI.getRegClass(Dst.getReg());
      const TargetRegisterClass *SrcRC = MRI.getRegClass(Src.getReg());

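      // Copy of an i1 condition out of SGPRs into an i1 "VGPR": materialize
      // 0/-1 in a 32-bit VGPR. A constant S_MOV_B64 source folds directly into
      // a V_MOV_B32; anything else selects between 0 and -1 with
      // V_CNDMASK_B32.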
      if (DstRC == &AMDGPU::VReg_1RegClass &&
          TRI->getCommonSubClass(SrcRC, &AMDGPU::SGPR_64RegClass)) {
        I1Defs.push_back(Dst.getReg());
        DebugLoc DL = MI.getDebugLoc();

        MachineInstr *DefInst = MRI.getUniqueVRegDef(Src.getReg());
        if (DefInst->getOpcode() == AMDGPU::S_MOV_B64) {
          if (DefInst->getOperand(1).isImm()) {
            I1Defs.push_back(Dst.getReg());

            int64_t Val = DefInst->getOperand(1).getImm();
            assert(Val == 0 || Val == -1);

            BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_MOV_B32_e32))
              .addOperand(Dst)
              .addImm(Val);
            MI.eraseFromParent();
            continue;
          }
        }

        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64))
          .addOperand(Dst)
          .addImm(0)
          .addImm(-1)
          .addOperand(Src);
        MI.eraseFromParent();
      } else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) &&
                 SrcRC == &AMDGPU::VReg_1RegClass) {
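        // Copy of an i1 "VGPR" back into SGPRs: compare the 0/-1 value against
        // zero to rebuild the condition mask in an SGPR pair.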
        BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CMP_NE_U32_e64))
          .addOperand(Dst)
          .addOperand(Src)
          .addImm(0);
        MI.eraseFromParent();
      }
    }
  }

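  // The i1 destinations rewritten above now hold 32-bit 0/-1 values, so give
  // them a real 32-bit VGPR register class.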
  for (unsigned Reg : I1Defs)
    MRI.setRegClass(Reg, &AMDGPU::VGPR_32RegClass);

  return false;
}