//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

15
16#include "SIInstrInfo.h"
17#include "AMDGPUTargetMachine.h"
18#include "llvm/CodeGen/MachineInstrBuilder.h"
19#include "llvm/CodeGen/MachineRegisterInfo.h"
20#include "llvm/MC/MCInstrDesc.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000021#include <stdio.h>
22
23using namespace llvm;
24
// Construct the SI instruction info for the given target machine.  The
// register info (RI) is initialized with a back-reference to this object.
SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this)
  { }
29
// Accessor for the SI register info owned by this instruction info object.
const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
  return RI;
}
33
// Emit instructions to copy physical register SrcReg into DestReg at the
// insertion point MI.  32- and 64-bit SGPR copies and 32-bit VGPR copies are
// emitted as a single S_MOV/V_MOV; wider register classes are expanded into a
// sequence of 32-bit moves over their sub-registers.  Copies into M0 that are
// provably redundant are elided entirely.
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  // Sub-register index sequences used to expand wide copies into runs of
  // 32-bit moves.  Each list is terminated by a 0 sentinel, which the
  // expansion loop below uses as its stop condition.
  const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  // Opcode and sub-register list selected by the register-class dispatch
  // below; only used when the copy must be expanded per sub-register.
  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value.  Walk backwards from the
    // insertion point to the most recent instruction that defines M0: if it
    // is a COPY or S_MOV_B32 that reads SrcReg, M0 already holds the desired
    // value and the copy can be dropped.  Any other kind of M0 def (or a
    // mov from a different source) ends the search and the copy is emitted.
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary
      return;
    }
  }

  // Dispatch on the destination register class.  Scalar (SReg) destinations
  // require a scalar source; vector (VReg) destinations accept either a
  // vector or a scalar source of the same width.
  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

  // Expand the wide copy: one 32-bit move per sub-register index, stopping at
  // the 0 sentinel.  On every move except the last, DestReg is added as an
  // implicit def so the whole destination register is treated as defined by
  // the sequence rather than by independent partial writes.
  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
                                  get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}
160
Christian Konig3c145802013-03-27 09:12:59 +0000161unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
162
163 int NewOpc;
164
165 // Try to map original to commuted opcode
166 if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
167 return NewOpc;
168
169 // Try to map commuted to original opcode
170 if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
171 return NewOpc;
172
173 return Opcode;
174}
175
Christian Konig76edd4f2013-02-26 17:52:29 +0000176MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
177 bool NewMI) const {
178
179 if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
180 !MI->getOperand(2).isReg())
181 return 0;
182
Christian Konig3c145802013-03-27 09:12:59 +0000183 MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
184
185 if (MI)
186 MI->setDesc(get(commuteOpcode(MI->getOpcode())));
187
188 return MI;
Christian Konig76edd4f2013-02-26 17:52:29 +0000189}
190
Tom Stellard75aadc22012-12-11 21:25:42 +0000191MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
192 int64_t Imm) const {
Christian Konigc756cb992013-02-16 11:28:22 +0000193 MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
NAKAMURA Takumi2a0b40f2012-12-20 00:22:11 +0000194 MachineInstrBuilder MIB(*MF, MI);
195 MIB.addReg(DstReg, RegState::Define);
196 MIB.addImm(Imm);
Tom Stellard75aadc22012-12-11 21:25:42 +0000197
198 return MI;
199
200}
201
202bool SIInstrInfo::isMov(unsigned Opcode) const {
203 switch(Opcode) {
204 default: return false;
205 case AMDGPU::S_MOV_B32:
206 case AMDGPU::S_MOV_B64:
207 case AMDGPU::V_MOV_B32_e32:
208 case AMDGPU::V_MOV_B32_e64:
Tom Stellard75aadc22012-12-11 21:25:42 +0000209 return true;
210 }
211}
212
213bool
214SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
215 return RC != &AMDGPU::EXECRegRegClass;
216}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

221
// Map a (register index, channel) pair to an indirect address.  SI registers
// are not split into channels, so only channel 0 is valid and the register
// index is the address.
unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}
227
228
// First register index usable for indirect addressing.  Not yet implemented
// for SI; reaching this is a bug in the caller.
int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}
232
// Last register index usable for indirect addressing.  Not yet implemented
// for SI; reaching this is a bug in the caller.
int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}
236
// Register class for the value stored by an indirect write.  Not yet
// implemented for SI; reaching this is a bug in the caller.
const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  llvm_unreachable("Unimplemented");
}
241
// Register class for the value produced by an indirect read.  Not yet
// implemented for SI; reaching this is a bug in the caller.
const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
  llvm_unreachable("Unimplemented");
}
245
// Emit an indirect write of ValueReg to Address offset by OffsetReg.  Not yet
// implemented for SI; reaching this is a bug in the caller.
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}
253
// Emit an indirect read into ValueReg from Address offset by OffsetReg.  Not
// yet implemented for SI; reaching this is a bug in the caller.
MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}
261
// Super register class spanning all indirectly-addressable registers.  Not
// yet implemented for SI; reaching this is a bug in the caller.
const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
  llvm_unreachable("Unimplemented");
}