blob: de2373b11a756670e15f8f5ad51a1dff2982ef3a [file] [log] [blame]
//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//
14
15
16#include "SIInstrInfo.h"
17#include "AMDGPUTargetMachine.h"
18#include "llvm/CodeGen/MachineInstrBuilder.h"
19#include "llvm/CodeGen/MachineRegisterInfo.h"
20#include "llvm/MC/MCInstrDesc.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000021#include <stdio.h>
22
23using namespace llvm;
24
25SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
26 : AMDGPUInstrInfo(tm),
27 RI(tm, *this)
28 { }
29
30const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
31 return RI;
32}
33
34void
35SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
Christian Konigd0e3da12013-03-01 09:46:27 +000036 MachineBasicBlock::iterator MI, DebugLoc DL,
37 unsigned DestReg, unsigned SrcReg,
38 bool KillSrc) const {
39
Tom Stellard75aadc22012-12-11 21:25:42 +000040 // If we are trying to copy to or from SCC, there is a bug somewhere else in
41 // the backend. While it may be theoretically possible to do this, it should
42 // never be necessary.
43 assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
44
Christian Konigd0e3da12013-03-01 09:46:27 +000045 const int16_t Sub0_15[] = {
46 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
47 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
48 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
49 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
50 };
51
52 const int16_t Sub0_7[] = {
53 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
54 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
55 };
56
57 const int16_t Sub0_3[] = {
58 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
59 };
60
61 const int16_t Sub0_1[] = {
62 AMDGPU::sub0, AMDGPU::sub1, 0
63 };
64
65 unsigned Opcode;
66 const int16_t *SubIndices;
67
68 if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
69 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
70 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
71 .addReg(SrcReg, getKillRegState(KillSrc));
72 return;
73
Tom Stellardaac18892013-02-07 19:39:43 +000074 } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
Tom Stellard75aadc22012-12-11 21:25:42 +000075 assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
76 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
77 .addReg(SrcReg, getKillRegState(KillSrc));
Christian Konigd0e3da12013-03-01 09:46:27 +000078 return;
79
80 } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
81 assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
82 Opcode = AMDGPU::S_MOV_B32;
83 SubIndices = Sub0_3;
84
85 } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
86 assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
87 Opcode = AMDGPU::S_MOV_B32;
88 SubIndices = Sub0_7;
89
90 } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
91 assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
92 Opcode = AMDGPU::S_MOV_B32;
93 SubIndices = Sub0_15;
94
Tom Stellard75aadc22012-12-11 21:25:42 +000095 } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
96 assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
Christian Konigd0e3da12013-03-01 09:46:27 +000097 AMDGPU::SReg_32RegClass.contains(SrcReg));
Tom Stellard75aadc22012-12-11 21:25:42 +000098 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
99 .addReg(SrcReg, getKillRegState(KillSrc));
Christian Konigd0e3da12013-03-01 09:46:27 +0000100 return;
101
102 } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
103 assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
104 AMDGPU::SReg_64RegClass.contains(SrcReg));
105 Opcode = AMDGPU::V_MOV_B32_e32;
106 SubIndices = Sub0_1;
107
108 } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
109 assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
110 AMDGPU::SReg_128RegClass.contains(SrcReg));
111 Opcode = AMDGPU::V_MOV_B32_e32;
112 SubIndices = Sub0_3;
113
114 } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
115 assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
116 AMDGPU::SReg_256RegClass.contains(SrcReg));
117 Opcode = AMDGPU::V_MOV_B32_e32;
118 SubIndices = Sub0_7;
119
120 } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
121 assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
122 AMDGPU::SReg_512RegClass.contains(SrcReg));
123 Opcode = AMDGPU::V_MOV_B32_e32;
124 SubIndices = Sub0_15;
125
Tom Stellard75aadc22012-12-11 21:25:42 +0000126 } else {
Christian Konigd0e3da12013-03-01 09:46:27 +0000127 llvm_unreachable("Can't copy register!");
128 }
129
130 while (unsigned SubIdx = *SubIndices++) {
131 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
132 get(Opcode), RI.getSubReg(DestReg, SubIdx));
133
134 Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));
135
136 if (*SubIndices)
137 Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
Tom Stellard75aadc22012-12-11 21:25:42 +0000138 }
139}
140
Christian Konig76edd4f2013-02-26 17:52:29 +0000141MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
142 bool NewMI) const {
143
144 if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
145 !MI->getOperand(2).isReg())
146 return 0;
147
148 return TargetInstrInfo::commuteInstruction(MI, NewMI);
149}
150
Tom Stellard75aadc22012-12-11 21:25:42 +0000151MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
152 int64_t Imm) const {
Christian Konigc756cb992013-02-16 11:28:22 +0000153 MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
NAKAMURA Takumi2a0b40f2012-12-20 00:22:11 +0000154 MachineInstrBuilder MIB(*MF, MI);
155 MIB.addReg(DstReg, RegState::Define);
156 MIB.addImm(Imm);
Tom Stellard75aadc22012-12-11 21:25:42 +0000157
158 return MI;
159
160}
161
162bool SIInstrInfo::isMov(unsigned Opcode) const {
163 switch(Opcode) {
164 default: return false;
165 case AMDGPU::S_MOV_B32:
166 case AMDGPU::S_MOV_B64:
167 case AMDGPU::V_MOV_B32_e32:
168 case AMDGPU::V_MOV_B32_e64:
Tom Stellard75aadc22012-12-11 21:25:42 +0000169 return true;
170 }
171}
172
173bool
174SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
175 return RC != &AMDGPU::EXECRegRegClass;
176}
Tom Stellardf3b2a1e2013-02-06 17:32:29 +0000177
178//===----------------------------------------------------------------------===//
179// Indirect addressing callbacks
180//===----------------------------------------------------------------------===//
181
182unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
183 unsigned Channel) const {
184 assert(Channel == 0);
185 return RegIndex;
186}
187
188
189int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
190 llvm_unreachable("Unimplemented");
191}
192
193int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
194 llvm_unreachable("Unimplemented");
195}
196
197const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
198 unsigned SourceReg) const {
199 llvm_unreachable("Unimplemented");
200}
201
202const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
203 llvm_unreachable("Unimplemented");
204}
205
206MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
207 MachineBasicBlock *MBB,
208 MachineBasicBlock::iterator I,
209 unsigned ValueReg,
210 unsigned Address, unsigned OffsetReg) const {
211 llvm_unreachable("Unimplemented");
212}
213
214MachineInstrBuilder SIInstrInfo::buildIndirectRead(
215 MachineBasicBlock *MBB,
216 MachineBasicBlock::iterator I,
217 unsigned ValueReg,
218 unsigned Address, unsigned OffsetReg) const {
219 llvm_unreachable("Unimplemented");
220}
221
222const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
223 llvm_unreachable("Unimplemented");
224}