//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include <stdio.h>

using namespace llvm;

SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this)
  { }

const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
  return RI;
}

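// Copy SrcReg into DestReg, picking the mov opcode from the destination's
// register class: S_MOV_B64 for 64-bit SGPR pairs, V_MOV_B32_e32 when the
// destination is a VGPR (the source may be an SGPR or a VGPR), and S_MOV_B32
// for 32-bit SGPR-to-SGPR copies.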
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {
  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
  } else {
    assert(AMDGPU::SReg_32RegClass.contains(DestReg));
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
  }
}

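// Create (but do not insert) a V_MOV_IMM_I32 that defines DstReg with the
// immediate value Imm.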
MachineInstr *SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
                                          int64_t Imm) const {
  MachineInstr *MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_IMM_I32),
                                            DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addImm(Imm);

  return MI;
}

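// Return true if Opcode is one of the SI move opcodes.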
bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_IMM_F32:
  case AMDGPU::V_MOV_IMM_I32:
  case AMDGPU::S_MOV_IMM_I32:
    return true;
  }
}

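// Defs of EXEC change which lanes are executing, so they must not be moved;
// defs in any other register class are safe to move.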
bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}