blob: 42c7b967f3e79d20bb07d421de997517d000fde9 [file] [log] [blame]
//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//
15
16#include "AMDGPUInstrInfo.h"
17#include "AMDGPURegisterInfo.h"
18#include "AMDGPUTargetMachine.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000019#include "llvm/CodeGen/MachineFrameInfo.h"
20#include "llvm/CodeGen/MachineInstrBuilder.h"
21#include "llvm/CodeGen/MachineRegisterInfo.h"
22
using namespace llvm;

// Pull in the tablegen-generated instruction information.
// GET_INSTRINFO_CTOR_DTOR emits the AMDGPUGenInstrInfo constructor/destructor
// definitions; GET_INSTRMAP_INFO emits the instruction-mapping tables used
// below (e.g. getMCOpcodeGen and getMaskedMIMGOp).
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
28
// Pin the vtable to this file: defining one out-of-line virtual method gives
// the class's vtable a single home translation unit.
void AMDGPUInstrInfo::anchor() {}
31
// Construct the common AMD GPU instruction info, remembering the subtarget
// for later subtarget-dependent queries (see pseudoToMCOpcode).
// NOTE(review): the (-1, -1) arguments are presumably the "no frame
// setup/destroy opcode" sentinels of the generated base-class constructor —
// confirm against TargetInstrInfo.
AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &ST)
  : AMDGPUGenInstrInfo(-1, -1), ST(ST) {}
Tom Stellard75aadc22012-12-11 21:25:42 +000034
Matt Arsenault034d6662014-07-24 02:10:17 +000035bool AMDGPUInstrInfo::enableClusterLoads() const {
36 return true;
37}
38
Matt Arsenaultd5f4de22014-08-06 00:29:49 +000039// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
40// the first 16 loads will be interleaved with the stores, and the next 16 will
41// be clustered as expected. It should really split into 2 16 store batches.
42//
43// Loads are clustered until this returns false, rather than trying to schedule
44// groups of stores. This also means we have to deal with saying different
45// address space loads should be clustered, and ones which might cause bank
46// conflicts.
47//
48// This might be deprecated so it might not be worth that much effort to fix.
49bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
50 int64_t Offset0, int64_t Offset1,
51 unsigned NumLoads) const {
52 assert(Offset1 > Offset0 &&
53 "Second offset should be larger than first offset!");
54 // If we have less than 16 loads in a row, and the offsets are within 64
55 // bytes, then schedule together.
56
57 // A cacheline is 64 bytes (for global memory).
58 return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
Tom Stellard75aadc22012-12-11 21:25:42 +000059}
60
Tom Stellard682bfbc2013-10-10 17:11:24 +000061int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
62 switch (Channels) {
63 default: return Opcode;
64 case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
65 case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
66 case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
67 }
68}
Tom Stellardc721a232014-05-16 20:56:47 +000069
// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
enum SIEncodingFamily {
  SI = 0, // Southern Islands / Sea Islands encoding.
  VI = 1  // Volcanic Islands encoding.
};
75
// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
// Look up the MC opcode for pseudo \p Opcode under encoding generation
// \p Gen (a SIEncodingFamily value, cast to the tablegen Subtarget enum).
static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}
}
}
Marek Olsaka93603d2015-01-15 18:42:51 +000086
Matt Arsenault43e92fe2016-06-24 06:30:11 +000087static SIEncodingFamily subtargetEncodingFamily(const AMDGPUSubtarget &ST) {
88 switch (ST.getGeneration()) {
89 case AMDGPUSubtarget::SOUTHERN_ISLANDS:
90 case AMDGPUSubtarget::SEA_ISLANDS:
91 return SIEncodingFamily::SI;
Marek Olsaka93603d2015-01-15 18:42:51 +000092 case AMDGPUSubtarget::VOLCANIC_ISLANDS:
Matt Arsenault43e92fe2016-06-24 06:30:11 +000093 return SIEncodingFamily::VI;
94
95 // FIXME: This should never be called for r600 GPUs.
96 case AMDGPUSubtarget::R600:
97 case AMDGPUSubtarget::R700:
98 case AMDGPUSubtarget::EVERGREEN:
99 case AMDGPUSubtarget::NORTHERN_ISLANDS:
100 return SIEncodingFamily::SI;
Marek Olsaka93603d2015-01-15 18:42:51 +0000101 }
Simon Pilgrim634dde32016-06-27 12:58:10 +0000102
103 llvm_unreachable("Unknown subtarget generation!");
Marek Olsaka93603d2015-01-15 18:42:51 +0000104}
105
106int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000107 int MCOp = AMDGPU::getMCOpcode(Opcode, subtargetEncodingFamily(ST));
Marek Olsaka93603d2015-01-15 18:42:51 +0000108
109 // -1 means that Opcode is already a native instruction.
110 if (MCOp == -1)
111 return Opcode;
112
113 // (uint16_t)-1 means that Opcode is a pseudo instruction that has
114 // no encoding in the given subtarget generation.
115 if (MCOp == (uint16_t)-1)
116 return -1;
117
118 return MCOp;
119}