blob: 9a00ecb24ebe611420c8eea7c647b6d277323444 [file] [log] [blame]
Tom Stellard75aadc22012-12-11 21:25:42 +00001//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// \brief Implementation of the TargetInstrInfo class that is common to all
12/// AMD GPUs.
13//
14//===----------------------------------------------------------------------===//
15
16#include "AMDGPUInstrInfo.h"
17#include "AMDGPURegisterInfo.h"
18#include "AMDGPUTargetMachine.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000019#include "llvm/CodeGen/MachineFrameInfo.h"
20#include "llvm/CodeGen/MachineInstrBuilder.h"
21#include "llvm/CodeGen/MachineRegisterInfo.h"
22
Chandler Carruthd174b722014-04-22 02:03:14 +000023using namespace llvm;
24
Juergen Ributzkad12ccbd2013-11-19 00:57:56 +000025#define GET_INSTRINFO_CTOR_DTOR
Tom Stellard02661d92013-06-25 21:22:18 +000026#define GET_INSTRINFO_NAMED_OPS
Christian Konigf741fbf2013-02-26 17:52:42 +000027#define GET_INSTRMAP_INFO
Tom Stellard75aadc22012-12-11 21:25:42 +000028#include "AMDGPUGenInstrInfo.inc"
29
// Pin the vtable to this file: defining one virtual method out-of-line gives
// the compiler a "key function", so the vtable is emitted once here instead
// of in every translation unit that uses the class.
void AMDGPUInstrInfo::anchor() {}
32
/// Construct the common AMD GPU instruction info, remembering the subtarget
/// so later queries (e.g. pseudoToMCOpcode) can select the right encoding.
/// NOTE(review): the (-1, -1) arguments presumably mark the call-frame
/// setup/destroy opcodes as unused — confirm against the generated
/// AMDGPUGenInstrInfo/TargetInstrInfo constructor.
AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &ST)
  : AMDGPUGenInstrInfo(-1, -1), ST(ST) {}
Tom Stellard75aadc22012-12-11 21:25:42 +000035
/// Always ask the scheduler to cluster nearby loads for this target;
/// shouldScheduleLoadsNear below decides which individual pairs qualify.
bool AMDGPUInstrInfo::enableClusterLoads() const {
  return true;
}
39
Matt Arsenaultd5f4de22014-08-06 00:29:49 +000040// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
41// the first 16 loads will be interleaved with the stores, and the next 16 will
42// be clustered as expected. It should really split into 2 16 store batches.
43//
44// Loads are clustered until this returns false, rather than trying to schedule
45// groups of stores. This also means we have to deal with saying different
46// address space loads should be clustered, and ones which might cause bank
47// conflicts.
48//
49// This might be deprecated so it might not be worth that much effort to fix.
50bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
51 int64_t Offset0, int64_t Offset1,
52 unsigned NumLoads) const {
53 assert(Offset1 > Offset0 &&
54 "Second offset should be larger than first offset!");
55 // If we have less than 16 loads in a row, and the offsets are within 64
56 // bytes, then schedule together.
57
58 // A cacheline is 64 bytes (for global memory).
59 return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
Tom Stellard75aadc22012-12-11 21:25:42 +000060}
61
Tom Stellard682bfbc2013-10-10 17:11:24 +000062int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
63 switch (Channels) {
64 default: return Opcode;
65 case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
66 case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
67 case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
68 }
69}
Tom Stellardc721a232014-05-16 20:56:47 +000070
// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
// The value is passed (cast to the tablegen'd Subtarget enum) to
// getMCOpcodeGen to pick the encoding table for a generation.
enum SIEncodingFamily {
  SI = 0, // Southern Islands / Sea Islands encoding.
  VI = 1  // Volcanic Islands encoding.
};
76
// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
// Map a pseudo \p Opcode to the MC opcode for encoding family \p Gen
// (a SIEncodingFamily value) via the generated getMCOpcodeGen table.
static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}
}
}
Marek Olsaka93603d2015-01-15 18:42:51 +000087
Matt Arsenault43e92fe2016-06-24 06:30:11 +000088static SIEncodingFamily subtargetEncodingFamily(const AMDGPUSubtarget &ST) {
89 switch (ST.getGeneration()) {
90 case AMDGPUSubtarget::SOUTHERN_ISLANDS:
91 case AMDGPUSubtarget::SEA_ISLANDS:
92 return SIEncodingFamily::SI;
Marek Olsaka93603d2015-01-15 18:42:51 +000093 case AMDGPUSubtarget::VOLCANIC_ISLANDS:
Matt Arsenault43e92fe2016-06-24 06:30:11 +000094 return SIEncodingFamily::VI;
95
96 // FIXME: This should never be called for r600 GPUs.
97 case AMDGPUSubtarget::R600:
98 case AMDGPUSubtarget::R700:
99 case AMDGPUSubtarget::EVERGREEN:
100 case AMDGPUSubtarget::NORTHERN_ISLANDS:
101 return SIEncodingFamily::SI;
Marek Olsaka93603d2015-01-15 18:42:51 +0000102 }
Simon Pilgrim634dde32016-06-27 12:58:10 +0000103
104 llvm_unreachable("Unknown subtarget generation!");
Marek Olsaka93603d2015-01-15 18:42:51 +0000105}
106
/// Translate \p Opcode (a pseudo instruction) into the native MC opcode for
/// this subtarget's encoding family.
///
/// \returns \p Opcode unchanged when it is already native, the mapped MC
/// opcode when a mapping exists, or -1 when the pseudo has no encoding in
/// this subtarget generation. The two sentinels below come from the
/// tablegen'd mapping table and must be distinguished carefully.
int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
  int MCOp = AMDGPU::getMCOpcode(Opcode, subtargetEncodingFamily(ST));

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation. Note this compares the
  // int against 0xFFFF, not against -1.
  if (MCOp == (uint16_t)-1)
    return -1;

  return MCOp;
}