//===-- llvm/Target/TargetSchedule.cpp - Sched Machine Model ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
  cl::desc("Use TargetSchedModel for latency lookup"));

static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
  cl::desc("Use InstrItineraryData for latency lookup"));

bool TargetSchedModel::hasInstrSchedModel() const {
  return EnableSchedModel && SchedModel.hasInstrSchedModel();
}

bool TargetSchedModel::hasInstrItineraries() const {
  return EnableSchedItins && !InstrItins.isEmpty();
}

void TargetSchedModel::init(const MCSchedModel &sm,
                            const TargetSubtargetInfo *sti,
                            const TargetInstrInfo *tii) {
  SchedModel = sm;
  STI = sti;
  TII = tii;
  STI->initInstrItins(InstrItins);
}

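/// Return the number of micro-ops that this instruction decodes to, preferring
/// the itinerary's count, then the per-operand machine model's scheduling
/// class, and otherwise assuming one micro-op (zero for transient instructions
/// such as COPY).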
unsigned TargetSchedModel::getNumMicroOps(MachineInstr *MI) const {
  if (hasInstrItineraries()) {
    int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, MI);
  }
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid())
      return SCDesc->NumMicroOps;
  }
  return MI->isTransient() ? 0 : 1;
}

/// If we can determine the operand latency from the def only, without machine
/// model or itinerary lookup, do so. Otherwise return -1.
int TargetSchedModel::getDefLatency(const MachineInstr *DefMI,
                                    bool FindMin) const {

  // Return a latency based on the itinerary properties and defining instruction
  // if possible. Some common subtargets don't require per-operand latency,
  // especially for minimum latencies.
  if (FindMin) {
    // If MinLatency is invalid, then use the itinerary for MinLatency. If no
    // itinerary exists either, then use single cycle latency.
    if (SchedModel.MinLatency < 0 && !hasInstrItineraries()) {
      return 1;
    }
    return SchedModel.MinLatency;
  }
  else if (!hasInstrSchedModel() && !hasInstrItineraries()) {
    return TII->defaultDefLatency(&SchedModel, DefMI);
  }
  // ...operand lookup required
  return -1;
}

/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {

  // Get the definition's scheduling class descriptor from this machine model.
  unsigned SchedClass = MI->getDesc().getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);

#ifndef NDEBUG
  unsigned NIter = 0;
#endif
  while (SCDesc->isVariant()) {
    assert(++NIter < 6 && "Variants are nested deeper than the magic number");

    SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
    SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  }
  return SCDesc;
}

/// Find the def index of this operand. This index maps to the machine model and
/// is independent of use operands. Def operands may be reordered with uses or
/// merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != DefOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef())
      ++DefIdx;
  }
  return DefIdx;
}

/// Find the use index of this operand. This is independent of the instruction's
/// def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use". The machine model allows an operand to be both a Def and Use.
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
  unsigned UseIdx = 0;
  for (unsigned i = 0; i != UseOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.readsReg())
      ++UseIdx;
  }
  return UseIdx;
}

// Top-level API for clients that know the operand indices.
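// For example, a client holding a TargetSchedModel TSM (illustrative name)
// might query the latency of a DefMI -> UseMI data edge with:
//   TSM.computeOperandLatency(DefMI, DefOperIdx, UseMI, UseOperIdx,
//                             /*FindMin=*/false);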
unsigned TargetSchedModel::computeOperandLatency(
  const MachineInstr *DefMI, unsigned DefOperIdx,
  const MachineInstr *UseMI, unsigned UseOperIdx,
  bool FindMin) const {

  int DefLatency = getDefLatency(DefMI, FindMin);
  if (DefLatency >= 0)
    return DefLatency;

  if (hasInstrItineraries()) {
    int OperLatency = 0;
    if (UseMI) {
      OperLatency =
        TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx, UseMI, UseOperIdx);
    }
    else {
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
    }
    if (OperLatency >= 0)
      return OperLatency;

    // No operand latency was found.
    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);

    // Expected latency is the max of the stage latency and itinerary props.
    // Rather than directly querying InstrItins stage latency, we call a TII
    // hook to allow subtargets to specialize latency. This hook is only
    // applicable to the InstrItins model. InstrSchedModel should model all
    // special cases without TII hooks.
    if (!FindMin)
      InstrLatency = std::max(InstrLatency,
                              TII->defaultDefLatency(&SchedModel, DefMI));
    return InstrLatency;
  }
  assert(!FindMin && hasInstrSchedModel() &&
         "Expected a SchedModel for this cpu");
  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
  if (DefIdx < SCDesc->NumWriteLatencyEntries) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
      STI->getWriteLatencyEntry(SCDesc, DefIdx);
    unsigned WriteID = WLEntry->WriteResourceID;
    unsigned Latency = WLEntry->Cycles;
    if (!UseMI)
      return Latency;

    // Lookup the use's latency adjustment in SubtargetInfo.
    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
    if (UseDesc->NumReadAdvanceEntries == 0)
      return Latency;
    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
    return Latency - STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
  }
  // If DefIdx does not exist in the model (e.g. implicit defs), then return
  // unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
      && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()) {
    std::string Err;
    raw_string_ostream ss(Err);
    ss << "DefIdx " << DefIdx << " exceeds machine model writes for "
       << *DefMI;
    report_fatal_error(ss.str());
  }
#endif
  return DefMI->isTransient() ? 0 : 1;
}

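/// Compute an instruction's latency independently of its uses: under the
/// per-operand machine model this is the maximum write latency across the
/// instruction's defs; itineraries and bundles fall back to the TII hook.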
unsigned TargetSchedModel::computeInstrLatency(const MachineInstr *MI) const {
  // For the itinerary model, fall back to the old subtarget hook.
  // Allow subtargets to compute Bundle latencies outside the machine model.
  if (hasInstrItineraries() || MI->isBundle())
    return TII->getInstrLatency(&InstrItins, MI);

  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid()) {
      unsigned Latency = 0;
      for (unsigned DefIdx = 0, DefEnd = SCDesc->NumWriteLatencyEntries;
           DefIdx != DefEnd; ++DefIdx) {
        // Lookup the definition's write latency in SubtargetInfo.
        const MCWriteLatencyEntry *WLEntry =
          STI->getWriteLatencyEntry(SCDesc, DefIdx);
        Latency = std::max(Latency, WLEntry->Cycles);
      }
      return Latency;
    }
  }
  return TII->defaultDefLatency(&SchedModel, MI);
}

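/// Compute the latency of an output (WAW) dependence from DefMI's operand
/// DefOperIdx to the later write in DepMI.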
unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                     const MachineInstr *DepMI) const {
  // MinLatency == -1 is for in-order processors that always have unit
  // MinLatency. MinLatency > 0 is for in-order processors with varying min
  // latencies, but since this is not a RAW dep, we always use unit latency.
  if (SchedModel.MinLatency != 0)
    return 1;

  // MinLatency == 0 indicates an out-of-order processor that can dispatch
  // WAW dependencies in the same cycle.

  // Treat predication as a data dependency for out-of-order cpus. In-order
  // cpus do not need to treat predicated writes specially.
  //
  // TODO: The following hack exists because predication passes do not
  // correctly append imp-use operands, and readsReg() strangely returns false
  // for predicated defs.
  unsigned Reg = DefMI->getOperand(DefOperIdx).getReg();
  const MachineFunction &MF = *DefMI->getParent()->getParent();
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
  if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(DepMI))
    return computeInstrLatency(DefMI);

  // If we have a per operand scheduling model, check if this def is writing
  // an unbuffered resource. If so, it is treated like an in-order cpu.
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
    if (SCDesc->isValid()) {
      for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
             *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
        if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->IsBuffered)
          return 1;
      }
    }
  }
  return 0;
}