blob: 1cb020a9f6d1d39285d5ee7452d065c6856fe6c1 [file] [log] [blame]
//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the InstrBuilder interface.
///
//===----------------------------------------------------------------------===//
14
15#include "InstrBuilder.h"
Andrea Di Biagio2145b132018-06-20 10:08:11 +000016#include "llvm/ADT/APInt.h"
Andrea Di Biagio2008c7c2018-06-04 12:23:07 +000017#include "llvm/ADT/DenseMap.h"
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000018#include "llvm/MC/MCInst.h"
19#include "llvm/Support/Debug.h"
Andrea Di Biagio24fb4fc2018-05-04 13:52:12 +000020#include "llvm/Support/WithColor.h"
Andrea Di Biagio88347792018-07-09 12:30:55 +000021#include "llvm/Support/raw_ostream.h"
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000022
23#define DEBUG_TYPE "llvm-mca"
24
25namespace mca {
26
27using namespace llvm;
28
// Populates ID.Resources and ID.Buffers with the processor resources (and the
// number of resource cycles) consumed by instructions of scheduling class
// SCDesc. Resource identifiers are the bit-masks from ProcResourceMasks, so a
// single set bit denotes a resource unit and multiple set bits denote a
// resource group.
static void initializeUsedResources(InstrDesc &ID,
                                    const MCSchedClassDesc &SCDesc,
                                    const MCSubtargetInfo &STI,
                                    ArrayRef<uint64_t> ProcResourceMasks) {
  const MCSchedModel &SM = STI.getSchedModel();

  // Populate resources consumed.
  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
  std::vector<ResourcePlusCycles> Worklist;

  // Track cycles contributed by resources that are in a "Super" relationship.
  // This is required if we want to correctly match the behavior of method
  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
  // of "consumed" processor resources and resource cycles, the logic in
  // ExpandProcResource() doesn't update the number of resource cycles
  // contributed by a "Super" resource to a group.
  // We need to take this into account when we find that a processor resource is
  // part of a group, and it is also used as the "Super" of other resources.
  // This map stores the number of cycles contributed by sub-resources that are
  // part of a "Super" resource. The key value is the "Super" resource mask ID.
  DenseMap<uint64_t, unsigned> SuperResources;

  // Collect every write-resource entry of the scheduling class into the
  // worklist, remembering buffered resources and "Super" contributions.
  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
    // A BufferSize of -1 identifies an unbuffered (in-order) resource; only
    // buffered resources are recorded in ID.Buffers.
    if (PR.BufferSize != -1)
      ID.Buffers.push_back(Mask);
    CycleSegment RCy(0, PRE->Cycles, false);
    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
    if (PR.SuperIdx) {
      uint64_t Super = ProcResourceMasks[PR.SuperIdx];
      SuperResources[Super] += PRE->Cycles;
    }
  }

  // Sort elements by mask popcount, so that we prioritize resource units over
  // resource groups, and smaller groups over larger groups.
  sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
    unsigned popcntA = countPopulation(A.first);
    unsigned popcntB = countPopulation(B.first);
    if (popcntA < popcntB)
      return true;
    if (popcntA > popcntB)
      return false;
    return A.first < B.first;
  });

  // Union of all resource *units* (single-bit masks) consumed so far.
  uint64_t UsedResourceUnits = 0;

  // Remove cycles contributed by smaller resources.
  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
    ResourcePlusCycles &A = Worklist[I];
    if (!A.second.size()) {
      // All of this resource's cycles were subtracted away by smaller
      // contained resources: mark it reserved with zero units.
      A.second.NumUnits = 0;
      A.second.setReserved();
      ID.Resources.emplace_back(A);
      continue;
    }

    ID.Resources.emplace_back(A);
    uint64_t NormalizedMask = A.first;
    if (countPopulation(A.first) == 1) {
      UsedResourceUnits |= A.first;
    } else {
      // Remove the leading 1 from the resource group mask.
      NormalizedMask ^= PowerOf2Floor(NormalizedMask);
    }

    // Propagate A's cycles to every later (larger) group that contains A,
    // discounting cycles already accounted for via A's "Super" relationship.
    for (unsigned J = I + 1; J < E; ++J) {
      ResourcePlusCycles &B = Worklist[J];
      if ((NormalizedMask & B.first) == NormalizedMask) {
        B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
        if (countPopulation(B.first) > 1)
          B.second.NumUnits++;
      }
    }
  }

  // A SchedWrite may specify a number of cycles in which a resource group
  // is reserved. For example (on target x86; cpu Haswell):
  //
  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
  //    let ResourceCycles = [2, 2, 3];
  //  }
  //
  // This means:
  // Resource units HWPort0 and HWPort1 are both used for 2cy.
  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
  // will not be usable for 2 entire cycles from instruction issue.
  //
  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
  // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
  // extra delay on top of the 2 cycles latency.
  // During those extra cycles, HWPort01 is not usable by other instructions.
  for (ResourcePlusCycles &RPC : ID.Resources) {
    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
      // Remove the leading 1 from the resource group mask.
      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
      // If every unit of the group was individually consumed, the whole
      // group is effectively reserved for the duration of the write.
      if ((Mask & UsedResourceUnits) == Mask)
        RPC.second.setReserved();
    }
  }

  LLVM_DEBUG({
    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
      dbgs() << "\t\tMask=" << R.first << ", cy=" << R.second.size() << '\n';
    for (const uint64_t R : ID.Buffers)
      dbgs() << "\t\tBuffer Mask=" << R << '\n';
  });
}
141
142static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
143 const MCSchedClassDesc &SCDesc,
144 const MCSubtargetInfo &STI) {
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000145 if (MCDesc.isCall()) {
146 // We cannot estimate how long this call will take.
147 // Artificially set an arbitrarily high latency (100cy).
Andrea Di Biagioc95a1302018-03-13 15:59:59 +0000148 ID.MaxLatency = 100U;
149 return;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000150 }
151
Andrea Di Biagioc95a1302018-03-13 15:59:59 +0000152 int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
153 // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
154 ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000155}
156
// Populates ID.Writes with one WriteDescriptor per register definition of MCI:
// explicit definitions first, then implicit definitions, then (if present) the
// optional definition. Latencies come from the scheduling class write-latency
// table when available, otherwise they default to ID.MaxLatency.
// Returns an error if fewer register definitions are found than the opcode
// descriptor promises.
Error InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
                                   unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  const MCSchedModel &SM = STI.getSchedModel();
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);

  // These are for now the (strong) assumptions made by this algorithm:
  //  * The number of explicit and implicit register definitions in a MCInst
  //    matches the number of explicit and implicit definitions according to
  //    the opcode descriptor (MCInstrDesc).
  //  * Register definitions take precedence over register uses in the operands
  //    list.
  //  * If an opcode specifies an optional definition, then the optional
  //    definition is always the last operand in the sequence, and it can be
  //    set to zero (i.e. "no register").
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions is
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
    TotalDefs++;
  ID.Writes.resize(TotalDefs);
  // Iterate over the operands list, and skip non-register operands.
  // The first NumExplictDefs register operands are expected to be register
  // definitions.
  unsigned CurrentDef = 0;
  unsigned i = 0;
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = i;
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }
    Write.IsOptionalDef = false;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
    CurrentDef++;
  }

  if (CurrentDef != NumExplicitDefs) {
    return make_error<StringError>(
        "error: Expected more register operand definitions.",
        inconvertibleErrorCode());
  }

  CurrentDef = 0;
  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
    unsigned Index = NumExplicitDefs + CurrentDef;
    WriteDescriptor &Write = ID.Writes[Index];
    // Implicit writes are flagged with a bitwise-NOT'd operand index; this is
    // what isImplicitWrite() checks when the descriptor is consumed later.
    Write.OpIndex = ~CurrentDef;
    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
    if (Index < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, Index);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }

    Write.IsOptionalDef = false;
    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
    LLVM_DEBUG({
      dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
             << ", PhysReg=" << MRI.getName(Write.RegisterID)
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (MCDesc.hasOptionalDef()) {
    // Always assume that the optional definition is the last operand of the
    // MCInst sequence.
    const MCOperand &Op = MCI.getOperand(MCI.getNumOperands() - 1);
    if (i == MCI.getNumOperands() || !Op.isReg())
      return make_error<StringError>(
          "error: expected a register operand for an optional "
          "definition. Instruction has not be correctly analyzed.",
          inconvertibleErrorCode());

    WriteDescriptor &Write = ID.Writes[TotalDefs - 1];
    Write.OpIndex = MCI.getNumOperands() - 1;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = true;
  }

  return ErrorSuccess();
}
272
Matt Davis4bcf3692018-08-13 18:11:48 +0000273Error InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
274 unsigned SchedClassID) {
Andrea Di Biagio88347792018-07-09 12:30:55 +0000275 const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000276 unsigned NumExplicitDefs = MCDesc.getNumDefs();
Andrea Di Biagio88347792018-07-09 12:30:55 +0000277
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000278 // Skip explicit definitions.
Andrea Di Biagio88347792018-07-09 12:30:55 +0000279 unsigned i = 0;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000280 for (; i < MCI.getNumOperands() && NumExplicitDefs; ++i) {
281 const MCOperand &Op = MCI.getOperand(i);
282 if (Op.isReg())
283 NumExplicitDefs--;
284 }
285
Matt Davis4bcf3692018-08-13 18:11:48 +0000286 if (NumExplicitDefs) {
287 return make_error<StringError>(
288 "error: Expected more register operand definitions. ",
289 inconvertibleErrorCode());
290 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000291
292 unsigned NumExplicitUses = MCI.getNumOperands() - i;
293 unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
294 if (MCDesc.hasOptionalDef()) {
295 assert(NumExplicitUses);
296 NumExplicitUses--;
297 }
298 unsigned TotalUses = NumExplicitUses + NumImplicitUses;
299 if (!TotalUses)
Matt Davis4bcf3692018-08-13 18:11:48 +0000300 return ErrorSuccess();
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000301
302 ID.Reads.resize(TotalUses);
303 for (unsigned CurrentUse = 0; CurrentUse < NumExplicitUses; ++CurrentUse) {
304 ReadDescriptor &Read = ID.Reads[CurrentUse];
305 Read.OpIndex = i + CurrentUse;
Andrea Di Biagio0a837ef2018-03-29 14:26:56 +0000306 Read.UseIndex = CurrentUse;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000307 Read.SchedClassID = SchedClassID;
Andrea Di Biagio23fbe7c2018-07-13 14:55:47 +0000308 LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex
309 << ", UseIndex=" << Read.UseIndex << '\n');
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000310 }
311
312 for (unsigned CurrentUse = 0; CurrentUse < NumImplicitUses; ++CurrentUse) {
313 ReadDescriptor &Read = ID.Reads[NumExplicitUses + CurrentUse];
Andrea Di Biagio21f0fdb2018-06-22 16:37:05 +0000314 Read.OpIndex = ~CurrentUse;
Andrea Di Biagio6fd62fe2018-04-02 13:46:49 +0000315 Read.UseIndex = NumExplicitUses + CurrentUse;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000316 Read.RegisterID = MCDesc.getImplicitUses()[CurrentUse];
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000317 Read.SchedClassID = SchedClassID;
Andrea Di Biagio23fbe7c2018-07-13 14:55:47 +0000318 LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex << ", RegisterID="
319 << MRI.getName(Read.RegisterID) << '\n');
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000320 }
Matt Davis4bcf3692018-08-13 18:11:48 +0000321 return ErrorSuccess();
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000322}
323
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000324Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
325 const MCInst &MCI) const {
326 if (ID.NumMicroOps != 0)
327 return ErrorSuccess();
328
329 bool UsesMemory = ID.MayLoad || ID.MayStore;
330 bool UsesBuffers = !ID.Buffers.empty();
331 bool UsesResources = !ID.Resources.empty();
332 if (!UsesMemory && !UsesBuffers && !UsesResources)
333 return ErrorSuccess();
334
335 std::string ToString;
336 raw_string_ostream OS(ToString);
337 if (UsesMemory) {
338 WithColor::error() << "found an inconsistent instruction that decodes "
339 << "into zero opcodes and that consumes load/store "
340 << "unit resources.\n";
341 } else {
342 WithColor::error() << "found an inconsistent instruction that decodes"
343 << " to zero opcodes and that consumes scheduler "
344 << "resources.\n";
345 }
346
347 MCIP.printInst(&MCI, OS, "", STI);
348 OS.flush();
349 WithColor::note() << "instruction: " << ToString << '\n';
350 return make_error<StringError>("Invalid instruction definition found",
351 inconvertibleErrorCode());
352}
353
Matt Davis4bcf3692018-08-13 18:11:48 +0000354Expected<const InstrDesc &>
355InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000356 assert(STI.getSchedModel().hasInstrSchedModel() &&
357 "Itineraries are not yet supported!");
358
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000359 // Obtain the instruction descriptor from the opcode.
Andrea Di Biagio88347792018-07-09 12:30:55 +0000360 unsigned short Opcode = MCI.getOpcode();
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000361 const MCInstrDesc &MCDesc = MCII.get(Opcode);
362 const MCSchedModel &SM = STI.getSchedModel();
363
364 // Then obtain the scheduling class information from the instruction.
Andrea Di Biagio49c85912018-05-04 13:10:10 +0000365 unsigned SchedClassID = MCDesc.getSchedClass();
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000366 unsigned CPUID = SM.getProcessorID();
367
368 // Try to solve variant scheduling classes.
369 if (SchedClassID) {
370 while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
371 SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);
372
Matt Davis4bcf3692018-08-13 18:11:48 +0000373 if (!SchedClassID) {
374 return make_error<StringError>("unable to resolve this variant class.",
375 inconvertibleErrorCode());
376 }
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000377 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000378
Matt Davis4bcf3692018-08-13 18:11:48 +0000379 // Check if this instruction is supported. Otherwise, report an error.
Andrea Di Biagio88347792018-07-09 12:30:55 +0000380 const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
381 if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
382 std::string ToString;
Andrea Di Biagioa7699122018-09-28 10:47:24 +0000383 raw_string_ostream OS(ToString);
Andrea Di Biagio88347792018-07-09 12:30:55 +0000384 WithColor::error() << "found an unsupported instruction in the input"
385 << " assembly sequence.\n";
386 MCIP.printInst(&MCI, OS, "", STI);
387 OS.flush();
Andrea Di Biagio88347792018-07-09 12:30:55 +0000388 WithColor::note() << "instruction: " << ToString << '\n';
Matt Davis4bcf3692018-08-13 18:11:48 +0000389 return make_error<StringError>(
390 "Don't know how to analyze unsupported instructions",
391 inconvertibleErrorCode());
Andrea Di Biagio88347792018-07-09 12:30:55 +0000392 }
393
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000394 // Create a new empty descriptor.
Andrea Di Biagio7b3d1622018-03-20 12:58:34 +0000395 std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000396 ID->NumMicroOps = SCDesc.NumMicroOps;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000397
398 if (MCDesc.isCall()) {
399 // We don't correctly model calls.
Andrea Di Biagio24fb4fc2018-05-04 13:52:12 +0000400 WithColor::warning() << "found a call in the input assembly sequence.\n";
401 WithColor::note() << "call instructions are not correctly modeled. "
402 << "Assume a latency of 100cy.\n";
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000403 }
404
405 if (MCDesc.isReturn()) {
Andrea Di Biagio24fb4fc2018-05-04 13:52:12 +0000406 WithColor::warning() << "found a return instruction in the input"
407 << " assembly sequence.\n";
408 WithColor::note() << "program counter updates are ignored.\n";
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000409 }
410
411 ID->MayLoad = MCDesc.mayLoad();
412 ID->MayStore = MCDesc.mayStore();
413 ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
414
415 initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
Andrea Di Biagiodb66efc2018-04-25 09:38:58 +0000416 computeMaxLatency(*ID, MCDesc, SCDesc, STI);
Matt Davis4bcf3692018-08-13 18:11:48 +0000417 if (auto Err = populateWrites(*ID, MCI, SchedClassID))
418 return std::move(Err);
419 if (auto Err = populateReads(*ID, MCI, SchedClassID))
420 return std::move(Err);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000421
Nicola Zaghend34e60c2018-05-14 12:53:11 +0000422 LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
423 LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000424
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000425 // Sanity check on the instruction descriptor.
426 if (Error Err = verifyInstrDesc(*ID, MCI))
427 return std::move(Err);
428
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000429 // Now add the new descriptor.
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000430 SchedClassID = MCDesc.getSchedClass();
431 if (!SM.getSchedClassDesc(SchedClassID)->isVariant()) {
432 Descriptors[MCI.getOpcode()] = std::move(ID);
433 return *Descriptors[MCI.getOpcode()];
434 }
435
436 VariantDescriptors[&MCI] = std::move(ID);
437 return *VariantDescriptors[&MCI];
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000438}
439
Matt Davis4bcf3692018-08-13 18:11:48 +0000440Expected<const InstrDesc &>
441InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000442 if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
443 return *Descriptors[MCI.getOpcode()];
444
445 if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
446 return *VariantDescriptors[&MCI];
447
448 return createInstrDescImpl(MCI);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000449}
450
// Creates a simulated Instruction from MCI: fetches (or builds) its
// descriptor, then materializes one ReadState per register use and one
// WriteState per register definition. Also flags dependency-breaking
// (zero-idiom) semantics and optimizable register moves, as reported by the
// MCInstrAnalysis object. Returns an error if the descriptor cannot be built.
Expected<std::unique_ptr<Instruction>>
InstrBuilder::createInstruction(const MCInst &MCI) {
  Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
  if (!DescOrErr)
    return DescOrErr.takeError();
  const InstrDesc &D = *DescOrErr;
  std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);

  // Check if this is a dependency breaking instruction. `Mask` is populated
  // by the target analysis with one bit per input operand.
  APInt Mask;

  unsigned ProcID = STI.getSchedModel().getProcessorID();
  bool IsZeroIdiom = MCIA.isZeroIdiom(MCI, Mask, ProcID);
  bool IsDepBreaking =
      IsZeroIdiom || MCIA.isDependencyBreaking(MCI, Mask, ProcID);
  if (MCIA.isOptimizableRegisterMove(MCI, ProcID))
    NewIS->setOptimizableMove();

  // Initialize Reads first.
  for (const ReadDescriptor &RD : D.Reads) {
    int RegID = -1;
    if (!RD.isImplicitRead()) {
      // Explicit read.
      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
      // Skip non-register operands.
      if (!Op.isReg())
        continue;
      RegID = Op.getReg();
    } else {
      // Implicit read.
      RegID = RD.RegisterID;
    }

    // Skip invalid register operands.
    if (!RegID)
      continue;

    // Okay, this is a register operand. Create a ReadState for it.
    assert(RegID > 0 && "Invalid register ID found!");
    auto RS = llvm::make_unique<ReadState>(RD, RegID);

    if (IsDepBreaking) {
      // A mask of all zeroes means: explicit input operands are not
      // independent.
      if (Mask.isNullValue()) {
        if (!RD.isImplicitRead())
          RS->setIndependentFromDef();
      } else {
        // Check if this register operand is independent according to `Mask`.
        // Note that Mask may not have enough bits to describe all explicit and
        // implicit input operands. If this register operand doesn't have a
        // corresponding bit in Mask, then conservatively assume that it is
        // dependent.
        if (Mask.getBitWidth() > RD.UseIndex) {
          // Okay. Mask describes register use `RD.UseIndex`.
          if (Mask[RD.UseIndex])
            RS->setIndependentFromDef();
        }
      }
    }
    NewIS->getUses().emplace_back(std::move(RS));
  }

  // Early exit if there are no writes.
  if (D.Writes.empty())
    return std::move(NewIS);

  // Track register writes that implicitly clear the upper portion of the
  // underlying super-registers using an APInt.
  APInt WriteMask(D.Writes.size(), 0);

  // Now query the MCInstrAnalysis object to obtain information about which
  // register writes implicitly clear the upper portion of a super-register.
  MCIA.clearsSuperRegisters(MRI, MCI, WriteMask);

  // Initialize writes.
  unsigned WriteIndex = 0;
  for (const WriteDescriptor &WD : D.Writes) {
    unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
                                          : MCI.getOperand(WD.OpIndex).getReg();
    // Check if this is an optional definition that references NoReg.
    if (WD.IsOptionalDef && !RegID) {
      // Still advance WriteIndex so it stays in sync with WriteMask bits.
      ++WriteIndex;
      continue;
    }

    assert(RegID && "Expected a valid register ID!");
    NewIS->getDefs().emplace_back(llvm::make_unique<WriteState>(
        WD, RegID, /* ClearsSuperRegs */ WriteMask[WriteIndex],
        /* WritesZero */ IsZeroIdiom));
    ++WriteIndex;
  }

  return std::move(NewIS);
}
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000546} // namespace mca