blob: 3768c2e7088db5b869348b2de0c16fc5aec4da4b [file] [log] [blame]
//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the InstrBuilder interface.
///
//===----------------------------------------------------------------------===//
14
15#include "InstrBuilder.h"
Andrea Di Biagio2145b132018-06-20 10:08:11 +000016#include "llvm/ADT/APInt.h"
Andrea Di Biagio2008c7c2018-06-04 12:23:07 +000017#include "llvm/ADT/DenseMap.h"
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000018#include "llvm/MC/MCInst.h"
19#include "llvm/Support/Debug.h"
Andrea Di Biagio24fb4fc2018-05-04 13:52:12 +000020#include "llvm/Support/WithColor.h"
Andrea Di Biagio88347792018-07-09 12:30:55 +000021#include "llvm/Support/raw_ostream.h"
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000022
23#define DEBUG_TYPE "llvm-mca"
24
25namespace mca {
26
27using namespace llvm;
28
Andrea Di Biagio77c26ae2018-10-25 11:51:34 +000029InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
30 const llvm::MCInstrInfo &mcii,
31 const llvm::MCRegisterInfo &mri,
32 const llvm::MCInstrAnalysis &mcia)
33 : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia) {
34 computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
35}
36
Andrea Di Biagio94fafdf2018-03-24 16:05:36 +000037static void initializeUsedResources(InstrDesc &ID,
38 const MCSchedClassDesc &SCDesc,
39 const MCSubtargetInfo &STI,
40 ArrayRef<uint64_t> ProcResourceMasks) {
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000041 const MCSchedModel &SM = STI.getSchedModel();
42
43 // Populate resources consumed.
44 using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
45 std::vector<ResourcePlusCycles> Worklist;
Andrea Di Biagio2008c7c2018-06-04 12:23:07 +000046
47 // Track cycles contributed by resources that are in a "Super" relationship.
48 // This is required if we want to correctly match the behavior of method
49 // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
50 // of "consumed" processor resources and resource cycles, the logic in
51 // ExpandProcResource() doesn't update the number of resource cycles
52 // contributed by a "Super" resource to a group.
53 // We need to take this into account when we find that a processor resource is
54 // part of a group, and it is also used as the "Super" of other resources.
55 // This map stores the number of cycles contributed by sub-resources that are
56 // part of a "Super" resource. The key value is the "Super" resource mask ID.
57 DenseMap<uint64_t, unsigned> SuperResources;
58
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000059 for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
60 const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
61 const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
62 uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
63 if (PR.BufferSize != -1)
64 ID.Buffers.push_back(Mask);
65 CycleSegment RCy(0, PRE->Cycles, false);
66 Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
Andrea Di Biagio2008c7c2018-06-04 12:23:07 +000067 if (PR.SuperIdx) {
68 uint64_t Super = ProcResourceMasks[PR.SuperIdx];
69 SuperResources[Super] += PRE->Cycles;
70 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000071 }
72
73 // Sort elements by mask popcount, so that we prioritize resource units over
74 // resource groups, and smaller groups over larger groups.
Andrea Di Biagioa7699122018-09-28 10:47:24 +000075 sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
76 unsigned popcntA = countPopulation(A.first);
77 unsigned popcntB = countPopulation(B.first);
78 if (popcntA < popcntB)
79 return true;
80 if (popcntA > popcntB)
81 return false;
82 return A.first < B.first;
83 });
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000084
85 uint64_t UsedResourceUnits = 0;
86
87 // Remove cycles contributed by smaller resources.
88 for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
89 ResourcePlusCycles &A = Worklist[I];
90 if (!A.second.size()) {
91 A.second.NumUnits = 0;
92 A.second.setReserved();
93 ID.Resources.emplace_back(A);
94 continue;
95 }
96
97 ID.Resources.emplace_back(A);
98 uint64_t NormalizedMask = A.first;
99 if (countPopulation(A.first) == 1) {
100 UsedResourceUnits |= A.first;
101 } else {
102 // Remove the leading 1 from the resource group mask.
103 NormalizedMask ^= PowerOf2Floor(NormalizedMask);
104 }
105
106 for (unsigned J = I + 1; J < E; ++J) {
107 ResourcePlusCycles &B = Worklist[J];
108 if ((NormalizedMask & B.first) == NormalizedMask) {
Matt Davis8e2c7592018-10-01 23:01:45 +0000109 B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000110 if (countPopulation(B.first) > 1)
111 B.second.NumUnits++;
112 }
113 }
114 }
115
116 // A SchedWrite may specify a number of cycles in which a resource group
117 // is reserved. For example (on target x86; cpu Haswell):
118 //
119 // SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
120 // let ResourceCycles = [2, 2, 3];
121 // }
122 //
123 // This means:
124 // Resource units HWPort0 and HWPort1 are both used for 2cy.
125 // Resource group HWPort01 is the union of HWPort0 and HWPort1.
126 // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
127 // will not be usable for 2 entire cycles from instruction issue.
128 //
129 // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
130 // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
131 // extra delay on top of the 2 cycles latency.
132 // During those extra cycles, HWPort01 is not usable by other instructions.
133 for (ResourcePlusCycles &RPC : ID.Resources) {
134 if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
135 // Remove the leading 1 from the resource group mask.
136 uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
137 if ((Mask & UsedResourceUnits) == Mask)
138 RPC.second.setReserved();
139 }
140 }
141
Nicola Zaghend34e60c2018-05-14 12:53:11 +0000142 LLVM_DEBUG({
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000143 for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
144 dbgs() << "\t\tMask=" << R.first << ", cy=" << R.second.size() << '\n';
145 for (const uint64_t R : ID.Buffers)
146 dbgs() << "\t\tBuffer Mask=" << R << '\n';
Andrea Di Biagio7b3d1622018-03-20 12:58:34 +0000147 });
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000148}
149
150static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
151 const MCSchedClassDesc &SCDesc,
152 const MCSubtargetInfo &STI) {
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000153 if (MCDesc.isCall()) {
154 // We cannot estimate how long this call will take.
155 // Artificially set an arbitrarily high latency (100cy).
Andrea Di Biagioc95a1302018-03-13 15:59:59 +0000156 ID.MaxLatency = 100U;
157 return;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000158 }
159
Andrea Di Biagioc95a1302018-03-13 15:59:59 +0000160 int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
161 // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
162 ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000163}
164
Matt Davis4bcf3692018-08-13 18:11:48 +0000165Error InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
166 unsigned SchedClassID) {
Andrea Di Biagio88347792018-07-09 12:30:55 +0000167 const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
168 const MCSchedModel &SM = STI.getSchedModel();
169 const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
170
Andrea Di Biagioace775e2018-06-21 12:14:49 +0000171 // These are for now the (strong) assumptions made by this algorithm:
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000172 // * The number of explicit and implicit register definitions in a MCInst
173 // matches the number of explicit and implicit definitions according to
174 // the opcode descriptor (MCInstrDesc).
175 // * Register definitions take precedence over register uses in the operands
176 // list.
177 // * If an opcode specifies an optional definition, then the optional
178 // definition is always the last operand in the sequence, and it can be
179 // set to zero (i.e. "no register").
180 //
181 // These assumptions work quite well for most out-of-order in-tree targets
182 // like x86. This is mainly because the vast majority of instructions is
183 // expanded to MCInst using a straightforward lowering logic that preserves
184 // the ordering of the operands.
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000185 unsigned NumExplicitDefs = MCDesc.getNumDefs();
186 unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
187 unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
188 unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
189 if (MCDesc.hasOptionalDef())
190 TotalDefs++;
191 ID.Writes.resize(TotalDefs);
192 // Iterate over the operands list, and skip non-register operands.
193 // The first NumExplictDefs register operands are expected to be register
194 // definitions.
195 unsigned CurrentDef = 0;
196 unsigned i = 0;
197 for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
198 const MCOperand &Op = MCI.getOperand(i);
199 if (!Op.isReg())
200 continue;
201
202 WriteDescriptor &Write = ID.Writes[CurrentDef];
203 Write.OpIndex = i;
204 if (CurrentDef < NumWriteLatencyEntries) {
205 const MCWriteLatencyEntry &WLE =
206 *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
207 // Conservatively default to MaxLatency.
Andrea Di Biagio88347792018-07-09 12:30:55 +0000208 Write.Latency =
209 WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000210 Write.SClassOrWriteResourceID = WLE.WriteResourceID;
211 } else {
212 // Assign a default latency for this write.
213 Write.Latency = ID.MaxLatency;
214 Write.SClassOrWriteResourceID = 0;
215 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000216 Write.IsOptionalDef = false;
Nicola Zaghend34e60c2018-05-14 12:53:11 +0000217 LLVM_DEBUG({
Andrea Di Biagio23fbe7c2018-07-13 14:55:47 +0000218 dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
219 << ", Latency=" << Write.Latency
Andrea Di Biagio7b3d1622018-03-20 12:58:34 +0000220 << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
221 });
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000222 CurrentDef++;
223 }
224
Matt Davis4bcf3692018-08-13 18:11:48 +0000225 if (CurrentDef != NumExplicitDefs) {
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000226 return make_error<InstructionError<MCInst>>(
227 "Expected more register operand definitions.", MCI);
Matt Davis4bcf3692018-08-13 18:11:48 +0000228 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000229
230 CurrentDef = 0;
231 for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
232 unsigned Index = NumExplicitDefs + CurrentDef;
233 WriteDescriptor &Write = ID.Writes[Index];
Andrea Di Biagio21f0fdb2018-06-22 16:37:05 +0000234 Write.OpIndex = ~CurrentDef;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000235 Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
Andrea Di Biagio6fd62fe2018-04-02 13:46:49 +0000236 if (Index < NumWriteLatencyEntries) {
237 const MCWriteLatencyEntry &WLE =
238 *STI.getWriteLatencyEntry(&SCDesc, Index);
239 // Conservatively default to MaxLatency.
Andrea Di Biagio88347792018-07-09 12:30:55 +0000240 Write.Latency =
241 WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
Andrea Di Biagio6fd62fe2018-04-02 13:46:49 +0000242 Write.SClassOrWriteResourceID = WLE.WriteResourceID;
243 } else {
244 // Assign a default latency for this write.
245 Write.Latency = ID.MaxLatency;
246 Write.SClassOrWriteResourceID = 0;
247 }
248
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000249 Write.IsOptionalDef = false;
250 assert(Write.RegisterID != 0 && "Expected a valid phys register!");
Andrea Di Biagio23fbe7c2018-07-13 14:55:47 +0000251 LLVM_DEBUG({
252 dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
253 << ", PhysReg=" << MRI.getName(Write.RegisterID)
254 << ", Latency=" << Write.Latency
255 << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
256 });
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000257 }
258
259 if (MCDesc.hasOptionalDef()) {
260 // Always assume that the optional definition is the last operand of the
261 // MCInst sequence.
262 const MCOperand &Op = MCI.getOperand(MCI.getNumOperands() - 1);
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000263 if (i == MCI.getNumOperands() || !Op.isReg()) {
264 std::string Message =
265 "expected a register operand for an optional definition. Instruction "
266 "has not been correctly analyzed.";
267 return make_error<InstructionError<MCInst>>(Message, MCI);
268 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000269
270 WriteDescriptor &Write = ID.Writes[TotalDefs - 1];
271 Write.OpIndex = MCI.getNumOperands() - 1;
272 // Assign a default latency for this write.
273 Write.Latency = ID.MaxLatency;
274 Write.SClassOrWriteResourceID = 0;
275 Write.IsOptionalDef = true;
276 }
Matt Davis4bcf3692018-08-13 18:11:48 +0000277
278 return ErrorSuccess();
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000279}
280
Matt Davis4bcf3692018-08-13 18:11:48 +0000281Error InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
282 unsigned SchedClassID) {
Andrea Di Biagio88347792018-07-09 12:30:55 +0000283 const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000284 unsigned NumExplicitDefs = MCDesc.getNumDefs();
Andrea Di Biagio88347792018-07-09 12:30:55 +0000285
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000286 // Skip explicit definitions.
Andrea Di Biagio88347792018-07-09 12:30:55 +0000287 unsigned i = 0;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000288 for (; i < MCI.getNumOperands() && NumExplicitDefs; ++i) {
289 const MCOperand &Op = MCI.getOperand(i);
290 if (Op.isReg())
291 NumExplicitDefs--;
292 }
293
Matt Davis4bcf3692018-08-13 18:11:48 +0000294 if (NumExplicitDefs) {
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000295 return make_error<InstructionError<MCInst>>(
296 "Expected more register operand definitions.", MCI);
Matt Davis4bcf3692018-08-13 18:11:48 +0000297 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000298
299 unsigned NumExplicitUses = MCI.getNumOperands() - i;
300 unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
301 if (MCDesc.hasOptionalDef()) {
302 assert(NumExplicitUses);
303 NumExplicitUses--;
304 }
305 unsigned TotalUses = NumExplicitUses + NumImplicitUses;
306 if (!TotalUses)
Matt Davis4bcf3692018-08-13 18:11:48 +0000307 return ErrorSuccess();
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000308
309 ID.Reads.resize(TotalUses);
310 for (unsigned CurrentUse = 0; CurrentUse < NumExplicitUses; ++CurrentUse) {
311 ReadDescriptor &Read = ID.Reads[CurrentUse];
312 Read.OpIndex = i + CurrentUse;
Andrea Di Biagio0a837ef2018-03-29 14:26:56 +0000313 Read.UseIndex = CurrentUse;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000314 Read.SchedClassID = SchedClassID;
Andrea Di Biagio23fbe7c2018-07-13 14:55:47 +0000315 LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex
316 << ", UseIndex=" << Read.UseIndex << '\n');
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000317 }
318
319 for (unsigned CurrentUse = 0; CurrentUse < NumImplicitUses; ++CurrentUse) {
320 ReadDescriptor &Read = ID.Reads[NumExplicitUses + CurrentUse];
Andrea Di Biagio21f0fdb2018-06-22 16:37:05 +0000321 Read.OpIndex = ~CurrentUse;
Andrea Di Biagio6fd62fe2018-04-02 13:46:49 +0000322 Read.UseIndex = NumExplicitUses + CurrentUse;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000323 Read.RegisterID = MCDesc.getImplicitUses()[CurrentUse];
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000324 Read.SchedClassID = SchedClassID;
Andrea Di Biagio23fbe7c2018-07-13 14:55:47 +0000325 LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex << ", RegisterID="
326 << MRI.getName(Read.RegisterID) << '\n');
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000327 }
Matt Davis4bcf3692018-08-13 18:11:48 +0000328 return ErrorSuccess();
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000329}
330
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000331Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
332 const MCInst &MCI) const {
333 if (ID.NumMicroOps != 0)
334 return ErrorSuccess();
335
336 bool UsesMemory = ID.MayLoad || ID.MayStore;
337 bool UsesBuffers = !ID.Buffers.empty();
338 bool UsesResources = !ID.Resources.empty();
339 if (!UsesMemory && !UsesBuffers && !UsesResources)
340 return ErrorSuccess();
341
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000342 StringRef Message;
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000343 if (UsesMemory) {
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000344 Message = "found an inconsistent instruction that decodes "
345 "into zero opcodes and that consumes load/store "
346 "unit resources.";
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000347 } else {
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000348 Message = "found an inconsistent instruction that decodes "
349 "to zero opcodes and that consumes scheduler "
350 "resources.";
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000351 }
352
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000353 return make_error<InstructionError<MCInst>>(Message, MCI);
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000354}
355
Matt Davis4bcf3692018-08-13 18:11:48 +0000356Expected<const InstrDesc &>
357InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000358 assert(STI.getSchedModel().hasInstrSchedModel() &&
359 "Itineraries are not yet supported!");
360
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000361 // Obtain the instruction descriptor from the opcode.
Andrea Di Biagio88347792018-07-09 12:30:55 +0000362 unsigned short Opcode = MCI.getOpcode();
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000363 const MCInstrDesc &MCDesc = MCII.get(Opcode);
364 const MCSchedModel &SM = STI.getSchedModel();
365
366 // Then obtain the scheduling class information from the instruction.
Andrea Di Biagio49c85912018-05-04 13:10:10 +0000367 unsigned SchedClassID = MCDesc.getSchedClass();
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000368 unsigned CPUID = SM.getProcessorID();
369
370 // Try to solve variant scheduling classes.
371 if (SchedClassID) {
372 while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
373 SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);
374
Matt Davis4bcf3692018-08-13 18:11:48 +0000375 if (!SchedClassID) {
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000376 return make_error<InstructionError<MCInst>>(
377 "unable to resolve scheduling class for write variant.", MCI);
Matt Davis4bcf3692018-08-13 18:11:48 +0000378 }
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000379 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000380
Matt Davis4bcf3692018-08-13 18:11:48 +0000381 // Check if this instruction is supported. Otherwise, report an error.
Andrea Di Biagio88347792018-07-09 12:30:55 +0000382 const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
383 if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000384 return make_error<InstructionError<MCInst>>(
385 "found an unsupported instruction in the input assembly sequence.",
386 MCI);
Andrea Di Biagio88347792018-07-09 12:30:55 +0000387 }
388
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000389 // Create a new empty descriptor.
Andrea Di Biagio7b3d1622018-03-20 12:58:34 +0000390 std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000391 ID->NumMicroOps = SCDesc.NumMicroOps;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000392
393 if (MCDesc.isCall()) {
394 // We don't correctly model calls.
Andrea Di Biagio24fb4fc2018-05-04 13:52:12 +0000395 WithColor::warning() << "found a call in the input assembly sequence.\n";
396 WithColor::note() << "call instructions are not correctly modeled. "
397 << "Assume a latency of 100cy.\n";
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000398 }
399
400 if (MCDesc.isReturn()) {
Andrea Di Biagio24fb4fc2018-05-04 13:52:12 +0000401 WithColor::warning() << "found a return instruction in the input"
402 << " assembly sequence.\n";
403 WithColor::note() << "program counter updates are ignored.\n";
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000404 }
405
406 ID->MayLoad = MCDesc.mayLoad();
407 ID->MayStore = MCDesc.mayStore();
408 ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
409
410 initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
Andrea Di Biagiodb66efc2018-04-25 09:38:58 +0000411 computeMaxLatency(*ID, MCDesc, SCDesc, STI);
Matt Davis4bcf3692018-08-13 18:11:48 +0000412 if (auto Err = populateWrites(*ID, MCI, SchedClassID))
413 return std::move(Err);
414 if (auto Err = populateReads(*ID, MCI, SchedClassID))
415 return std::move(Err);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000416
Nicola Zaghend34e60c2018-05-14 12:53:11 +0000417 LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
418 LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000419
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000420 // Sanity check on the instruction descriptor.
421 if (Error Err = verifyInstrDesc(*ID, MCI))
422 return std::move(Err);
423
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000424 // Now add the new descriptor.
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000425 SchedClassID = MCDesc.getSchedClass();
426 if (!SM.getSchedClassDesc(SchedClassID)->isVariant()) {
427 Descriptors[MCI.getOpcode()] = std::move(ID);
428 return *Descriptors[MCI.getOpcode()];
429 }
430
431 VariantDescriptors[&MCI] = std::move(ID);
432 return *VariantDescriptors[&MCI];
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000433}
434
Matt Davis4bcf3692018-08-13 18:11:48 +0000435Expected<const InstrDesc &>
436InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000437 if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
438 return *Descriptors[MCI.getOpcode()];
439
440 if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
441 return *VariantDescriptors[&MCI];
442
443 return createInstrDescImpl(MCI);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000444}
445
Matt Davis4bcf3692018-08-13 18:11:48 +0000446Expected<std::unique_ptr<Instruction>>
Andrea Di Biagio49c85912018-05-04 13:10:10 +0000447InstrBuilder::createInstruction(const MCInst &MCI) {
Matt Davis4bcf3692018-08-13 18:11:48 +0000448 Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
449 if (!DescOrErr)
450 return DescOrErr.takeError();
451 const InstrDesc &D = *DescOrErr;
Andrea Di Biagio7b3d1622018-03-20 12:58:34 +0000452 std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000453
Andrea Di Biagio9f9cdd42018-09-18 15:00:06 +0000454 // Check if this is a dependency breaking instruction.
Andrea Di Biagio8b6c3142018-09-19 15:57:45 +0000455 APInt Mask;
456
457 unsigned ProcID = STI.getSchedModel().getProcessorID();
458 bool IsZeroIdiom = MCIA.isZeroIdiom(MCI, Mask, ProcID);
459 bool IsDepBreaking =
460 IsZeroIdiom || MCIA.isDependencyBreaking(MCI, Mask, ProcID);
Andrea Di Biagio6eebbe02018-10-12 11:23:04 +0000461 if (MCIA.isOptimizableRegisterMove(MCI, ProcID))
462 NewIS->setOptimizableMove();
Andrea Di Biagio9f9cdd42018-09-18 15:00:06 +0000463
Andrea Di Biagiodb66efc2018-04-25 09:38:58 +0000464 // Initialize Reads first.
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000465 for (const ReadDescriptor &RD : D.Reads) {
466 int RegID = -1;
Andrea Di Biagio21f0fdb2018-06-22 16:37:05 +0000467 if (!RD.isImplicitRead()) {
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000468 // explicit read.
469 const MCOperand &Op = MCI.getOperand(RD.OpIndex);
470 // Skip non-register operands.
471 if (!Op.isReg())
472 continue;
473 RegID = Op.getReg();
474 } else {
475 // Implicit read.
476 RegID = RD.RegisterID;
477 }
478
479 // Skip invalid register operands.
480 if (!RegID)
481 continue;
482
483 // Okay, this is a register operand. Create a ReadState for it.
484 assert(RegID > 0 && "Invalid register ID found!");
Andrea Di Biagio9f9cdd42018-09-18 15:00:06 +0000485 auto RS = llvm::make_unique<ReadState>(RD, RegID);
486
Andrea Di Biagio8b6c3142018-09-19 15:57:45 +0000487 if (IsDepBreaking) {
488 // A mask of all zeroes means: explicit input operands are not
489 // independent.
490 if (Mask.isNullValue()) {
491 if (!RD.isImplicitRead())
492 RS->setIndependentFromDef();
493 } else {
494 // Check if this register operand is independent according to `Mask`.
495 // Note that Mask may not have enough bits to describe all explicit and
496 // implicit input operands. If this register operand doesn't have a
497 // corresponding bit in Mask, then conservatively assume that it is
498 // dependent.
499 if (Mask.getBitWidth() > RD.UseIndex) {
500 // Okay. This map describe register use `RD.UseIndex`.
501 if (Mask[RD.UseIndex])
502 RS->setIndependentFromDef();
503 }
504 }
505 }
Andrea Di Biagio9f9cdd42018-09-18 15:00:06 +0000506 NewIS->getUses().emplace_back(std::move(RS));
Andrea Di Biagio4704f032018-03-20 12:25:54 +0000507 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000508
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000509 // Early exit if there are no writes.
510 if (D.Writes.empty())
Matt Davis4bcf3692018-08-13 18:11:48 +0000511 return std::move(NewIS);
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000512
513 // Track register writes that implicitly clear the upper portion of the
514 // underlying super-registers using an APInt.
515 APInt WriteMask(D.Writes.size(), 0);
516
517 // Now query the MCInstrAnalysis object to obtain information about which
518 // register writes implicitly clear the upper portion of a super-register.
519 MCIA.clearsSuperRegisters(MRI, MCI, WriteMask);
520
Andrea Di Biagiodb66efc2018-04-25 09:38:58 +0000521 // Initialize writes.
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000522 unsigned WriteIndex = 0;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000523 for (const WriteDescriptor &WD : D.Writes) {
Andrea Di Biagio88347792018-07-09 12:30:55 +0000524 unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
525 : MCI.getOperand(WD.OpIndex).getReg();
Andrea Di Biagio35622482018-03-22 10:19:20 +0000526 // Check if this is a optional definition that references NoReg.
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000527 if (WD.IsOptionalDef && !RegID) {
528 ++WriteIndex;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000529 continue;
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000530 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000531
Andrea Di Biagio35622482018-03-22 10:19:20 +0000532 assert(RegID && "Expected a valid register ID!");
Andrea Di Biagiod65492a2018-06-20 14:30:17 +0000533 NewIS->getDefs().emplace_back(llvm::make_unique<WriteState>(
Andrea Di Biagio9f9cdd42018-09-18 15:00:06 +0000534 WD, RegID, /* ClearsSuperRegs */ WriteMask[WriteIndex],
535 /* WritesZero */ IsZeroIdiom));
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000536 ++WriteIndex;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000537 }
538
Matt Davis4bcf3692018-08-13 18:11:48 +0000539 return std::move(NewIS);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000540}
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000541} // namespace mca