blob: 1e08f898523679849974fe63729f8725a93e62ca [file] [log] [blame]
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +00001//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
2//
Chandler Carruth2946cd72019-01-19 08:50:56 +00003// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +00006//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file implements the InstrBuilder interface.
11///
12//===----------------------------------------------------------------------===//
13
Clement Courbetcc5e6a72018-12-17 08:08:31 +000014#include "llvm/MCA/InstrBuilder.h"
Andrea Di Biagio2145b132018-06-20 10:08:11 +000015#include "llvm/ADT/APInt.h"
Andrea Di Biagio2008c7c2018-06-04 12:23:07 +000016#include "llvm/ADT/DenseMap.h"
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000017#include "llvm/MC/MCInst.h"
18#include "llvm/Support/Debug.h"
Andrea Di Biagio24fb4fc2018-05-04 13:52:12 +000019#include "llvm/Support/WithColor.h"
Andrea Di Biagio88347792018-07-09 12:30:55 +000020#include "llvm/Support/raw_ostream.h"
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000021
22#define DEBUG_TYPE "llvm-mca"
23
Fangrui Song5a8fd652018-10-30 15:56:08 +000024namespace llvm {
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +000025namespace mca {
26
Andrea Di Biagio77c26ae2018-10-25 11:51:34 +000027InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
28 const llvm::MCInstrInfo &mcii,
29 const llvm::MCRegisterInfo &mri,
Andrea Di Biagio45060672018-12-17 14:00:37 +000030 const llvm::MCInstrAnalysis *mcia)
Andrea Di Biagio42720602018-11-24 18:40:45 +000031 : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), FirstCallInst(true),
32 FirstReturnInst(true) {
Andrea Di Biagio97ed0762019-01-10 13:59:13 +000033 const MCSchedModel &SM = STI.getSchedModel();
34 ProcResourceMasks.resize(SM.getNumProcResourceKinds());
Andrea Di Biagio77c26ae2018-10-25 11:51:34 +000035 computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
36}
37
// Populates ID.Resources and ID.Buffers from the scheduling class write
// resource entries, normalizing resource-group cycles so they match the
// expansion performed by SubtargetEmitter::ExpandProcResource() in Tablegen.
// Also computes ID.MustIssueImmediately for instructions that only consume
// in-order (BufferSize <= 1) resources and have a dispatch hazard.
static void initializeUsedResources(InstrDesc &ID,
                                    const MCSchedClassDesc &SCDesc,
                                    const MCSubtargetInfo &STI,
                                    ArrayRef<uint64_t> ProcResourceMasks) {
  const MCSchedModel &SM = STI.getSchedModel();

  // Populate resources consumed.
  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
  std::vector<ResourcePlusCycles> Worklist;

  // Track cycles contributed by resources that are in a "Super" relationship.
  // This is required if we want to correctly match the behavior of method
  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
  // of "consumed" processor resources and resource cycles, the logic in
  // ExpandProcResource() doesn't update the number of resource cycles
  // contributed by a "Super" resource to a group.
  // We need to take this into account when we find that a processor resource is
  // part of a group, and it is also used as the "Super" of other resources.
  // This map stores the number of cycles contributed by sub-resources that are
  // part of a "Super" resource. The key value is the "Super" resource mask ID.
  DenseMap<uint64_t, unsigned> SuperResources;

  unsigned NumProcResources = SM.getNumProcResourceKinds();
  // One bit per processor resource index that declares a buffer.
  APInt Buffers(NumProcResources, 0);

  bool AllInOrderResources = true;
  bool AnyDispatchHazards = false;
  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
    // A negative BufferSize means an out-of-order (unbounded) resource.
    if (PR.BufferSize < 0) {
      AllInOrderResources = false;
    } else {
      Buffers.setBit(PRE->ProcResourceIdx);
      // BufferSize == 0 means instructions must issue directly from the
      // dispatch stage (a dispatch hazard).
      AnyDispatchHazards |= (PR.BufferSize == 0);
      AllInOrderResources &= (PR.BufferSize <= 1);
    }

    CycleSegment RCy(0, PRE->Cycles, false);
    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
    // Record cycles this entry contributes to its "Super" resource (if any).
    if (PR.SuperIdx) {
      uint64_t Super = ProcResourceMasks[PR.SuperIdx];
      SuperResources[Super] += PRE->Cycles;
    }
  }

  ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;

  // Sort elements by mask popcount, so that we prioritize resource units over
  // resource groups, and smaller groups over larger groups.
  sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
    unsigned popcntA = countPopulation(A.first);
    unsigned popcntB = countPopulation(B.first);
    if (popcntA < popcntB)
      return true;
    if (popcntA > popcntB)
      return false;
    return A.first < B.first;
  });

  uint64_t UsedResourceUnits = 0;

  // Remove cycles contributed by smaller resources.
  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
    ResourcePlusCycles &A = Worklist[I];
    // A zero-cycle entry marks a fully reserved resource.
    if (!A.second.size()) {
      A.second.NumUnits = 0;
      A.second.setReserved();
      ID.Resources.emplace_back(A);
      continue;
    }

    ID.Resources.emplace_back(A);
    uint64_t NormalizedMask = A.first;
    if (countPopulation(A.first) == 1) {
      UsedResourceUnits |= A.first;
    } else {
      // Remove the leading 1 from the resource group mask.
      NormalizedMask ^= PowerOf2Floor(NormalizedMask);
    }

    // Subtract A's cycles from every larger group that contains A, minus the
    // cycles A contributed as a sub-resource of a "Super" (see map above).
    for (unsigned J = I + 1; J < E; ++J) {
      ResourcePlusCycles &B = Worklist[J];
      if ((NormalizedMask & B.first) == NormalizedMask) {
        B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
        if (countPopulation(B.first) > 1)
          B.second.NumUnits++;
      }
    }
  }

  // A SchedWrite may specify a number of cycles in which a resource group
  // is reserved. For example (on target x86; cpu Haswell):
  //
  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
  //    let ResourceCycles = [2, 2, 3];
  //  }
  //
  // This means:
  // Resource units HWPort0 and HWPort1 are both used for 2cy.
  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
  // will not be usable for 2 entire cycles from instruction issue.
  //
  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
  // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
  // extra delay on top of the 2 cycles latency.
  // During those extra cycles, HWPort01 is not usable by other instructions.
  for (ResourcePlusCycles &RPC : ID.Resources) {
    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
      // Remove the leading 1 from the resource group mask.
      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
      // If every unit of the group is individually consumed, the group itself
      // is effectively reserved for the duration of the write.
      if ((Mask & UsedResourceUnits) == Mask)
        RPC.second.setReserved();
    }
  }

  // Identify extra buffers that are consumed through super resources.
  for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
    for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
      const MCProcResourceDesc &PR = *SM.getProcResource(I);
      if (PR.BufferSize == -1)
        continue;

      uint64_t Mask = ProcResourceMasks[I];
      if (Mask != SR.first && ((Mask & SR.first) == SR.first))
        Buffers.setBit(I);
    }
  }

  // Now set the buffers.
  if (unsigned NumBuffers = Buffers.countPopulation()) {
    ID.Buffers.resize(NumBuffers);
    for (unsigned I = 0, E = NumProcResources; I < E && NumBuffers; ++I) {
      if (Buffers[I]) {
        --NumBuffers;
        ID.Buffers[NumBuffers] = ProcResourceMasks[I];
      }
    }
  }

  LLVM_DEBUG({
    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
      dbgs() << "\t\tMask=" << format_hex(R.first, 16) << ", "
             << "cy=" << R.second.size() << '\n';
    for (const uint64_t R : ID.Buffers)
      dbgs() << "\t\tBuffer Mask=" << format_hex(R, 16) << '\n';
  });
}
188
189static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
190 const MCSchedClassDesc &SCDesc,
191 const MCSubtargetInfo &STI) {
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000192 if (MCDesc.isCall()) {
193 // We cannot estimate how long this call will take.
194 // Artificially set an arbitrarily high latency (100cy).
Andrea Di Biagioc95a1302018-03-13 15:59:59 +0000195 ID.MaxLatency = 100U;
196 return;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000197 }
198
Andrea Di Biagioc95a1302018-03-13 15:59:59 +0000199 int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
200 // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
201 ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000202}
203
// Verifies that the MCInst carries at least as many register operands as the
// opcode descriptor declares register definitions, and that the optional
// definition (when declared) is a register operand.
// Returns ErrorSuccess() on success, or an InstructionError otherwise.
static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
  // Count register definitions, and skip non register operands in the process.
  unsigned I, E;
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  for (I = 0, E = MCI.getNumOperands(); NumExplicitDefs && I < E; ++I) {
    const MCOperand &Op = MCI.getOperand(I);
    if (Op.isReg())
      --NumExplicitDefs;
  }

  // Non-zero here means the MCInst ran out of operands before all declared
  // register definitions were found.
  if (NumExplicitDefs) {
    return make_error<InstructionError<MCInst>>(
        "Expected more register operand definitions.", MCI);
  }

  if (MCDesc.hasOptionalDef()) {
    // Always assume that the optional definition is the last operand.
    // NOTE(review): Op is fetched at index getNumOperands()-1 before the
    // "I == MCI.getNumOperands()" check below — this presumably relies on the
    // MCInst having at least getNumOperands() operands; verify against callers.
    const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
    if (I == MCI.getNumOperands() || !Op.isReg()) {
      std::string Message =
          "expected a register operand for an optional definition. Instruction "
          "has not been correctly analyzed.";
      return make_error<InstructionError<MCInst>>(Message, MCI);
    }
  }

  return ErrorSuccess();
}
232
// Populates ID.Writes with one WriteDescriptor per register definition of MCI:
// explicit defs first, then implicit defs, then the optional def (if any),
// then any register operands found in the variadic tail. Latencies come from
// the scheduling class write-latency entries when available, and default to
// ID.MaxLatency otherwise (so computeMaxLatency() must have run already).
void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
                                  unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  const MCSchedModel &SM = STI.getSchedModel();
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);

  // Assumptions made by this algorithm:
  // 1. The number of explicit and implicit register definitions in a MCInst
  //    matches the number of explicit and implicit definitions according to
  //    the opcode descriptor (MCInstrDesc).
  // 2. Uses start at index #(MCDesc.getNumDefs()).
  // 3. There can only be a single optional register definition, and it is
  //    always the last operand of the sequence (excluding extra operands
  //    contributed by variadic opcodes).
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions is
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
  //
  // About assumption 1.
  // The algorithm allows non-register operands between register operand
  // definitions. This helps to handle some special ARM instructions with
  // implicit operand increment (-mtriple=armv7):
  //
  // vld1.32 {d18, d19}, [r1]!  @ <MCInst #1463 VLD1q32wb_fixed
  //                            @  <MCOperand Reg:59>
  //                            @  <MCOperand Imm:0>     (!!)
  //                            @  <MCOperand Reg:67>
  //                            @  <MCOperand Imm:0>
  //                            @  <MCOperand Imm:14>
  //                            @  <MCOperand Reg:0>>
  //
  // MCDesc reports:
  //  6 explicit operands.
  //  1 optional definition
  //  2 explicit definitions (!!)
  //
  // The presence of an 'Imm' operand between the two register definitions
  // breaks the assumption that "register definitions are always at the
  // beginning of the operand sequence".
  //
  // To workaround this issue, this algorithm ignores (i.e. skips) any
  // non-register operands between register definitions. The optional
  // definition is still at index #(NumOperands-1).
  //
  // According to assumption 2. register reads start at #(NumExplicitDefs-1).
  // That means, register R1 from the example is both read and written.
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
    TotalDefs++;

  // Extra operands beyond the opcode descriptor belong to the variadic tail.
  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  ID.Writes.resize(TotalDefs + NumVariadicOps);
  // Iterate over the operands list, and skip non-register operands.
  // The first NumExplictDefs register operands are expected to be register
  // definitions.
  unsigned CurrentDef = 0;
  unsigned i = 0;
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = i;
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }
    Write.IsOptionalDef = false;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
    CurrentDef++;
  }

  assert(CurrentDef == NumExplicitDefs &&
         "Expected more register operand definitions.");
  // Implicit definitions: OpIndex is the bitwise-NOT of the implicit-def
  // ordinal, which distinguishes them from explicit operand indices.
  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
    unsigned Index = NumExplicitDefs + CurrentDef;
    WriteDescriptor &Write = ID.Writes[Index];
    Write.OpIndex = ~CurrentDef;
    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
    if (Index < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, Index);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }

    Write.IsOptionalDef = false;
    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
             << ", PhysReg=" << MRI.getName(Write.RegisterID)
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (MCDesc.hasOptionalDef()) {
    WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
    Write.OpIndex = MCDesc.getNumOperands() - 1;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = true;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (!NumVariadicOps)
    return;

  // FIXME: if an instruction opcode is flagged 'mayStore', and it has no
  // "unmodeledSideEffects', then this logic optimistically assumes that any
  // extra register operands in the variadic sequence is not a register
  // definition.
  //
  // Otherwise, we conservatively assume that any register operand from the
  // variadic sequence is both a register read and a register write.
  bool AssumeUsesOnly = MCDesc.mayStore() && !MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = OpIndex;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = false;
    ++CurrentDef;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  // Shrink to the number of writes actually materialized (non-register
  // variadic operands were skipped).
  ID.Writes.resize(CurrentDef);
}
402
// Populates ID.Reads with one ReadDescriptor per register use of MCI:
// explicit uses (starting after the declared defs), then implicit uses, then
// register operands from the variadic tail. UseIndex is laid out so that
// implicit uses directly follow explicit uses, matching the layout expected
// by ReadAdvance lookups.
void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
                                 unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
  unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
  // Remove the optional definition.
  if (MCDesc.hasOptionalDef())
    --NumExplicitUses;
  // Extra operands beyond the opcode descriptor belong to the variadic tail.
  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
  ID.Reads.resize(TotalUses);
  unsigned CurrentUse = 0;
  for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
       ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    // Skip non-register operands (e.g. immediates).
    if (!Op.isReg())
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  // For the purpose of ReadAdvance, implicit uses come directly after explicit
  // uses. The "UseIndex" must be updated according to that implicit layout.
  // OpIndex is the bitwise-NOT of the implicit-use ordinal, which
  // distinguishes implicit reads from explicit operand indices.
  for (unsigned I = 0; I < NumImplicitUses; ++I) {
    ReadDescriptor &Read = ID.Reads[CurrentUse + I];
    Read.OpIndex = ~I;
    Read.UseIndex = NumExplicitUses + I;
    Read.RegisterID = MCDesc.getImplicitUses()[I];
    Read.SchedClassID = SchedClassID;
    LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << ", RegisterID="
                      << MRI.getName(Read.RegisterID) << '\n');
  }

  CurrentUse += NumImplicitUses;

  // FIXME: If an instruction opcode is marked as 'mayLoad', and it has no
  // "unmodeledSideEffects", then this logic optimistically assumes that any
  // extra register operands in the variadic sequence are not register
  // definitions.

  bool AssumeDefsOnly = !MCDesc.mayStore() && MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  // Shrink to the number of reads actually materialized (non-register
  // operands were skipped).
  ID.Reads.resize(CurrentUse);
}
469
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000470Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
471 const MCInst &MCI) const {
472 if (ID.NumMicroOps != 0)
473 return ErrorSuccess();
474
475 bool UsesMemory = ID.MayLoad || ID.MayStore;
476 bool UsesBuffers = !ID.Buffers.empty();
477 bool UsesResources = !ID.Resources.empty();
478 if (!UsesMemory && !UsesBuffers && !UsesResources)
479 return ErrorSuccess();
480
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000481 StringRef Message;
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000482 if (UsesMemory) {
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000483 Message = "found an inconsistent instruction that decodes "
484 "into zero opcodes and that consumes load/store "
485 "unit resources.";
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000486 } else {
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000487 Message = "found an inconsistent instruction that decodes "
488 "to zero opcodes and that consumes scheduler "
489 "resources.";
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000490 }
491
Andrea Di Biagio083addf2018-10-24 10:56:47 +0000492 return make_error<InstructionError<MCInst>>(Message, MCI);
Andrea Di Biagioaacd5e12018-10-04 10:36:49 +0000493}
494
// Builds a new InstrDesc for MCI: resolves variant scheduling classes, checks
// that the opcode is supported by the scheduling model, fills in resource
// usage, latencies, writes and reads, and caches the result. Descriptors for
// non-variadic, non-variant opcodes are cached per-opcode; otherwise they are
// cached per-MCInst instance.
Expected<const InstrDesc &>
InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
  assert(STI.getSchedModel().hasInstrSchedModel() &&
         "Itineraries are not yet supported!");

  // Obtain the instruction descriptor from the opcode.
  unsigned short Opcode = MCI.getOpcode();
  const MCInstrDesc &MCDesc = MCII.get(Opcode);
  const MCSchedModel &SM = STI.getSchedModel();

  // Then obtain the scheduling class information from the instruction.
  unsigned SchedClassID = MCDesc.getSchedClass();
  bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();

  // Try to solve variant scheduling classes.
  if (IsVariant) {
    unsigned CPUID = SM.getProcessorID();
    // Variants can resolve to other variants; iterate until a concrete class
    // is found (SchedClassID == 0 means resolution failed).
    while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
      SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);

    if (!SchedClassID) {
      return make_error<InstructionError<MCInst>>(
          "unable to resolve scheduling class for write variant.", MCI);
    }
  }

  // Check if this instruction is supported. Otherwise, report an error.
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
  if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
    return make_error<InstructionError<MCInst>>(
        "found an unsupported instruction in the input assembly sequence.",
        MCI);
  }

  LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
  LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');

  // Create a new empty descriptor.
  std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
  ID->NumMicroOps = SCDesc.NumMicroOps;
  ID->SchedClassID = SchedClassID;

  // Warn once (per run) about constructs the tool cannot model precisely.
  if (MCDesc.isCall() && FirstCallInst) {
    // We don't correctly model calls.
    WithColor::warning() << "found a call in the input assembly sequence.\n";
    WithColor::note() << "call instructions are not correctly modeled. "
                      << "Assume a latency of 100cy.\n";
    FirstCallInst = false;
  }

  if (MCDesc.isReturn() && FirstReturnInst) {
    WithColor::warning() << "found a return instruction in the input"
                         << " assembly sequence.\n";
    WithColor::note() << "program counter updates are ignored.\n";
    FirstReturnInst = false;
  }

  ID->MayLoad = MCDesc.mayLoad();
  ID->MayStore = MCDesc.mayStore();
  ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
  ID->BeginGroup = SCDesc.BeginGroup;
  ID->EndGroup = SCDesc.EndGroup;

  // Latency must be computed before the writes are populated, because
  // populateWrites() uses ID->MaxLatency as the default write latency.
  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
  computeMaxLatency(*ID, MCDesc, SCDesc, STI);

  if (Error Err = verifyOperands(MCDesc, MCI))
    return std::move(Err);

  populateWrites(*ID, MCI, SchedClassID);
  populateReads(*ID, MCI, SchedClassID);

  LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
  LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');

  // Sanity check on the instruction descriptor.
  if (Error Err = verifyInstrDesc(*ID, MCI))
    return std::move(Err);

  // Now add the new descriptor.
  SchedClassID = MCDesc.getSchedClass();
  bool IsVariadic = MCDesc.isVariadic();
  if (!IsVariadic && !IsVariant) {
    Descriptors[MCI.getOpcode()] = std::move(ID);
    return *Descriptors[MCI.getOpcode()];
  }

  // Variadic/variant instructions may get different descriptors per MCInst,
  // so cache by instruction pointer instead of by opcode.
  VariantDescriptors[&MCI] = std::move(ID);
  return *VariantDescriptors[&MCI];
}
585
Matt Davis4bcf3692018-08-13 18:11:48 +0000586Expected<const InstrDesc &>
587InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
Andrea Di Biagio39e5a562018-06-04 15:43:09 +0000588 if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
589 return *Descriptors[MCI.getOpcode()];
590
591 if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
592 return *VariantDescriptors[&MCI];
593
594 return createInstrDescImpl(MCI);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000595}
596
Matt Davis4bcf3692018-08-13 18:11:48 +0000597Expected<std::unique_ptr<Instruction>>
Andrea Di Biagio49c85912018-05-04 13:10:10 +0000598InstrBuilder::createInstruction(const MCInst &MCI) {
Matt Davis4bcf3692018-08-13 18:11:48 +0000599 Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
600 if (!DescOrErr)
601 return DescOrErr.takeError();
602 const InstrDesc &D = *DescOrErr;
Andrea Di Biagio7b3d1622018-03-20 12:58:34 +0000603 std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000604
Andrea Di Biagio9f9cdd42018-09-18 15:00:06 +0000605 // Check if this is a dependency breaking instruction.
Andrea Di Biagio8b6c3142018-09-19 15:57:45 +0000606 APInt Mask;
607
Andrea Di Biagio45060672018-12-17 14:00:37 +0000608 bool IsZeroIdiom = false;
609 bool IsDepBreaking = false;
610 if (MCIA) {
611 unsigned ProcID = STI.getSchedModel().getProcessorID();
612 IsZeroIdiom = MCIA->isZeroIdiom(MCI, Mask, ProcID);
613 IsDepBreaking =
614 IsZeroIdiom || MCIA->isDependencyBreaking(MCI, Mask, ProcID);
615 if (MCIA->isOptimizableRegisterMove(MCI, ProcID))
616 NewIS->setOptimizableMove();
617 }
Andrea Di Biagio9f9cdd42018-09-18 15:00:06 +0000618
Andrea Di Biagiodb66efc2018-04-25 09:38:58 +0000619 // Initialize Reads first.
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000620 for (const ReadDescriptor &RD : D.Reads) {
621 int RegID = -1;
Andrea Di Biagio21f0fdb2018-06-22 16:37:05 +0000622 if (!RD.isImplicitRead()) {
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000623 // explicit read.
624 const MCOperand &Op = MCI.getOperand(RD.OpIndex);
625 // Skip non-register operands.
626 if (!Op.isReg())
627 continue;
628 RegID = Op.getReg();
629 } else {
630 // Implicit read.
631 RegID = RD.RegisterID;
632 }
633
634 // Skip invalid register operands.
635 if (!RegID)
636 continue;
637
638 // Okay, this is a register operand. Create a ReadState for it.
639 assert(RegID > 0 && "Invalid register ID found!");
Andrea Di Biagio1e6d0aa2018-10-25 17:03:51 +0000640 NewIS->getUses().emplace_back(RD, RegID);
641 ReadState &RS = NewIS->getUses().back();
Andrea Di Biagio9f9cdd42018-09-18 15:00:06 +0000642
Andrea Di Biagio8b6c3142018-09-19 15:57:45 +0000643 if (IsDepBreaking) {
644 // A mask of all zeroes means: explicit input operands are not
645 // independent.
646 if (Mask.isNullValue()) {
647 if (!RD.isImplicitRead())
Andrea Di Biagio1e6d0aa2018-10-25 17:03:51 +0000648 RS.setIndependentFromDef();
Andrea Di Biagio8b6c3142018-09-19 15:57:45 +0000649 } else {
650 // Check if this register operand is independent according to `Mask`.
651 // Note that Mask may not have enough bits to describe all explicit and
652 // implicit input operands. If this register operand doesn't have a
653 // corresponding bit in Mask, then conservatively assume that it is
654 // dependent.
655 if (Mask.getBitWidth() > RD.UseIndex) {
656 // Okay. This map describe register use `RD.UseIndex`.
657 if (Mask[RD.UseIndex])
Andrea Di Biagio1e6d0aa2018-10-25 17:03:51 +0000658 RS.setIndependentFromDef();
Andrea Di Biagio8b6c3142018-09-19 15:57:45 +0000659 }
660 }
661 }
Andrea Di Biagio4704f032018-03-20 12:25:54 +0000662 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000663
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000664 // Early exit if there are no writes.
665 if (D.Writes.empty())
Matt Davis4bcf3692018-08-13 18:11:48 +0000666 return std::move(NewIS);
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000667
668 // Track register writes that implicitly clear the upper portion of the
669 // underlying super-registers using an APInt.
670 APInt WriteMask(D.Writes.size(), 0);
671
672 // Now query the MCInstrAnalysis object to obtain information about which
673 // register writes implicitly clear the upper portion of a super-register.
Andrea Di Biagio45060672018-12-17 14:00:37 +0000674 if (MCIA)
675 MCIA->clearsSuperRegisters(MRI, MCI, WriteMask);
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000676
Andrea Di Biagiodb66efc2018-04-25 09:38:58 +0000677 // Initialize writes.
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000678 unsigned WriteIndex = 0;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000679 for (const WriteDescriptor &WD : D.Writes) {
Andrea Di Biagio88347792018-07-09 12:30:55 +0000680 unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
681 : MCI.getOperand(WD.OpIndex).getReg();
Andrea Di Biagio35622482018-03-22 10:19:20 +0000682 // Check if this is a optional definition that references NoReg.
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000683 if (WD.IsOptionalDef && !RegID) {
684 ++WriteIndex;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000685 continue;
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000686 }
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000687
Andrea Di Biagio35622482018-03-22 10:19:20 +0000688 assert(RegID && "Expected a valid register ID!");
Andrea Di Biagio7e32cc82018-11-23 20:26:57 +0000689 NewIS->getDefs().emplace_back(WD, RegID,
690 /* ClearsSuperRegs */ WriteMask[WriteIndex],
691 /* WritesZero */ IsZeroIdiom);
Andrea Di Biagio2145b132018-06-20 10:08:11 +0000692 ++WriteIndex;
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000693 }
694
Matt Davis4bcf3692018-08-13 18:11:48 +0000695 return std::move(NewIS);
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000696}
Andrea Di Biagio3a6b0922018-03-08 13:05:02 +0000697} // namespace mca
Fangrui Song5a8fd652018-10-30 15:56:08 +0000698} // namespace llvm