//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the InstrBuilder interface.
///
//===----------------------------------------------------------------------===//

#include "InstrBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "llvm-mca"

namespace mca {

using namespace llvm;

static void initializeUsedResources(InstrDesc &ID,
                                    const MCSchedClassDesc &SCDesc,
                                    const MCSubtargetInfo &STI,
                                    ArrayRef<uint64_t> ProcResourceMasks) {
  const MCSchedModel &SM = STI.getSchedModel();

  // Populate resources consumed.
  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
  std::vector<ResourcePlusCycles> Worklist;

  // Track cycles contributed by resources that are in a "Super" relationship.
  // This is required if we want to correctly match the behavior of method
  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
  // of "consumed" processor resources and resource cycles, the logic in
  // ExpandProcResource() doesn't update the number of resource cycles
  // contributed by a "Super" resource to a group.
  // We need to take this into account when we find that a processor resource
  // is part of a group, and it is also used as the "Super" of other resources.
  // This map stores the number of cycles contributed by sub-resources that are
  // part of a "Super" resource. The key value is the "Super" resource mask ID.
  DenseMap<uint64_t, unsigned> SuperResources;
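  // For example (hypothetical resources): if unit P0 is the "Super" of unit
  // P0Div, then a write that consumes P0Div for 3cy records {P0-mask -> 3}
  // in this map. Those 3 cycles are later discounted when P0's contribution
  // to a group such as P01 is computed, matching ExpandProcResource().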

  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
    if (PR.BufferSize != -1)
      ID.Buffers.push_back(Mask);
    CycleSegment RCy(0, PRE->Cycles, false);
    Worklist.emplace_back(Mask, ResourceUsage(RCy));
    if (PR.SuperIdx) {
      uint64_t Super = ProcResourceMasks[PR.SuperIdx];
      SuperResources[Super] += PRE->Cycles;
    }
  }

  // Sort elements by mask popcount, so that we prioritize resource units over
  // resource groups, and smaller groups over larger groups.
  llvm::sort(Worklist.begin(), Worklist.end(),
             [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
               unsigned popcntA = countPopulation(A.first);
               unsigned popcntB = countPopulation(B.first);
               if (popcntA < popcntB)
                 return true;
               if (popcntA > popcntB)
                 return false;
               return A.first < B.first;
             });
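  // Example (hypothetical masks): after sorting, unit 0b0010 (popcount 1)
  // precedes group 0b0110 (popcount 2), which in turn precedes group 0b1110
  // (popcount 3). Ties on popcount fall back to the numeric mask value.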

  uint64_t UsedResourceUnits = 0;

  // Remove cycles contributed by smaller resources.
  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
    ResourcePlusCycles &A = Worklist[I];
    if (!A.second.size()) {
      A.second.NumUnits = 0;
      A.second.setReserved();
      ID.Resources.emplace_back(A);
      continue;
    }

    ID.Resources.emplace_back(A);
    uint64_t NormalizedMask = A.first;
    if (countPopulation(A.first) == 1) {
      UsedResourceUnits |= A.first;
    } else {
      // Remove the leading 1 from the resource group mask.
      NormalizedMask ^= PowerOf2Floor(NormalizedMask);
    }

    for (unsigned J = I + 1; J < E; ++J) {
      ResourcePlusCycles &B = Worklist[J];
      if ((NormalizedMask & B.first) == NormalizedMask) {
        B.second.CS.Subtract(A.second.size() - SuperResources[A.first]);
        if (countPopulation(B.first) > 1)
          B.second.NumUnits++;
      }
    }
  }
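  // Worked example (hypothetical cycles): given a sorted Worklist of
  // [P0:1cy, P1:1cy, P01:4cy], processing P0 and then P1 subtracts 1cy each
  // from group P01 (whose mask contains both units), leaving 2cy of residual
  // group usage; NumUnits of P01 is incremented once per contained unit.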

  // A SchedWrite may specify a number of cycles in which a resource group
  // is reserved. For example (on target x86; cpu Haswell):
  //
  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
  //    let ResourceCycles = [2, 2, 3];
  //  }
  //
  // This means:
  // Resource units HWPort0 and HWPort1 are both used for 2cy.
  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
  // will not be usable for 2 entire cycles from instruction issue.
  //
  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
  // of 3 cycles for HWPort01. This tool assumes that those 3cy are an extra
  // delay on top of the 2cy consumed by the underlying units.
  // During those extra cycles, HWPort01 is not usable by other instructions.
  for (ResourcePlusCycles &RPC : ID.Resources) {
    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
      // Remove the leading 1 from the resource group mask.
      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
      if ((Mask & UsedResourceUnits) == Mask)
        RPC.second.setReserved();
    }
  }
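  // E.g. (hypothetical): if every unit of group P01 already appears in
  // UsedResourceUnits, the group is marked as reserved; it then constrains
  // issue for its residual cycles instead of acquiring one of its units.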

  LLVM_DEBUG({
    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
      dbgs() << "\t\tMask=" << R.first << ", cy=" << R.second.size() << '\n';
    for (const uint64_t R : ID.Buffers)
      dbgs() << "\t\tBuffer Mask=" << R << '\n';
  });
}

static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
                              const MCSchedClassDesc &SCDesc,
                              const MCSubtargetInfo &STI) {
  if (MCDesc.isCall()) {
    // We cannot estimate how long this call will take.
    // Artificially set an arbitrarily high latency (100cy).
    ID.MaxLatency = 100U;
    return;
  }

  int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
  // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
  ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
}

static void populateWrites(InstrDesc &ID, const MCInst &MCI,
                           const MCInstrDesc &MCDesc,
                           const MCSchedClassDesc &SCDesc,
                           const MCSubtargetInfo &STI) {
  // For now, this algorithm makes the following (strong) assumptions:
  // * The number of explicit and implicit register definitions in a MCInst
  //   matches the number of explicit and implicit definitions according to
  //   the opcode descriptor (MCInstrDesc).
  // * Register definitions take precedence over register uses in the operands
  //   list.
  // * If an opcode specifies an optional definition, then the optional
  //   definition is always the last operand in the sequence, and it can be
  //   set to zero (i.e. "no register").
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions are
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
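  //
  // For example (x86): "addl %esi, %edi" lowers to an ADD32rr MCInst whose
  // first register operand is the explicit definition of EDI; the implicit
  // definition of EFLAGS is only described by the opcode descriptor.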
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
    TotalDefs++;
  ID.Writes.resize(TotalDefs);
  // Iterate over the operands list, and skip non-register operands.
  // The first NumExplicitDefs register operands are expected to be register
  // definitions.
  unsigned CurrentDef = 0;
  unsigned i = 0;
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = i;
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }
    Write.IsOptionalDef = false;
    LLVM_DEBUG({
      dbgs() << "\t\tOpIdx=" << Write.OpIndex << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
    CurrentDef++;
  }

  if (CurrentDef != NumExplicitDefs)
    llvm::report_fatal_error(
        "error: Expected more register operand definitions.");

  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
    unsigned Index = NumExplicitDefs + CurrentDef;
    WriteDescriptor &Write = ID.Writes[Index];
    // Implicit writes are encoded as the bitwise-not of the def index; a
    // negative OpIndex is what isImplicitWrite() later checks for.
    Write.OpIndex = ~CurrentDef;
    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
    if (Index < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, Index);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }

    Write.IsOptionalDef = false;
    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
    LLVM_DEBUG(dbgs() << "\t\tOpIdx=" << Write.OpIndex << ", PhysReg="
                      << Write.RegisterID << ", Latency=" << Write.Latency
                      << ", WriteResourceID=" << Write.SClassOrWriteResourceID
                      << '\n');
  }

  if (MCDesc.hasOptionalDef()) {
    // Always assume that the optional definition is the last operand of the
    // MCInst sequence.
    const MCOperand &Op = MCI.getOperand(MCI.getNumOperands() - 1);
    if (i == MCI.getNumOperands() || !Op.isReg())
      llvm::report_fatal_error(
          "error: expected a register operand for an optional "
          "definition. Instruction has not been correctly analyzed.\n",
          false);

    WriteDescriptor &Write = ID.Writes[TotalDefs - 1];
    Write.OpIndex = MCI.getNumOperands() - 1;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = true;
  }
}

static void populateReads(InstrDesc &ID, const MCInst &MCI,
                          const MCInstrDesc &MCDesc,
                          const MCSchedClassDesc &SCDesc,
                          const MCSubtargetInfo &STI) {
  unsigned SchedClassID = MCDesc.getSchedClass();
  unsigned i = 0;
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  // Skip explicit definitions.
  for (; i < MCI.getNumOperands() && NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (Op.isReg())
      NumExplicitDefs--;
  }

  if (NumExplicitDefs)
    llvm::report_fatal_error(
        "error: Expected more register operand definitions.", false);

  unsigned NumExplicitUses = MCI.getNumOperands() - i;
  unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
  if (MCDesc.hasOptionalDef()) {
    assert(NumExplicitUses);
    NumExplicitUses--;
  }
  unsigned TotalUses = NumExplicitUses + NumImplicitUses;
  if (!TotalUses)
    return;

  ID.Reads.resize(TotalUses);
  for (unsigned CurrentUse = 0; CurrentUse < NumExplicitUses; ++CurrentUse) {
    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = i + CurrentUse;
    Read.UseIndex = CurrentUse;
    Read.SchedClassID = SchedClassID;
    LLVM_DEBUG(dbgs() << "\t\tOpIdx=" << Read.OpIndex << '\n');
  }

  for (unsigned CurrentUse = 0; CurrentUse < NumImplicitUses; ++CurrentUse) {
    ReadDescriptor &Read = ID.Reads[NumExplicitUses + CurrentUse];
    // Implicit reads are encoded as the bitwise-not of the use index, so that
    // isImplicitRead() can identify them from a negative OpIndex.
    Read.OpIndex = ~CurrentUse;
    Read.UseIndex = NumExplicitUses + CurrentUse;
    Read.RegisterID = MCDesc.getImplicitUses()[CurrentUse];
    Read.SchedClassID = SchedClassID;
    LLVM_DEBUG(dbgs() << "\t\tOpIdx=" << Read.OpIndex
                      << ", RegisterID=" << Read.RegisterID << '\n');
  }
}

const InstrDesc &InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
  assert(STI.getSchedModel().hasInstrSchedModel() &&
         "Itineraries are not yet supported!");

  unsigned short Opcode = MCI.getOpcode();
  // Obtain the instruction descriptor from the opcode.
  const MCInstrDesc &MCDesc = MCII.get(Opcode);
  const MCSchedModel &SM = STI.getSchedModel();

  // Then obtain the scheduling class information from the instruction.
  unsigned SchedClassID = MCDesc.getSchedClass();
  unsigned CPUID = SM.getProcessorID();

  // Try to solve variant scheduling classes.
  if (SchedClassID) {
    while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
      SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);

    if (!SchedClassID)
      llvm::report_fatal_error("unable to resolve this variant class.");
  }
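  // Note: the loop above iterates because a variant class may resolve to
  // another variant class. For example, on some models a zero-idiom like
  // "xorl %eax, %eax" is mapped by a variant class to a different (possibly
  // still variant) scheduling class.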

  // Create a new empty descriptor.
  std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();

  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
  ID->NumMicroOps = SCDesc.NumMicroOps;

  if (MCDesc.isCall()) {
    // We don't correctly model calls.
    WithColor::warning() << "found a call in the input assembly sequence.\n";
    WithColor::note() << "call instructions are not correctly modeled. "
                      << "Assume a latency of 100cy.\n";
  }

  if (MCDesc.isReturn()) {
    WithColor::warning() << "found a return instruction in the input"
                         << " assembly sequence.\n";
    WithColor::note() << "program counter updates are ignored.\n";
  }

  ID->MayLoad = MCDesc.mayLoad();
  ID->MayStore = MCDesc.mayStore();
  ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();

  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
  computeMaxLatency(*ID, MCDesc, SCDesc, STI);
  populateWrites(*ID, MCI, MCDesc, SCDesc, STI);
  populateReads(*ID, MCI, MCDesc, SCDesc, STI);

  LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
  LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');

  // Now add the new descriptor. Non-variant descriptors are cached by
  // opcode; variant descriptors are keyed by the specific MCInst.
  SchedClassID = MCDesc.getSchedClass();
  if (!SM.getSchedClassDesc(SchedClassID)->isVariant()) {
    Descriptors[MCI.getOpcode()] = std::move(ID);
    return *Descriptors[MCI.getOpcode()];
  }

  VariantDescriptors[&MCI] = std::move(ID);
  return *VariantDescriptors[&MCI];
}

const InstrDesc &InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
  if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
    return *Descriptors[MCI.getOpcode()];

  if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
    return *VariantDescriptors[&MCI];

  return createInstrDescImpl(MCI);
}

std::unique_ptr<Instruction>
InstrBuilder::createInstruction(const MCInst &MCI) {
  const InstrDesc &D = getOrCreateInstrDesc(MCI);
  std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);

  // Initialize Reads first.
  for (const ReadDescriptor &RD : D.Reads) {
    int RegID = -1;
    if (!RD.isImplicitRead()) {
      // Explicit read.
      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
      // Skip non-register operands.
      if (!Op.isReg())
        continue;
      RegID = Op.getReg();
    } else {
      // Implicit read.
      RegID = RD.RegisterID;
    }

    // Skip invalid register operands.
    if (!RegID)
      continue;

    // Okay, this is a register operand. Create a ReadState for it.
    assert(RegID > 0 && "Invalid register ID found!");
    NewIS->getUses().emplace_back(llvm::make_unique<ReadState>(RD, RegID));
  }

  // Early exit if there are no writes.
  if (D.Writes.empty())
    return NewIS;

  // Track register writes that implicitly clear the upper portion of the
  // underlying super-registers using an APInt.
  APInt WriteMask(D.Writes.size(), 0);

  // Now query the MCInstrAnalysis object to obtain information about which
  // register writes implicitly clear the upper portion of a super-register.
  MCIA.clearsSuperRegisters(MRI, MCI, WriteMask);
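  // For example, on x86-64 a write to EAX implicitly zeroes the upper 32 bits
  // of RAX; the bit in WriteMask that corresponds to such a write is set.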

  // Initialize writes.
  unsigned WriteIndex = 0;
  for (const WriteDescriptor &WD : D.Writes) {
    unsigned RegID = WD.isImplicitWrite()
                         ? WD.RegisterID
                         : MCI.getOperand(WD.OpIndex).getReg();
    // Check if this is an optional definition that references NoReg.
    if (WD.IsOptionalDef && !RegID) {
      ++WriteIndex;
      continue;
    }

    assert(RegID && "Expected a valid register ID!");
    NewIS->getDefs().emplace_back(llvm::make_unique<WriteState>(
        WD, RegID, /* ClearsSuperRegs */ WriteMask[WriteIndex]));
    ++WriteIndex;
  }

  return NewIS;
}
} // namespace mca