//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the InstrBuilder interface.
///
//===----------------------------------------------------------------------===//

#include "InstrBuilder.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "llvm-mca"

namespace mca {

using namespace llvm;

static void initializeUsedResources(InstrDesc &ID,
                                    const MCSchedClassDesc &SCDesc,
                                    const MCSubtargetInfo &STI,
                                    ArrayRef<uint64_t> ProcResourceMasks) {
  const MCSchedModel &SM = STI.getSchedModel();

  // Populate resources consumed.
  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
  std::vector<ResourcePlusCycles> Worklist;
  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
    if (PR.BufferSize != -1)
      ID.Buffers.push_back(Mask);
    CycleSegment RCy(0, PRE->Cycles, false);
    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
  }

  // Sort elements by mask popcount, so that we prioritize resource units over
  // resource groups, and smaller groups over larger groups.
  llvm::sort(Worklist.begin(), Worklist.end(),
             [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
               unsigned popcntA = countPopulation(A.first);
               unsigned popcntB = countPopulation(B.first);
               if (popcntA < popcntB)
                 return true;
               if (popcntA > popcntB)
                 return false;
               return A.first < B.first;
             });
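
  // Example (hypothetical masks, for illustration only): if HWPort0 = 0b0001,
  // HWPort1 = 0b0010, and group HWPort01 = 0b0111 (a group mask is the group's
  // own identifying bit plus the bits of its member units), then the sorted
  // worklist is { HWPort0, HWPort1, HWPort01 }: units (popcount 1) first, then
  // groups by increasing popcount.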

  uint64_t UsedResourceUnits = 0;

  // Remove cycles contributed by smaller resources.
  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
    ResourcePlusCycles &A = Worklist[I];
    if (!A.second.size()) {
      A.second.NumUnits = 0;
      A.second.setReserved();
      ID.Resources.emplace_back(A);
      continue;
    }

    ID.Resources.emplace_back(A);
    uint64_t NormalizedMask = A.first;
    if (countPopulation(A.first) == 1) {
      UsedResourceUnits |= A.first;
    } else {
      // Remove the leading 1 from the resource group mask.
      NormalizedMask ^= PowerOf2Floor(NormalizedMask);
    }

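    // At this point NormalizedMask holds A's set of resource units: for a unit
    // it is A.first itself; for a group it is the group mask without the
    // group's identifying bit (e.g., hypothetically, 0b0111 -> 0b0011). The
    // containment test below is then true exactly for those later entries B
    // whose mask covers every unit of A.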
    for (unsigned J = I + 1; J < E; ++J) {
      ResourcePlusCycles &B = Worklist[J];
      if ((NormalizedMask & B.first) == NormalizedMask) {
        B.second.CS.Subtract(A.second.size());
        if (countPopulation(B.first) > 1)
          B.second.NumUnits++;
      }
    }
  }

  // A SchedWrite may specify a number of cycles in which a resource group
  // is reserved. For example (on target x86; cpu Haswell):
  //
  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
  //    let ResourceCycles = [2, 2, 3];
  //  }
  //
  // This means:
  //  Resource units HWPort0 and HWPort1 are both used for 2cy.
  //  Resource group HWPort01 is the union of HWPort0 and HWPort1.
  //  Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
  //  will not be usable for 2 entire cycles from instruction issue.
  //
  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
  // of 3 cycles for HWPort01. This tool assumes that those 3cy are an extra
  // delay on top of the 2cy of latency. During those extra cycles, HWPort01
  // is not usable by other instructions.
  for (ResourcePlusCycles &RPC : ID.Resources) {
    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
      // Remove the leading 1 from the resource group mask.
      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
      if ((Mask & UsedResourceUnits) == Mask)
        RPC.second.setReserved();
    }
  }
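
  // With the example above (and hypothetical masks): once HWPort0 and HWPort1
  // have both been recorded in UsedResourceUnits, the unit bits of group
  // HWPort01 are fully covered, so HWPort01 is marked as reserved for the
  // duration of its cycle segment.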

  DEBUG({
    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
      dbgs() << "\t\tMask=" << R.first << ", cy=" << R.second.size() << '\n';
    for (const uint64_t R : ID.Buffers)
      dbgs() << "\t\tBuffer Mask=" << R << '\n';
  });
}

static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
                              const MCSchedClassDesc &SCDesc,
                              const MCSubtargetInfo &STI) {
  if (MCDesc.isCall()) {
    // We cannot estimate how long this call will take.
    // Artificially set an arbitrarily high latency (100cy).
    ID.MaxLatency = 100U;
    return;
  }

  int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
  // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
  ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
}

static void populateWrites(InstrDesc &ID, const MCInst &MCI,
                           const MCInstrDesc &MCDesc,
                           const MCSchedClassDesc &SCDesc,
                           const MCSubtargetInfo &STI) {
  // Set if writes through this opcode may update super registers.
  // TODO: on x86-64, a 4-byte write of a general purpose register always
  // fully updates the super-register.
  // More generally, (at least on x86) not all register writes perform
  // a partial (super-)register update.
  // For example, an AVX instruction that writes to an XMM register implicitly
  // zeroes the upper half of every aliasing super-register.
  //
  // For now, we pessimistically assume that writes are all potentially
  // partial register updates. This is a good default for most targets, except
  // for those like x86 which implement a special semantic for certain opcodes.
  // At least on x86, this may lead to an inaccurate prediction of the
  // instruction-level parallelism.
  bool FullyUpdatesSuperRegisters = false;

  // Now populate the writes.

  // This algorithm currently works under the strong (and potentially
  // incorrect) assumption that information related to register def/uses can
  // be obtained from MCInstrDesc.
  //
  // However, class MCInstrDesc is used to describe MachineInstr objects and
  // not MCInst objects. To be more specific, MCInstrDesc objects are opcode
  // descriptors that are automatically generated via tablegen, based on the
  // instruction set information available from the target .td files. That
  // means the number of (explicit) definitions according to MCInstrDesc
  // always matches the cardinality of the `(outs)` set in tablegen.
  //
  // By construction, definitions must appear first in the operand sequence of
  // a MachineInstr. Also, the (outs) sequence is preserved (example: the first
  // element in the outs set is the first operand in the corresponding
  // MachineInstr). That's the reason why MCInstrDesc only needs to declare the
  // total number of register definitions, and not where those definitions are
  // in the machine operand sequence.
  //
  // Unfortunately, it is not safe to use the information from MCInstrDesc to
  // also describe MCInst objects. An MCInst object can be obtained from a
  // MachineInstr through a lowering step which may restructure the operand
  // sequence (and even remove or introduce new operands). So, there is a high
  // risk that the lowering step breaks the assumption that register
  // definitions are always at the beginning of the machine operand sequence.
  //
  // This is a fundamental problem, and it is still open. Essentially, we have
  // to find a way to correlate def/use operands of a MachineInstr to operands
  // of an MCInst. Otherwise, we cannot correctly reconstruct data
  // dependencies, nor can we correctly interpret the scheduling model, which
  // heavily uses machine operand indices to define processor read-advance
  // information, and to identify processor write resources. Essentially, we
  // either need something like an MCInstrDesc, but for MCInst, or a way to
  // map MCInst operands back to MachineInstr operands.
  //
  // Unfortunately, we don't have that information now. So, this prototype
  // currently works under the strong assumption that we can always safely
  // trust the content of an MCInstrDesc. For example, we can query an
  // MCInstrDesc to obtain the number of explicit and implicit register
  // definitions. We also assume that register definitions always come first
  // in the operand sequence. This last assumption usually makes sense for
  // MachineInstr, where register definitions always appear at the beginning
  // of the operand sequence. In reality, these assumptions could be broken by
  // the lowering step, which can decide to lay out operands in a different
  // order than the original order of operands as specified by the
  // MachineInstr.
  //
  // Things get even more complicated in the presence of "optional" register
  // definitions. For MachineInstr, optional register definitions are always
  // at the end of the operand sequence. Some ARM instructions that may update
  // the status flags specify the status register as an optional operand.
  // Since we don't have operand descriptors for MCInst, we assume for now
  // that the optional definition is always the last operand of an MCInst.
  // Again, this assumption may be okay for most targets. However, there is no
  // guarantee that targets would respect that.
  //
  // In conclusion, these are the strong assumptions made by the tool for now:
  //  * The number of explicit and implicit register definitions in an MCInst
  //    matches the number of explicit and implicit definitions according to
  //    the opcode descriptor (MCInstrDesc).
  //  * Register definitions take precedence over register uses in the
  //    operands list.
  //  * If an opcode specifies an optional definition, then the optional
  //    definition is always the last operand in the sequence, and it can be
  //    set to zero (i.e. "no register").
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions are
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
  //
  // In the longer term, we need to find a proper solution for this issue.
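  //
  // As a concrete illustration (x86, example only): the opcode descriptor for
  // ADD32rr declares one explicit definition (the GR32 destination, operand 0
  // of the MCInst) and one implicit definition (EFLAGS). Under the assumptions
  // above, this function therefore creates two WriteDescriptors: one bound to
  // operand index 0, and one bound to the physical register EFLAGS.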
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
    TotalDefs++;
  ID.Writes.resize(TotalDefs);
  // Iterate over the operands list, and skip non-register operands.
  // The first NumExplicitDefs register operands are expected to be register
  // definitions.
  unsigned CurrentDef = 0;
  unsigned i = 0;
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = i;
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      Write.Latency = WLE.Cycles == -1 ? ID.MaxLatency : WLE.Cycles;
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }
    Write.FullyUpdatesSuperRegs = FullyUpdatesSuperRegisters;
    Write.IsOptionalDef = false;
    DEBUG({
      dbgs() << "\t\tOpIdx=" << Write.OpIndex << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
    CurrentDef++;
  }

  if (CurrentDef != NumExplicitDefs)
    llvm::report_fatal_error(
        "error: Expected more register operand definitions.");

  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
    unsigned Index = NumExplicitDefs + CurrentDef;
    WriteDescriptor &Write = ID.Writes[Index];
    Write.OpIndex = -1;
    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
    if (Index < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, Index);
      // Conservatively default to MaxLatency.
      Write.Latency = WLE.Cycles == -1 ? ID.MaxLatency : WLE.Cycles;
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }

    Write.IsOptionalDef = false;
    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
    DEBUG(dbgs() << "\t\tOpIdx=" << Write.OpIndex << ", PhysReg="
                 << Write.RegisterID << ", Latency=" << Write.Latency
                 << ", WriteResourceID=" << Write.SClassOrWriteResourceID
                 << '\n');
  }

  if (MCDesc.hasOptionalDef()) {
    // Always assume that the optional definition is the last operand of the
    // MCInst sequence.
    const MCOperand &Op = MCI.getOperand(MCI.getNumOperands() - 1);
    if (i == MCI.getNumOperands() || !Op.isReg())
      llvm::report_fatal_error(
          "error: expected a register operand for an optional "
          "definition. Instruction has not been correctly analyzed.\n",
          false);

    WriteDescriptor &Write = ID.Writes[TotalDefs - 1];
    Write.OpIndex = MCI.getNumOperands() - 1;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = true;
  }
}

static void populateReads(InstrDesc &ID, const MCInst &MCI,
                          const MCInstrDesc &MCDesc,
                          const MCSchedClassDesc &SCDesc,
                          const MCSubtargetInfo &STI) {
  unsigned SchedClassID = MCDesc.getSchedClass();
  bool HasReadAdvanceEntries = SCDesc.NumReadAdvanceEntries > 0;
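  // A ReadAdvance entry models operand forwarding: it lets a read consume a
  // value before the nominal latency of the producing write has fully elapsed
  // (e.g. through a bypass network), reducing the effective latency of that
  // dependency.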

  unsigned i = 0;
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  // Skip explicit definitions.
  for (; i < MCI.getNumOperands() && NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (Op.isReg())
      NumExplicitDefs--;
  }

  if (NumExplicitDefs)
    llvm::report_fatal_error(
        "error: Expected more register operand definitions.", false);

  unsigned NumExplicitUses = MCI.getNumOperands() - i;
  unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
  if (MCDesc.hasOptionalDef()) {
    assert(NumExplicitUses);
    NumExplicitUses--;
  }
  unsigned TotalUses = NumExplicitUses + NumImplicitUses;
  if (!TotalUses)
    return;
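
  // For example (x86, illustration only): for ADC32rr, the scan above skips
  // the explicit GR32 definition, leaving two explicit uses (the two source
  // registers) plus one implicit use (EFLAGS), for a total of three
  // ReadDescriptors.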

  ID.Reads.resize(TotalUses);
  for (unsigned CurrentUse = 0; CurrentUse < NumExplicitUses; ++CurrentUse) {
    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = i + CurrentUse;
    Read.UseIndex = CurrentUse;
    Read.HasReadAdvanceEntries = HasReadAdvanceEntries;
    Read.SchedClassID = SchedClassID;
    DEBUG(dbgs() << "\t\tOpIdx=" << Read.OpIndex << '\n');
  }

  for (unsigned CurrentUse = 0; CurrentUse < NumImplicitUses; ++CurrentUse) {
    ReadDescriptor &Read = ID.Reads[NumExplicitUses + CurrentUse];
    Read.OpIndex = -1;
    Read.UseIndex = NumExplicitUses + CurrentUse;
    Read.RegisterID = MCDesc.getImplicitUses()[CurrentUse];
    Read.HasReadAdvanceEntries = HasReadAdvanceEntries;
    Read.SchedClassID = SchedClassID;
    DEBUG(dbgs() << "\t\tOpIdx=" << Read.OpIndex
                 << ", RegisterID=" << Read.RegisterID << '\n');
  }
}

void InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
  assert(STI.getSchedModel().hasInstrSchedModel() &&
         "Itineraries are not yet supported!");

  unsigned short Opcode = MCI.getOpcode();
  // Obtain the instruction descriptor from the opcode.
  const MCInstrDesc &MCDesc = MCII.get(Opcode);
  const MCSchedModel &SM = STI.getSchedModel();

  // Then obtain the scheduling class information from the instruction.
  const MCSchedClassDesc &SCDesc =
      *SM.getSchedClassDesc(MCDesc.getSchedClass());

  // Create a new empty descriptor.
  std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();

  if (SCDesc.isVariant()) {
    errs() << "warning: don't know how to model variant opcodes.\n"
           << "note: assume 1 micro opcode.\n";
    ID->NumMicroOps = 1U;
  } else {
    ID->NumMicroOps = SCDesc.NumMicroOps;
  }

  if (MCDesc.isCall()) {
    // We don't correctly model calls.
    errs() << "warning: found a call in the input assembly sequence.\n"
           << "note: call instructions are not correctly modeled. Assume a "
              "latency of 100cy.\n";
  }

  if (MCDesc.isReturn()) {
    errs() << "warning: found a return instruction in the input assembly "
              "sequence.\n"
           << "note: program counter updates are ignored.\n";
  }

  ID->MayLoad = MCDesc.mayLoad();
  ID->MayStore = MCDesc.mayStore();
  ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();

  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
  computeMaxLatency(*ID, MCDesc, SCDesc, STI);
  populateWrites(*ID, MCI, MCDesc, SCDesc, STI);
  populateReads(*ID, MCI, MCDesc, SCDesc, STI);

  DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
  DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');

  // Now add the new descriptor.
  Descriptors[Opcode] = std::move(ID);
}

const InstrDesc &InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
  if (Descriptors.find_as(MCI.getOpcode()) == Descriptors.end())
    createInstrDescImpl(MCI);
  return *Descriptors[MCI.getOpcode()];
}

std::unique_ptr<Instruction>
InstrBuilder::createInstruction(unsigned Idx, const MCInst &MCI) {
  const InstrDesc &D = getOrCreateInstrDesc(MCI);
  std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);

  // Initialize Reads first.
  for (const ReadDescriptor &RD : D.Reads) {
    int RegID = -1;
    if (RD.OpIndex != -1) {
      // Explicit read.
      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
      // Skip non-register operands.
      if (!Op.isReg())
        continue;
      RegID = Op.getReg();
    } else {
      // Implicit read.
      RegID = RD.RegisterID;
    }

    // Skip invalid register operands.
    if (!RegID)
      continue;

    // Okay, this is a register operand. Create a ReadState for it.
    assert(RegID > 0 && "Invalid register ID found!");
    NewIS->getUses().emplace_back(llvm::make_unique<ReadState>(RD, RegID));
  }

  // Initialize writes.
  for (const WriteDescriptor &WD : D.Writes) {
    unsigned RegID =
        WD.OpIndex == -1 ? WD.RegisterID : MCI.getOperand(WD.OpIndex).getReg();
    // Check if this is an optional definition that references NoReg.
    if (WD.IsOptionalDef && !RegID)
      continue;

    assert(RegID && "Expected a valid register ID!");
    NewIS->getDefs().emplace_back(llvm::make_unique<WriteState>(WD, RegID));
  }

  return NewIS;
}
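
// A minimal usage sketch (assuming an InstrBuilder `IB` constructed for the
// current subtarget, and an already-parsed MCInst `Inst`; `SourceIndex` is a
// hypothetical position of the instruction in the input sequence):
//
//   std::unique_ptr<Instruction> I = IB.createInstruction(SourceIndex, Inst);
//
// Descriptors are cached per opcode, so repeated instances of the same opcode
// share a single InstrDesc.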
} // namespace mca