Revert "Revert r347596 "Support for inserting profile-directed cache prefetches""

Summary:
This reverts commit d8517b96dfbd42e6a8db33c50d1fa1e58e63fbb9.

Fix: correct the use of DenseMap.
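
For reference, a minimal sketch of the kind of DenseMap hazard such a fix
guards against (illustrative only; not necessarily the exact defect behind the
original revert): references obtained from operator[] are invalidated when a
later insertion triggers a rehash.

    #include "llvm/ADT/DenseMap.h"

    void Example() {
      llvm::DenseMap<unsigned, unsigned> Counts;
      unsigned &Slot = Counts[1]; // Reference into the table.
      Counts[2] = 0;              // Insertion may rehash and invalidate Slot.
      Slot = 5;                   // Use of a possibly dangling reference.
    }

The safe pattern is to finish all insertions before holding references, or to
redo the lookup after any insertion.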

Reviewers: davidxl, hans, wmi

Reviewed By: wmi

Subscribers: mgorny, eraman, llvm-commits

Differential Revision: https://reviews.llvm.org/D55088

llvm-svn: 347938
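
Usage sketch (assumptions not prescribed by this patch except for the
-prefetch-hints-file flag: the input IR carries debug info for profiling, e.g.
built with clang -fdebug-info-for-profiling, and prefetch.afdo is a sample
profile containing __prefetch_* call-target entries):

    llc -O2 -prefetch-hints-file=prefetch.afdo input.ll -o input.s
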
diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 5ded1f9..524b4ae 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -30,6 +30,7 @@
   X86CmovConversion.cpp
   X86CondBrFolding.cpp
   X86DomainReassignment.cpp
+  X86DiscriminateMemOps.cpp
   X86ExpandPseudo.cpp
   X86FastISel.cpp
   X86FixupBWInsts.cpp
@@ -44,6 +45,7 @@
   X86ISelLowering.cpp
   X86IndirectBranchTracking.cpp
   X86InterleavedAccess.cpp
+  X86InsertPrefetch.cpp
   X86InstrFMA3Info.cpp
   X86InstrFoldTables.cpp
   X86InstrInfo.cpp
diff --git a/llvm/lib/Target/X86/LLVMBuild.txt b/llvm/lib/Target/X86/LLVMBuild.txt
index 2062163..055336b 100644
--- a/llvm/lib/Target/X86/LLVMBuild.txt
+++ b/llvm/lib/Target/X86/LLVMBuild.txt
@@ -31,5 +31,5 @@
 type = Library
 name = X86CodeGen
 parent = X86
-required_libraries = Analysis AsmPrinter CodeGen Core MC SelectionDAG Support Target X86AsmPrinter X86Desc X86Info X86Utils GlobalISel
+required_libraries = Analysis AsmPrinter CodeGen Core MC SelectionDAG Support Target X86AsmPrinter X86Desc X86Info X86Utils GlobalISel ProfileData
 add_to_library_groups = X86
diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index 19f8e35..ab0cfeb 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -122,6 +122,13 @@
 /// This pass creates the thunks for the retpoline feature.
 FunctionPass *createX86RetpolineThunksPass();
 
+/// This pass ensures instructions featuring a memory operand
+/// have a distinct <LineNumber, Discriminator> pair (with respect to each other).
+FunctionPass *createX86DiscriminateMemOpsPass();
+
+/// This pass applies profiling information to insert cache prefetches.
+FunctionPass *createX86InsertPrefetchPass();
+
 InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
                                                   X86Subtarget &,
                                                   X86RegisterBankInfo &);
diff --git a/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp b/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp
new file mode 100644
index 0000000..4935896
--- /dev/null
+++ b/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp
@@ -0,0 +1,138 @@
+//===- X86DiscriminateMemOps.cpp - Unique IDs for Mem Ops -----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This pass aids profile-driven cache prefetch insertion by ensuring all
+/// instructions that have a memory operand are distinguishable from each other.
+///
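+/// Illustrative example (assumed scenario, not taken from a real profile): if
+/// two loads in a function both carry the location <foo.cc:42, discriminator
+/// 0>, this pass leaves one as <42, 0> and rewrites the other to <42, 1>, so
+/// an external prefetch hint keyed on <line, discriminator> can refer to
+/// either instruction unambiguously.
+///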
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/ProfileData/SampleProf.h"
+#include "llvm/ProfileData/SampleProfReader.h"
+#include "llvm/Transforms/IPO/SampleProfile.h"
+using namespace llvm;
+
+namespace {
+
+using Location = std::pair<StringRef, unsigned>;
+
+Location diToLocation(const DILocation *Loc) {
+  return std::make_pair(Loc->getFilename(), Loc->getLine());
+}
+
+/// Ensure each instruction having a memory operand has a distinct <LineNumber,
+/// Discriminator> pair.
+void updateDebugInfo(MachineInstr *MI, const DILocation *Loc) {
+  DebugLoc DL(Loc);
+  MI->setDebugLoc(DL);
+}
+
+class X86DiscriminateMemOps : public MachineFunctionPass {
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  StringRef getPassName() const override {
+    return "X86 Discriminate Memory Operands";
+  }
+
+public:
+  static char ID;
+
+  /// Default construct and initialize the pass.
+  X86DiscriminateMemOps();
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+//            Implementation
+//===----------------------------------------------------------------------===//
+
+char X86DiscriminateMemOps::ID = 0;
+
+/// Default construct and initialize the pass.
+X86DiscriminateMemOps::X86DiscriminateMemOps() : MachineFunctionPass(ID) {}
+
+bool X86DiscriminateMemOps::runOnMachineFunction(MachineFunction &MF) {
+  DISubprogram *FDI = MF.getFunction().getSubprogram();
+  if (!FDI || !FDI->getUnit()->getDebugInfoForProfiling())
+    return false;
+
+  // Have a default DILocation to use for instructions with memops that don't
+  // have any debug info.
+  const DILocation *ReferenceDI =
+      DILocation::get(FDI->getContext(), FDI->getLine(), 0, FDI);
+
+  DenseMap<Location, unsigned> MemOpDiscriminators;
+  MemOpDiscriminators[diToLocation(ReferenceDI)] = 0;
+
+  // Figure out the largest discriminator issued for each Location. When we
+  // issue new discriminators, we can thus avoid reusing discriminators
+  // already assigned to instructions that don't have memops. This isn't a
+  // requirement for the goals of this pass, but it avoids unnecessary ambiguity.
+  for (auto &MBB : MF) {
+    for (auto &MI : MBB) {
+      const auto &DI = MI.getDebugLoc();
+      if (!DI)
+        continue;
+      Location Loc = diToLocation(DI);
+      MemOpDiscriminators[Loc] =
+          std::max(MemOpDiscriminators[Loc], DI->getBaseDiscriminator());
+    }
+  }
+
+  // Keep track of the discriminators seen at each Location. If an instruction's
+  // DebugInfo has a Location and discriminator we've already seen, replace its
+  // discriminator with a new one, to guarantee uniqueness.
+  DenseMap<Location, DenseSet<unsigned>> Seen;
+
+  bool Changed = false;
+  for (auto &MBB : MF) {
+    for (auto &MI : MBB) {
+      if (X86II::getMemoryOperandNo(MI.getDesc().TSFlags) < 0)
+        continue;
+      const DILocation *DI = MI.getDebugLoc();
+      if (!DI) {
+        DI = ReferenceDI;
+      }
+      DenseSet<unsigned> &Set = Seen[diToLocation(DI)];
+      const std::pair<DenseSet<unsigned>::iterator, bool> TryInsert =
+          Set.insert(DI->getBaseDiscriminator());
+      if (!TryInsert.second) {
+        DI = DI->setBaseDiscriminator(++MemOpDiscriminators[diToLocation(DI)]);
+        updateDebugInfo(&MI, DI);
+        Changed = true;
+        const std::pair<DenseSet<unsigned>::iterator, bool> MustInsert =
+            Set.insert(DI->getBaseDiscriminator());
+        assert(MustInsert.second);
+      }
+
+      // Bump the reference DI to avoid cramming discriminators on line 0.
+      // FIXME(mtrofin): pin ReferenceDI on blocks or first instruction with DI
+      // in a block. It's more consistent than just relying on the last memop
+      // instruction we happened to see.
+      ReferenceDI = DI;
+    }
+  }
+  return Changed;
+}
+
+FunctionPass *llvm::createX86DiscriminateMemOpsPass() {
+  return new X86DiscriminateMemOps();
+}
diff --git a/llvm/lib/Target/X86/X86InsertPrefetch.cpp b/llvm/lib/Target/X86/X86InsertPrefetch.cpp
new file mode 100644
index 0000000..30b46a0
--- /dev/null
+++ b/llvm/lib/Target/X86/X86InsertPrefetch.cpp
@@ -0,0 +1,264 @@
+//===------- X86InsertPrefetch.cpp - Insert cache prefetch hints ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass inserts cache prefetch instructions based on a profile. The pass
+// assumes DiscriminateMemOps ran immediately before, to ensure the debug info
+// matches what was used at profile generation time. The profile is encoded in
+// afdo format (text or binary) and contains prefetch hint recommendations.
+// Each recommendation is made in terms of a debug info location, a type (i.e.
+// nta, t{0|1|2}) and a delta. The debug info identifies an instruction with a
+// memory operand (see X86DiscriminateMemOps). The prefetch is requested for
+// the address of that memory operand plus the delta specified in the
+// recommendation.
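+//
+// The profile is supplied via the -prefetch-hints-file option defined below.
+//
+// Illustrative hint (name format inferred from the parsing in findPrefetchInfo;
+// the surrounding profile syntax is whatever the afdo reader accepts): a
+// call-target entry named "__prefetch_nta_0" with value 64, attached to the
+// <line, discriminator> of a memory instruction, asks for a PREFETCHNTA of that
+// instruction's address operand plus 64; the trailing index orders multiple
+// hints attached to the same instruction.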
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/ProfileData/SampleProf.h"
+#include "llvm/ProfileData/SampleProfReader.h"
+#include "llvm/Transforms/IPO/SampleProfile.h"
+using namespace llvm;
+using namespace sampleprof;
+
+static cl::opt<std::string>
+    PrefetchHintsFile("prefetch-hints-file",
+                      cl::desc("Path to the prefetch hints profile."),
+                      cl::Hidden);
+namespace {
+
+class X86InsertPrefetch : public MachineFunctionPass {
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool doInitialization(Module &) override;
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  struct PrefetchInfo {
+    unsigned InstructionID;
+    int64_t Delta;
+  };
+  typedef SmallVectorImpl<PrefetchInfo> Prefetches;
+  bool findPrefetchInfo(const FunctionSamples *Samples, const MachineInstr &MI,
+                        Prefetches &prefetches) const;
+
+public:
+  static char ID;
+  X86InsertPrefetch(const std::string &PrefetchHintsFilename);
+  StringRef getPassName() const override {
+    return "X86 Insert Cache Prefetches";
+  }
+
+private:
+  std::string Filename;
+  std::unique_ptr<SampleProfileReader> Reader;
+};
+
+using PrefetchHints = SampleRecord::CallTargetMap;
+
+// Return any prefetching hints for the specified MachineInstruction. The hints
+// are returned as pairs (name, delta).
+ErrorOr<PrefetchHints> getPrefetchHints(const FunctionSamples *TopSamples,
+                                        const MachineInstr &MI) {
+  if (const auto &Loc = MI.getDebugLoc())
+    if (const auto *Samples = TopSamples->findFunctionSamples(Loc))
+      return Samples->findCallTargetMapAt(FunctionSamples::getOffset(Loc),
+                                          Loc->getBaseDiscriminator());
+  return std::error_code();
+}
+
+// The prefetch instruction can't take memory operands involving vector
+// registers.
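+// (Illustrative: a gather-style access whose index register is a vector
+// register fails this check, so the caller skips it.)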
+bool IsMemOpCompatibleWithPrefetch(const MachineInstr &MI, int Op) {
+  unsigned BaseReg = MI.getOperand(Op + X86::AddrBaseReg).getReg();
+  unsigned IndexReg = MI.getOperand(Op + X86::AddrIndexReg).getReg();
+  return (BaseReg == 0 ||
+          X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) ||
+          X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg)) &&
+         (IndexReg == 0 ||
+          X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
+          X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg));
+}
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+//            Implementation
+//===----------------------------------------------------------------------===//
+
+char X86InsertPrefetch::ID = 0;
+
+X86InsertPrefetch::X86InsertPrefetch(const std::string &PrefetchHintsFilename)
+    : MachineFunctionPass(ID), Filename(PrefetchHintsFilename) {}
+
+/// Return true if the provided MachineInstruction has cache prefetch hints. In
+/// that case, the prefetch hints are stored, in order, in the Prefetches
+/// vector.
+bool X86InsertPrefetch::findPrefetchInfo(const FunctionSamples *TopSamples,
+                                         const MachineInstr &MI,
+                                         Prefetches &Prefetches) const {
+  assert(Prefetches.empty() &&
+         "Expected caller passed empty PrefetchInfo vector.");
+  static const std::pair<const StringRef, unsigned> HintTypes[] = {
+      {"_nta_", X86::PREFETCHNTA},
+      {"_t0_", X86::PREFETCHT0},
+      {"_t1_", X86::PREFETCHT1},
+      {"_t2_", X86::PREFETCHT2},
+  };
+  static const char *SerializedPrefetchPrefix = "__prefetch";
+
+  const ErrorOr<PrefetchHints> T = getPrefetchHints(TopSamples, MI);
+  if (!T)
+    return false;
+  int16_t max_index = -1;
+  // Convert serialized prefetch hints into PrefetchInfo objects, and populate
+  // the Prefetches vector.
+  for (const auto &S_V : *T) {
+    StringRef Name = S_V.getKey();
+    if (Name.consume_front(SerializedPrefetchPrefix)) {
+      int64_t D = static_cast<int64_t>(S_V.second);
+      unsigned IID = 0;
+      for (const auto &HintType : HintTypes) {
+        if (Name.startswith(HintType.first)) {
+          Name = Name.drop_front(HintType.first.size());
+          IID = HintType.second;
+          break;
+        }
+      }
+      if (IID == 0)
+        return false;
+      uint8_t index = 0;
+      Name.consumeInteger(10, index);
+
+      if (index >= Prefetches.size())
+        Prefetches.resize(index + 1);
+      Prefetches[index] = {IID, D};
+      max_index = std::max(max_index, static_cast<int16_t>(index));
+    }
+  }
+  assert(max_index + 1 >= 0 &&
+         "Possible overflow: max_index + 1 should be positive.");
+  assert(static_cast<size_t>(max_index + 1) == Prefetches.size() &&
+         "The number of prefetch hints received should match the number of "
+         "PrefetchInfo objects returned");
+  return !Prefetches.empty();
+}
+
+bool X86InsertPrefetch::doInitialization(Module &M) {
+  if (Filename.empty())
+    return false;
+
+  LLVMContext &Ctx = M.getContext();
+  ErrorOr<std::unique_ptr<SampleProfileReader>> ReaderOrErr =
+      SampleProfileReader::create(Filename, Ctx);
+  if (std::error_code EC = ReaderOrErr.getError()) {
+    std::string Msg = "Could not open profile: " + EC.message();
+    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg,
+                                             DiagnosticSeverity::DS_Warning));
+    return false;
+  }
+  Reader = std::move(ReaderOrErr.get());
+  Reader->read();
+  return true;
+}
+
+void X86InsertPrefetch::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.setPreservesAll();
+  AU.addRequired<MachineModuleInfo>();
+}
+
+bool X86InsertPrefetch::runOnMachineFunction(MachineFunction &MF) {
+  if (!Reader)
+    return false;
+  const FunctionSamples *Samples = Reader->getSamplesFor(MF.getFunction());
+  if (!Samples)
+    return false;
+
+  bool Changed = false;
+
+  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+  SmallVector<PrefetchInfo, 4> Prefetches;
+  for (auto &MBB : MF) {
+    for (auto MI = MBB.instr_begin(); MI != MBB.instr_end();) {
+      auto Current = MI;
+      ++MI;
+
+      int Offset = X86II::getMemoryOperandNo(Current->getDesc().TSFlags);
+      if (Offset < 0)
+        continue;
+      unsigned Bias = X86II::getOperandBias(Current->getDesc());
+      int MemOpOffset = Offset + Bias;
+      // FIXME(mtrofin): ORE message when the recommendation cannot be taken.
+      if (!IsMemOpCompatibleWithPrefetch(*Current, MemOpOffset))
+        continue;
+      Prefetches.clear();
+      if (!findPrefetchInfo(Samples, *Current, Prefetches))
+        continue;
+      assert(!Prefetches.empty() &&
+             "The Prefetches vector should contain at least a value if "
+             "findPrefetchInfo returned true.");
+      for (auto &PrefInfo : Prefetches) {
+        unsigned PFetchInstrID = PrefInfo.InstructionID;
+        int64_t Delta = PrefInfo.Delta;
+        const MCInstrDesc &Desc = TII->get(PFetchInstrID);
+        MachineInstr *PFetch =
+            MF.CreateMachineInstr(Desc, Current->getDebugLoc(), true);
+        MachineInstrBuilder MIB(MF, PFetch);
+
+        assert(X86::AddrBaseReg == 0 && X86::AddrScaleAmt == 1 &&
+               X86::AddrIndexReg == 2 && X86::AddrDisp == 3 &&
+               X86::AddrSegmentReg == 4 &&
+               "Unexpected change in X86 operand offset order.");
+
+        // This assumes X86::AddrBaseReg == 0, X86::AddrScaleAmt == 1, etc.
+        // FIXME(mtrofin): consider adding a:
+        //     MachineInstrBuilder::set(unsigned offset, op).
+        MIB.addReg(Current->getOperand(MemOpOffset + X86::AddrBaseReg).getReg())
+            .addImm(
+                Current->getOperand(MemOpOffset + X86::AddrScaleAmt).getImm())
+            .addReg(
+                Current->getOperand(MemOpOffset + X86::AddrIndexReg).getReg())
+            .addImm(Current->getOperand(MemOpOffset + X86::AddrDisp).getImm() +
+                    Delta)
+            .addReg(Current->getOperand(MemOpOffset + X86::AddrSegmentReg)
+                        .getReg());
+
+        if (!Current->memoperands_empty()) {
+          MachineMemOperand *CurrentOp = *(Current->memoperands_begin());
+          MIB.addMemOperand(MF.getMachineMemOperand(
+              CurrentOp, CurrentOp->getOffset() + Delta, CurrentOp->getSize()));
+        }
+
+        // Insert before Current. This is because Current may clobber some of
+        // the registers used to describe the input memory operand.
+        MBB.insert(Current, PFetch);
+        Changed = true;
+      }
+    }
+  }
+  return Changed;
+}
+
+FunctionPass *llvm::createX86InsertPrefetchPass() {
+  return new X86InsertPrefetch(PrefetchHintsFile);
+}
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 3ef0207..c2b5a6e 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -497,6 +497,8 @@
     addPass(createX86FixupLEAs());
     addPass(createX86EvexToVexInsts());
   }
+  addPass(createX86DiscriminateMemOpsPass());
+  addPass(createX86InsertPrefetchPass());
 }
 
 void X86PassConfig::addPreEmitPass2() {