Rename invariant.group.barrier to launder.invariant.group
Summary:
This is one of the initial commits of the "RFC: Devirtualization v2" proposal:
https://docs.google.com/document/d/16GVtCpzK8sIHNc2qZz6RN8amICNBtvjWUod2SujZVEo/edit?usp=sharing
Reviewers: rsmith, amharc, kuhar, sanjoy
Subscribers: arsenm, nhaehnle, javed.absar, hiraditya, llvm-commits
Differential Revision: https://reviews.llvm.org/D45111
llvm-svn: 331448
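
For reference, a minimal sketch (not part of this patch) of how a pass or frontend might emit the renamed intrinsic. The helper name emitLaunder is made up; the bitcasts reflect that, as currently defined, the intrinsic takes an i8* and is overloaded only on the pointer's address space:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Wraps Ptr (assumed to be a pointer value) in a call to
    // llvm.launder.invariant.group, casting through i8* in the pointer's
    // address space and back to the original pointer type.
    static Value *emitLaunder(IRBuilder<> &B, Module &M, Value *Ptr) {
      Type *OrigTy = Ptr->getType();
      Type *I8PtrTy = B.getInt8PtrTy(OrigTy->getPointerAddressSpace());
      Function *Launder = Intrinsic::getDeclaration(
          &M, Intrinsic::launder_invariant_group, {I8PtrTy});
      Value *Laundered = B.CreateCall(Launder, {B.CreateBitCast(Ptr, I8PtrTy)});
      return B.CreateBitCast(Laundered, OrigTy);
    }

The main in-tree producer of this intrinsic is Clang's -fstrict-vtable-pointers mode, which wraps operations that may change an object's dynamic type in the same pattern.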
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 0793d01..aaa79bc 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -985,8 +985,8 @@
const GEPOperator *GEP2,
uint64_t V2Size,
const DataLayout &DL) {
- assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
"Expected GEPs with the same pointer operand");
@@ -1264,8 +1264,8 @@
// If we know the two GEPs are based off of the exact same pointer (and not
// just the same underlying object), see if that tells us anything about
// the resulting pointers.
- if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
// If we couldn't find anything interesting, don't abandon just yet.
@@ -1578,8 +1578,8 @@
return NoAlias;
// Strip off any casts if they exist.
- V1 = V1->stripPointerCastsAndBarriers();
- V2 = V2->stripPointerCastsAndBarriers();
+ V1 = V1->stripPointerCastsAndInvariantGroups();
+ V2 = V2->stripPointerCastsAndInvariantGroups();
// If V1 or V2 is undef, the result is NoAlias because we can always pick a
// value for undef that aliases nothing in the program.
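
The three hunks above only swap in the renamed helper; the reasoning is unchanged: two GEPs are treated as sharing a base when their pointer operands agree after stripping casts, aliases, and launder.invariant.group calls. Purely as an illustration (haveSameStrippedBase is not a function in the tree), the precondition asserted in the first hunk can be written as:

    #include "llvm/IR/Operator.h"

    using namespace llvm;

    // Mirrors the "same pointer operand" precondition used by
    // aliasSameBasePointerGEPs above.
    static bool haveSameStrippedBase(const GEPOperator *GEP1,
                                     const GEPOperator *GEP2) {
      return GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
                 GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
             GEP1->getPointerOperandType() == GEP2->getPointerOperandType();
    }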
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index 7341188..661922d 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -352,9 +352,6 @@
const Instruction *I) {
// If the memory can't be changed, then loads of the memory can't be
// clobbered.
- //
- // FIXME: We should handle invariant groups, as well. It's a bit harder,
- // because we need to pay close attention to invariant group barriers.
return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
AA.pointsToConstantMemory(cast<LoadInst>(I)->
getPointerOperand()));
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 8729db4..3e8fd4eb 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1668,7 +1668,7 @@
InsertedInsts.insert(ExtVal);
return true;
}
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
II->replaceAllUsesWith(II->getArgOperand(0));
II->eraseFromParent();
return true;
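
CodeGenPrepare here, and FastISel and SelectionDAGBuilder in the next two files, all treat the renamed intrinsic the same way: invariant.group information only matters to IR-level optimization, so at code generation time the call is dropped and its uses are forwarded to the pointer operand. A standalone sketch of that pattern (dropLaunder is an illustrative name, not code from this patch):

    #include "llvm/IR/IntrinsicInst.h"

    using namespace llvm;

    // If II is a launder.invariant.group call, forward its uses to the
    // pointer argument and erase the call. Returns true on success.
    static bool dropLaunder(IntrinsicInst *II) {
      if (II->getIntrinsicID() != Intrinsic::launder_invariant_group)
        return false;
      II->replaceAllUsesWith(II->getArgOperand(0));
      II->eraseFromParent();
      return true;
    }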
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 571fd66..166e7b1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1444,7 +1444,7 @@
updateValueMap(II, ResultReg);
return true;
}
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
case Intrinsic::expect: {
unsigned ResultReg = getRegForValue(II->getArgOperand(0));
if (!ResultReg)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 2ac4d3a..58d323a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5710,7 +5710,7 @@
}
case Intrinsic::annotation:
case Intrinsic::ptr_annotation:
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
// Drop the intrinsic, but forward the value
setValue(&I, getValue(I.getOperand(0)));
return nullptr;
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 13fb278..fd403f3 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -528,6 +528,16 @@
return true;
}
}
+ if (Name.startswith("invariant.group.barrier")) {
+ // Rename invariant.group.barrier to launder.invariant.group
+ auto Args = F->getFunctionType()->params();
+ Type *ObjectPtr[1] = {Args[0]};
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::launder_invariant_group, ObjectPtr);
+ return true;
+ }
+
break;
}
case 'm': {
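
The auto-upgrade above keeps old IR working: a declaration of llvm.invariant.group.barrier.* read from older bitcode is renamed and re-declared as llvm.launder.invariant.group with the same pointer overload. A hypothetical caller-side check of that remapping (not a test shipped with this patch):

    #include "llvm/IR/AutoUpgrade.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Intrinsics.h"

    using namespace llvm;

    // OldDecl is a declaration of llvm.invariant.group.barrier.* from old
    // bitcode; the upgrade should hand back the launder declaration.
    static bool upgradesToLaunder(Function *OldDecl) {
      Function *NewFn = nullptr;
      return UpgradeIntrinsicFunction(OldDecl, NewFn) && NewFn &&
             NewFn->getIntrinsicID() == Intrinsic::launder_invariant_group;
    }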
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index a479d04..2f95e93 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -499,7 +499,7 @@
enum PointerStripKind {
PSK_ZeroIndices,
PSK_ZeroIndicesAndAliases,
- PSK_ZeroIndicesAndAliasesAndBarriers,
+ PSK_ZeroIndicesAndAliasesAndInvariantGroups,
PSK_InBoundsConstantIndices,
PSK_InBounds
};
@@ -518,7 +518,7 @@
if (auto *GEP = dyn_cast<GEPOperator>(V)) {
switch (StripKind) {
case PSK_ZeroIndicesAndAliases:
- case PSK_ZeroIndicesAndAliasesAndBarriers:
+ case PSK_ZeroIndicesAndAliasesAndInvariantGroups:
case PSK_ZeroIndices:
if (!GEP->hasAllZeroIndices())
return V;
@@ -546,11 +546,11 @@
V = RV;
continue;
}
- // The result of invariant.group.barrier must alias it's argument,
+ // The result of launder.invariant.group must alias its argument,
// but it can't be marked with returned attribute, that's why it needs
// special case.
- if (StripKind == PSK_ZeroIndicesAndAliasesAndBarriers &&
- CS.getIntrinsicID() == Intrinsic::invariant_group_barrier) {
+ if (StripKind == PSK_ZeroIndicesAndAliasesAndInvariantGroups &&
+ CS.getIntrinsicID() == Intrinsic::launder_invariant_group) {
V = CS.getArgOperand(0);
continue;
}
@@ -576,8 +576,8 @@
return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
}
-const Value *Value::stripPointerCastsAndBarriers() const {
- return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliasesAndBarriers>(
+const Value *Value::stripPointerCastsAndInvariantGroups() const {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliasesAndInvariantGroups>(
this);
}
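
The special case above is needed because, as the comment says, the intrinsic's result must-aliases its argument but cannot be given the returned attribute, so the generic call handling never strips it. Purely as an illustration of the difference between the two strip kinds (stripLooksThroughLaunder is a made-up helper):

    #include "llvm/IR/IntrinsicInst.h"

    using namespace llvm;

    // For %q = call @llvm.launder.invariant.group(%p): the generic strip
    // stops at the call (there is no 'returned' attribute to follow), while
    // the variant renamed in this patch looks through it to %p.
    static bool stripLooksThroughLaunder(IntrinsicInst *LaunderCall) {
      if (LaunderCall->getIntrinsicID() != Intrinsic::launder_invariant_group)
        return false;
      const Value *P = LaunderCall->getArgOperand(0);
      return LaunderCall->stripPointerCasts() == LaunderCall &&
             LaunderCall->stripPointerCastsAndInvariantGroups() ==
                 P->stripPointerCastsAndInvariantGroups();
    }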
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index cf8469f..336467a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -454,7 +454,7 @@
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
case Intrinsic::objectsize:
return true;
default:
@@ -878,7 +878,7 @@
}
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
Intr->eraseFromParent();
// FIXME: I think the invariant marker should still theoretically apply,
// but the intrinsics need to be changed to accept pointers with any