Change getSlotIndex to return unsigned.
The underlying storage was already unsigned, but the interface exposed the
value as uint64_t. That is wasteful on 32-bit targets and appears to be the
root cause of a miscompilation on Windows, where a value was sign extended to
64 bits before being compared with the result of getSlotIndex.
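
The miscompilation itself is a compiler issue, but returning the narrower type
removes the mixed-width comparison that exposed it. A minimal standalone sketch
of that hazard follows; the names are hypothetical stand-ins, not the actual
LLVM interfaces or call site, and only the integer widths matter:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for illustration only.
    static unsigned StoredIndex = ~0U;        // storage is 32-bit, e.g. a ~0U sentinel
    static uint64_t getSlotIndexOld() { return StoredIndex; } // zero extends to 64 bits
    static unsigned getSlotIndexNew() { return StoredIndex; } // width matches storage

    int main() {
      int Index = -1; // a signed value that ends up compared with the slot index

      // Old interface: Index is sign extended to 0xFFFFFFFFFFFFFFFF, while the
      // stored 0xFFFFFFFF is zero extended, so the comparison is unexpectedly false.
      assert(static_cast<uint64_t>(Index) != getSlotIndexOld());

      // New interface: both operands are 32 bits wide, so the comparison behaves
      // as the surrounding code expects.
      assert(static_cast<unsigned>(Index) == getSlotIndexNew());
      return 0;
    }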
Patch by Pasi Parviainen!
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@180791 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/IR/AttributeImpl.h b/lib/IR/AttributeImpl.h
index b17aa43..2f7b449 100644
--- a/lib/IR/AttributeImpl.h
+++ b/lib/IR/AttributeImpl.h
@@ -228,7 +228,7 @@
/// is the index of the return, parameter, or function object that the
/// attributes are applied to, not the index into the AttrNodes list where the
/// attributes reside.
- uint64_t getSlotIndex(unsigned Slot) const {
+ unsigned getSlotIndex(unsigned Slot) const {
return AttrNodes[Slot].first;
}
@@ -270,7 +270,7 @@
}
// FIXME: This atrocity is temporary.
- uint64_t Raw(uint64_t Index) const;
+ uint64_t Raw(unsigned Index) const;
};
} // end llvm namespace
diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp
index 065f772..2443b34 100644
--- a/lib/IR/Attributes.cpp
+++ b/lib/IR/Attributes.cpp
@@ -501,7 +501,7 @@
// AttributeSetImpl Definition
//===----------------------------------------------------------------------===//
-uint64_t AttributeSetImpl::Raw(uint64_t Index) const {
+uint64_t AttributeSetImpl::Raw(unsigned Index) const {
for (unsigned I = 0, E = getNumAttributes(); I != E; ++I) {
if (getSlotIndex(I) != Index) continue;
const AttributeSetNode *ASN = AttrNodes[I].second;
@@ -889,7 +889,7 @@
return pImpl ? pImpl->getNumAttributes() : 0;
}
-uint64_t AttributeSet::getSlotIndex(unsigned Slot) const {
+unsigned AttributeSet::getSlotIndex(unsigned Slot) const {
assert(pImpl && Slot < pImpl->getNumAttributes() &&
"Slot # out of range!");
return pImpl->getSlotIndex(Slot);