Change getSlotIndex to return unsigned.
The underlying storage was already unsigned, but the interface used
uint64_t. This is wasteful on 32-bit targets and appears to be the root
cause of a miscompilation on Windows, where a value was being
sign-extended to 64 bits before being compared with the result of
getSlotIndex.
Patch by Pasi Parviainen!
llvm-svn: 180791
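
For illustration, a minimal sketch (not part of the patch) of the kind of
extension mismatch described above. SentinelIndex and getSlotIndexOld are
hypothetical stand-ins for an index such as AttributeSet::FunctionIndex
(~0U) and the old uint64_t-returning interface; the actual Windows failure
was a miscompile rather than standard-conforming conversion, but the shape
of the mismatch is the same:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical sentinel index, mirroring a value like ~0U.
    static const unsigned SentinelIndex = ~0U; // 0xFFFFFFFF

    // Old-style interface: widens the unsigned storage to uint64_t,
    // zero-extending it to 0x00000000FFFFFFFF.
    static uint64_t getSlotIndexOld() { return SentinelIndex; }

    int main() {
      int Index = -1; // same 32-bit pattern as SentinelIndex

      // The comparison happens at 64 bits, so Index is sign-extended to
      // 0xFFFFFFFFFFFFFFFF and never matches the zero-extended result.
      printf("64-bit compare: %s\n",
             static_cast<uint64_t>(Index) == getSlotIndexOld()
                 ? "equal" : "NOT equal");

      // With an unsigned interface the comparison stays at 32 bits and
      // matches the underlying storage.
      unsigned SlotIndexNew = SentinelIndex;
      printf("32-bit compare: %s\n",
             static_cast<unsigned>(Index) == SlotIndexNew
                 ? "equal" : "NOT equal");
      return 0;
    }

Returning unsigned keeps the comparison at the width of the stored index,
so no widening, and therefore no choice between sign and zero extension,
is involved at all.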
diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp
index 065f772..2443b34 100644
--- a/llvm/lib/IR/Attributes.cpp
+++ b/llvm/lib/IR/Attributes.cpp
@@ -501,7 +501,7 @@
// AttributeSetImpl Definition
//===----------------------------------------------------------------------===//
-uint64_t AttributeSetImpl::Raw(uint64_t Index) const {
+uint64_t AttributeSetImpl::Raw(unsigned Index) const {
for (unsigned I = 0, E = getNumAttributes(); I != E; ++I) {
if (getSlotIndex(I) != Index) continue;
const AttributeSetNode *ASN = AttrNodes[I].second;
@@ -889,7 +889,7 @@
return pImpl ? pImpl->getNumAttributes() : 0;
}
-uint64_t AttributeSet::getSlotIndex(unsigned Slot) const {
+unsigned AttributeSet::getSlotIndex(unsigned Slot) const {
assert(pImpl && Slot < pImpl->getNumAttributes() &&
"Slot # out of range!");
return pImpl->getSlotIndex(Slot);