[opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.
A similar migration script can be used to update the test cases, though a few more
test-case improvements/changes were required this time around (r229269-r229278):
import sys
import re

# Match a pre-change load (optionally "atomic"/"volatile"), capturing the
# pointee type, any addrspace qualifier, and the rest of the line after the
# '*', so the explicit result type can be spliced in before the pointer type.
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))
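The script is a plain stdin-to-stdout filter, so it can be piped over each .ll
test file in turn. A minimal sketch of the rewrite it performs, using one
pre-change line from the test update below (the variable names here are
illustrative only):

import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")
old = "  %tmp = load <4 x i32>* %X, align 16"
print(re.sub(pat, r"\1, \2\3*\4", old))
# prints:   %tmp = load <4 x i32>, <4 x i32>* %X, align 16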
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7649
llvm-svn: 230794
diff --git a/llvm/test/CodeGen/ARM/fast-isel-pred.ll b/llvm/test/CodeGen/ARM/fast-isel-pred.ll
index bf1593beef..ae8b67d 100644
--- a/llvm/test/CodeGen/ARM/fast-isel-pred.ll
+++ b/llvm/test/CodeGen/ARM/fast-isel-pred.ll
@@ -7,9 +7,9 @@
%X = alloca <4 x i32>, align 16
%Y = alloca <4 x float>, align 16
store i32 0, i32* %retval
- %tmp = load <4 x i32>* %X, align 16
+ %tmp = load <4 x i32>, <4 x i32>* %X, align 16
call void @__aa(<4 x i32> %tmp, i8* null, i32 3, <4 x float>* %Y)
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -24,15 +24,15 @@
store i8* %p, i8** %p.addr, align 4
store i32 %offset, i32* %offset.addr, align 4
store <4 x float>* %constants, <4 x float>** %constants.addr, align 4
- %tmp = load <4 x i32>* %v.addr, align 16
+ %tmp = load <4 x i32>, <4 x i32>* %v.addr, align 16
store <4 x i32> %tmp, <4 x i32>* %__a.addr.i, align 16
- %tmp.i = load <4 x i32>* %__a.addr.i, align 16
+ %tmp.i = load <4 x i32>, <4 x i32>* %__a.addr.i, align 16
%0 = bitcast <4 x i32> %tmp.i to <16 x i8>
%1 = bitcast <16 x i8> %0 to <4 x i32>
%vcvt.i = sitofp <4 x i32> %1 to <4 x float>
- %tmp1 = load i8** %p.addr, align 4
- %tmp2 = load i32* %offset.addr, align 4
- %tmp3 = load <4 x float>** %constants.addr, align 4
+ %tmp1 = load i8*, i8** %p.addr, align 4
+ %tmp2 = load i32, i32* %offset.addr, align 4
+ %tmp3 = load <4 x float>*, <4 x float>** %constants.addr, align 4
call void @__bb(<4 x float> %vcvt.i, i8* %tmp1, i32 %tmp2, <4 x float>* %tmp3)
ret void
}
@@ -48,9 +48,9 @@
store i8* %p, i8** %p.addr, align 4
store i32 %offset, i32* %offset.addr, align 4
store <4 x float>* %constants, <4 x float>** %constants.addr, align 4
- %tmp = load i64* %data, align 4
- %tmp1 = load i8** %p.addr, align 4
- %tmp2 = load i32* %offset.addr, align 4
+ %tmp = load i64, i64* %data, align 4
+ %tmp1 = load i8*, i8** %p.addr, align 4
+ %tmp2 = load i32, i32* %offset.addr, align 4
%add.ptr = getelementptr i8, i8* %tmp1, i32 %tmp2
%0 = bitcast i8* %add.ptr to i64*
%arrayidx = getelementptr inbounds i64, i64* %0, i32 0