[opaque pointer type] Add textual IR support for explicit type parameter to load instruction

Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more
test-case improvements/changes were required this time around (r229269-r229278):

import sys
import re

# Matches the old one-operand form "load [atomic] [volatile] <ty>* <ptr>",
# capturing the prefix through the pointee type (group 1), the type itself
# (group 2), and any addrspace qualifier (group 3), so the type can be
# repeated as an explicit first operand.
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

# Rewrite each input line to the new two-operand form "load <ty>, <ty>* <ptr>".
for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))
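
For illustration, a minimal sanity check of the pattern on one old-style
load (a hypothetical snippet, not part of the original script):

import re

# Same pattern as in the migration script above.
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

# Old one-operand syntax in, new two-operand syntax out:
print(re.sub(pat, r"\1, \2\3*\4", "%tmp1 = load <4 x i16>* %A"))
# prints: %tmp1 = load <4 x i16>, <4 x i16>* %A

In practice the script reads a whole .ll file on stdin and writes the
migrated file to stdout, so it can be applied to each test file in turn.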

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
diff --git a/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll b/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll
index ed3981b..0032c4a 100644
--- a/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll
+++ b/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll
@@ -4,7 +4,7 @@
 
 define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
 ;CHECK: @vclz16
-        %tmp1 = load <4 x i16>* %A
+        %tmp1 = load <4 x i16>, <4 x i16>* %A
         %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
 ;CHECK: {{call.*@llvm.ctlz.v4i16\(<4 x i16>.*, i1 false}}
         ret <4 x i16> %tmp2
@@ -12,7 +12,7 @@
 
 define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
 ;CHECK: @vcnt8
-        %tmp1 = load <8 x i8>* %A
+        %tmp1 = load <8 x i8>, <8 x i8>* %A
         %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
 ;CHECK: call <8 x i8> @llvm.ctpop.v8i8(<8 x i8>
         ret <8 x i8> %tmp2
diff --git a/llvm/test/Bitcode/case-ranges-3.3.ll b/llvm/test/Bitcode/case-ranges-3.3.ll
index 020b37f4..eb55ef1 100644
--- a/llvm/test/Bitcode/case-ranges-3.3.ll
+++ b/llvm/test/Bitcode/case-ranges-3.3.ll
@@ -10,7 +10,7 @@
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
   store i32 %x, i32* %2, align 4
-  %3 = load i32* %2, align 4
+  %3 = load i32, i32* %2, align 4
   switch i32 %3, label %9 [
 ; CHECK: switch i32 %3, label %9
     i32 -3, label %4
@@ -63,6 +63,6 @@
   br label %11
 
 ; <label>:11
-  %12 = load i32* %1
+  %12 = load i32, i32* %1
   ret i32 %12
 }
diff --git a/llvm/test/Bitcode/function-encoding-rel-operands.ll b/llvm/test/Bitcode/function-encoding-rel-operands.ll
index d7a7516..1307dd4 100644
--- a/llvm/test/Bitcode/function-encoding-rel-operands.ll
+++ b/llvm/test/Bitcode/function-encoding-rel-operands.ll
@@ -44,7 +44,7 @@
 define i1 @test_load(i32 %a, {i32, i32}* %ptr) nounwind {
 entry:
   %0 = getelementptr inbounds {i32, i32}, {i32, i32}* %ptr, i32 %a, i32 0
-  %1 = load i32* %0
+  %1 = load i32, i32* %0
   %2 = icmp eq i32 %1, %a
   ret i1 %2
 }
diff --git a/llvm/test/Bitcode/memInstructions.3.2.ll b/llvm/test/Bitcode/memInstructions.3.2.ll
index 356ecf7..f430086 100644
--- a/llvm/test/Bitcode/memInstructions.3.2.ll
+++ b/llvm/test/Bitcode/memInstructions.3.2.ll
@@ -27,53 +27,53 @@
   %ptr1 = alloca i8
   store i8 2, i8* %ptr1
 
-; CHECK: %res1 = load i8* %ptr1
-  %res1 = load i8* %ptr1
+; CHECK: %res1 = load i8, i8* %ptr1
+  %res1 = load i8, i8* %ptr1
 
-; CHECK-NEXT: %res2 = load volatile i8* %ptr1
-  %res2 = load volatile i8* %ptr1
+; CHECK-NEXT: %res2 = load volatile i8, i8* %ptr1
+  %res2 = load volatile i8, i8* %ptr1
 
-; CHECK-NEXT: %res3 = load i8* %ptr1, align 1
-  %res3 = load i8* %ptr1, align 1
+; CHECK-NEXT: %res3 = load i8, i8* %ptr1, align 1
+  %res3 = load i8, i8* %ptr1, align 1
 
-; CHECK-NEXT: %res4 = load volatile i8* %ptr1, align 1
-  %res4 = load volatile i8* %ptr1, align 1
+; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
+  %res4 = load volatile i8, i8* %ptr1, align 1
 
-; CHECK-NEXT: %res5 = load i8* %ptr1, !nontemporal !0
-  %res5 = load i8* %ptr1, !nontemporal !0
+; CHECK-NEXT: %res5 = load i8, i8* %ptr1, !nontemporal !0
+  %res5 = load i8, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: %res6 = load volatile i8* %ptr1, !nontemporal !0
-  %res6 = load volatile i8* %ptr1, !nontemporal !0
+; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
+  %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: %res7 = load i8* %ptr1, align 1, !nontemporal !0
-  %res7 = load i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
+  %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
 
-; CHECK-NEXT: %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
-  %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
+  %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
 
-; CHECK-NEXT: %res9 = load i8* %ptr1, !invariant.load !1
-  %res9 = load i8* %ptr1, !invariant.load !1
+; CHECK-NEXT: %res9 = load i8, i8* %ptr1, !invariant.load !1
+  %res9 = load i8, i8* %ptr1, !invariant.load !1
 
-; CHECK-NEXT: %res10 = load volatile i8* %ptr1, !invariant.load !1
-  %res10 = load volatile i8* %ptr1, !invariant.load !1
+; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
+  %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
 
-; CHECK-NEXT: %res11 = load i8* %ptr1, align 1, !invariant.load !1
-  %res11 = load i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
+  %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
 
-; CHECK-NEXT: %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
-  %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
+  %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
 
-; CHECK-NEXT: %res13 = load i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res13 = load i8* %ptr1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res13 = load i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res13 = load i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res14 = load volatile i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res14 = load volatile i8* %ptr1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res14 = load volatile i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res14 = load volatile i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res15 = load i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res15 = load i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res15 = load i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res15 = load i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res16 = load volatile i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res16 = load volatile i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res16 = load volatile i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res16 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
 
   ret void
 }
@@ -83,53 +83,53 @@
   %ptr1 = alloca i8
   store i8 2, i8* %ptr1
 
-; CHECK: %res1 = load atomic i8* %ptr1 unordered, align 1
-  %res1 = load atomic i8* %ptr1 unordered, align 1
+; CHECK: %res1 = load atomic i8, i8* %ptr1 unordered, align 1
+  %res1 = load atomic i8, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: %res2 = load atomic i8* %ptr1 monotonic, align 1
-  %res2 = load atomic i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
+  %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: %res3 = load atomic i8* %ptr1 acquire, align 1
-  %res3 = load atomic i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res3 = load atomic i8, i8* %ptr1 acquire, align 1
+  %res3 = load atomic i8, i8* %ptr1 acquire, align 1
 
-; CHECK-NEXT: %res4 = load atomic i8* %ptr1 seq_cst, align 1
-  %res4 = load atomic i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
+  %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: %res5 = load atomic volatile i8* %ptr1 unordered, align 1
-  %res5 = load atomic volatile i8* %ptr1 unordered, align 1
+; CHECK-NEXT: %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
+  %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
-  %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
+  %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: %res7 = load atomic volatile i8* %ptr1 acquire, align 1
-  %res7 = load atomic volatile i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
+  %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
 
-; CHECK-NEXT: %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
-  %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
+  %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
-  %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
+  %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
 
-; CHECK-NEXT: %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
-  %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
+  %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
 
-; CHECK-NEXT: %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
-  %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
+  %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
 
-; CHECK-NEXT: %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
-  %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
+  %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
 
-; CHECK-NEXT: %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
-  %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
+  %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
 
-; CHECK-NEXT: %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
-  %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
+  %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
 
-; CHECK-NEXT: %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
-  %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
+  %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
 
-; CHECK-NEXT: %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
-  %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
+  %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
 
   ret void
 }
diff --git a/llvm/test/Bitcode/metadata-2.ll b/llvm/test/Bitcode/metadata-2.ll
index 07371a3..a5367da 100644
--- a/llvm/test/Bitcode/metadata-2.ll
+++ b/llvm/test/Bitcode/metadata-2.ll
@@ -77,7 +77,7 @@
 
 define internal void @_D5tango4core8BitManip16__moduleinfoCtorZ() nounwind {
 moduleinfoCtorEntry:
-	%current = load %ModuleReference** @_Dmodule_ref		; <%ModuleReference*> [#uses=1]
+	%current = load %ModuleReference*, %ModuleReference** @_Dmodule_ref		; <%ModuleReference*> [#uses=1]
 	store %ModuleReference* %current, %ModuleReference** getelementptr (%ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, i32 0, i32 0)
 	store %ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, %ModuleReference** @_Dmodule_ref
 	ret void
diff --git a/llvm/test/Bitcode/upgrade-loop-metadata.ll b/llvm/test/Bitcode/upgrade-loop-metadata.ll
index be2a99a4..8dee907 100644
--- a/llvm/test/Bitcode/upgrade-loop-metadata.ll
+++ b/llvm/test/Bitcode/upgrade-loop-metadata.ll
@@ -10,7 +10,7 @@
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %0 = load i32* %i, align 4
+  %0 = load i32, i32* %i, align 4
   %cmp = icmp slt i32 %0, 16
   br i1 %cmp, label %for.body, label %for.end, !llvm.loop !1
 
@@ -18,7 +18,7 @@
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %1 = load i32* %i, align 4
+  %1 = load i32, i32* %i, align 4
   %inc = add nsw i32 %1, 1
   store i32 %inc, i32* %i, align 4
   br label %for.cond
diff --git a/llvm/test/Bitcode/use-list-order.ll b/llvm/test/Bitcode/use-list-order.ll
index 6617b9c5..f57b4a6 100644
--- a/llvm/test/Bitcode/use-list-order.ll
+++ b/llvm/test/Bitcode/use-list-order.ll
@@ -79,13 +79,13 @@
 
 define i1 @loadb() {
 entry:
-  %b = load i1* @b
+  %b = load i1, i1* @b
   ret i1 %b
 }
 
 define i1 @loada() {
 entry:
-  %a = load i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
+  %a = load i1, i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
   ret i1 %a
 }
 
@@ -115,7 +115,7 @@
 
 define i4 @globalAndFunctionFunctionUser() {
 entry:
-  %local = load i4* @globalAndFunction
+  %local = load i4, i4* @globalAndFunction
   ret i4 %local
 }