[opaque pointer type] Add textual IR support for explicit type parameter to load instruction

Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around: (r229269-r229278)

import fileinput  # NOTE(review): unused in this script — retained; verify before removing
import sys
import re

# Matches an old-style textual-IR load, e.g. "%0 = load <16 x i8>* @g":
#   group 1: everything from the "="/":"/line start through the pointee type
#   group 2: the pointee type itself (lazy, nested inside group 1)
#   group 3: optional " addrspace(N)" qualifier on the pointer type
#   group 4: the rest of the line (pointer operand: %reg, @global, null, ...)
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")


def migrate_line(line):
    """Rewrite one line of textual IR to the explicit-type load syntax.

    "load <ty>* <ptr>" becomes "load <ty>, <ty>* <ptr>"; lines that do
    not match the pattern are returned unchanged.
    """
    # \1 already ends with the pointee type, so append ", <type><addrspace>*"
    # to duplicate it as the explicit load type parameter.
    return pat.sub(r"\1, \2\3*\4", line)


def main():
    """Filter stdin to stdout, migrating every load instruction."""
    for line in sys.stdin:
        sys.stdout.write(migrate_line(line))


if __name__ == "__main__":
    main()

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
diff --git a/llvm/test/CodeGen/Mips/msa/3r-c.ll b/llvm/test/CodeGen/Mips/msa/3r-c.ll
index 6ec92c2..a3913e0 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-c.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-c.ll
@@ -10,8 +10,8 @@
 
 define void @llvm_mips_ceq_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_ceq_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_ceq_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ceq.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_ceq_b_RES
   ret void
@@ -32,8 +32,8 @@
 
 define void @llvm_mips_ceq_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_ceq_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_ceq_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ceq.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_ceq_h_RES
   ret void
@@ -54,8 +54,8 @@
 
 define void @llvm_mips_ceq_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_ceq_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_ceq_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ceq.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_ceq_w_RES
   ret void
@@ -76,8 +76,8 @@
 
 define void @llvm_mips_ceq_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_ceq_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_ceq_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ceq.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_ceq_d_RES
   ret void
@@ -98,8 +98,8 @@
 
 define void @llvm_mips_cle_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_cle_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_cle_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.cle.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_s_b_RES
   ret void
@@ -120,8 +120,8 @@
 
 define void @llvm_mips_cle_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_cle_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_cle_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.cle.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_s_h_RES
   ret void
@@ -142,8 +142,8 @@
 
 define void @llvm_mips_cle_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_cle_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_cle_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.cle.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_s_w_RES
   ret void
@@ -164,8 +164,8 @@
 
 define void @llvm_mips_cle_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_cle_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_cle_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.cle.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_s_d_RES
   ret void
@@ -186,8 +186,8 @@
 
 define void @llvm_mips_cle_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_cle_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_cle_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.cle.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_u_b_RES
   ret void
@@ -208,8 +208,8 @@
 
 define void @llvm_mips_cle_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_cle_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_cle_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.cle.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_u_h_RES
   ret void
@@ -230,8 +230,8 @@
 
 define void @llvm_mips_cle_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_cle_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_cle_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.cle.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_u_w_RES
   ret void
@@ -252,8 +252,8 @@
 
 define void @llvm_mips_cle_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_cle_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_cle_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.cle.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_u_d_RES
   ret void
@@ -274,8 +274,8 @@
 
 define void @llvm_mips_clt_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_clt_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_clt_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.clt.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_s_b_RES
   ret void
@@ -296,8 +296,8 @@
 
 define void @llvm_mips_clt_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_clt_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_clt_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.clt.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_s_h_RES
   ret void
@@ -318,8 +318,8 @@
 
 define void @llvm_mips_clt_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_clt_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_clt_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.clt.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_s_w_RES
   ret void
@@ -340,8 +340,8 @@
 
 define void @llvm_mips_clt_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_clt_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_clt_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.clt.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_s_d_RES
   ret void
@@ -362,8 +362,8 @@
 
 define void @llvm_mips_clt_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_clt_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_clt_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.clt.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_u_b_RES
   ret void
@@ -384,8 +384,8 @@
 
 define void @llvm_mips_clt_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_clt_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_clt_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.clt.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_u_h_RES
   ret void
@@ -406,8 +406,8 @@
 
 define void @llvm_mips_clt_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_clt_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_clt_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.clt.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_u_w_RES
   ret void
@@ -428,8 +428,8 @@
 
 define void @llvm_mips_clt_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_clt_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_clt_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.clt.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_u_d_RES
   ret void