[opaque pointer type] Add textual IR support for explicit type parameter to load instruction

Essentially the same as the GEP change in r230786.
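For reference, the syntax change has the same shape as in the diff below: the
load's result type, previously derived from the pointer operand's pointee
type, is now written explicitly before the pointer operand (value and pointer
names here are illustrative):

  %val = load i32* %ptr, align 4       ; old form
  %val = load i32, i32* %ptr, align 4  ; new form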

A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around (r229269-r229278):

import re
import sys

# Matches a textual load: an optional "atomic"/"volatile" prefix, the
# pointee type (captured inside group 1 as group 2), an optional
# address-space qualifier (group 3), the "*", and the rest of the line
# starting at the pointer operand (group 4).
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

# Re-emit each stdin line, inserting the pointee type again, after a
# comma, as the load's explicit result type.
for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))
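
As a usage sketch (the script and output filenames here are hypothetical),
the script filters a test file on stdin and writes the migrated IR to stdout:

  python migrate-load.py < llvm/test/CodeGen/PowerPC/vsx-minmax.ll > vsx-minmax.ll.new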

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
diff --git a/llvm/test/CodeGen/PowerPC/vsx-minmax.ll b/llvm/test/CodeGen/PowerPC/vsx-minmax.ll
index 47f50ab..ad72cac 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-minmax.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-minmax.ll
@@ -18,35 +18,35 @@
 define void @test1() #0 {
 ; CHECK-LABEL: @test1
 entry:
-  %0 = load volatile <4 x float>* @vf, align 16
-  %1 = load volatile <4 x float>* @vf, align 16
+  %0 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  %1 = load volatile <4 x float>, <4 x float>* @vf, align 16
   %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
 ; CHECK: xvmaxsp
   store <4 x float> %2, <4 x float>* @vf1, align 16
-  %3 = load <2 x double>* @vd, align 16
+  %3 = load <2 x double>, <2 x double>* @vd, align 16
   %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
 ; CHECK: xvmaxdp
   store <2 x double> %4, <2 x double>* @vd1, align 16
-  %5 = load volatile <4 x float>* @vf, align 16
-  %6 = load volatile <4 x float>* @vf, align 16
+  %5 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  %6 = load volatile <4 x float>, <4 x float>* @vf, align 16
   %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
 ; CHECK: xvmaxsp
   store <4 x float> %7, <4 x float>* @vf2, align 16
-  %8 = load volatile <4 x float>* @vf, align 16
-  %9 = load volatile <4 x float>* @vf, align 16
+  %8 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  %9 = load volatile <4 x float>, <4 x float>* @vf, align 16
   %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
 ; CHECK: xvminsp
   store <4 x float> %10, <4 x float>* @vf3, align 16
-  %11 = load <2 x double>* @vd, align 16
+  %11 = load <2 x double>, <2 x double>* @vd, align 16
   %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
 ; CHECK: xvmindp
   store <2 x double> %12, <2 x double>* @vd2, align 16
-  %13 = load volatile <4 x float>* @vf, align 16
-  %14 = load volatile <4 x float>* @vf, align 16
+  %13 = load volatile <4 x float>, <4 x float>* @vf, align 16
+  %14 = load volatile <4 x float>, <4 x float>* @vf, align 16
   %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
 ; CHECK: xvminsp
   store <4 x float> %15, <4 x float>* @vf4, align 16
-  %16 = load double* @d, align 8
+  %16 = load double, double* @d, align 8
   %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
 ; CHECK: xsmaxdp
   store double %17, double* @d1, align 8