[opaque pointer type] Add textual IR support for explicit type parameter to load instruction

Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around (r229269-r229278):

import sys
import re

# Matches the old one-operand form "load [atomic] [volatile] <ty>* <ptr>"
# (including FileCheck operands such as [[...]] and {{...}}), capturing the
# pointee type and any addrspace qualifier so the type can be repeated as the
# new explicit result type.
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  # Rewrite "load <ty>* <ptr>" to "load <ty>, <ty>* <ptr>" on matching lines.
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))
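
As a quick illustration (not part of the original commit), applying the same
substitution to one of the lines changed in the test below yields the new
two-operand form:

import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

line = "  %1 = load <1 x double>* %0, align 32"
print(re.sub(pat, r"\1, \2\3*\4", line))
# prints:   %1 = load <1 x double>, <1 x double>* %0, align 32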

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
diff --git a/llvm/test/CodeGen/PowerPC/s000-alias-misched.ll b/llvm/test/CodeGen/PowerPC/s000-alias-misched.ll
index f67b9f2..8e29635 100644
--- a/llvm/test/CodeGen/PowerPC/s000-alias-misched.ll
+++ b/llvm/test/CodeGen/PowerPC/s000-alias-misched.ll
@@ -37,7 +37,7 @@
   %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
   %arrayidx6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
   %0 = bitcast double* %arrayidx to <1 x double>*
-  %1 = load <1 x double>* %0, align 32
+  %1 = load <1 x double>, <1 x double>* %0, align 32
   %add = fadd <1 x double> %1, <double 1.000000e+00>
   %2 = bitcast double* %arrayidx6 to <1 x double>*
   store <1 x double> %add, <1 x double>* %2, align 32
@@ -45,7 +45,7 @@
   %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.322
   %arrayidx6.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.322
   %3 = bitcast double* %arrayidx.4 to <1 x double>*
-  %4 = load <1 x double>* %3, align 32
+  %4 = load <1 x double>, <1 x double>* %3, align 32
   %add.4 = fadd <1 x double> %4, <double 1.000000e+00>
   %5 = bitcast double* %arrayidx6.4 to <1 x double>*
   store <1 x double> %add.4, <1 x double>* %5, align 32
@@ -53,7 +53,7 @@
   %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.726
   %arrayidx6.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.726
   %6 = bitcast double* %arrayidx.8 to <1 x double>*
-  %7 = load <1 x double>* %6, align 32
+  %7 = load <1 x double>, <1 x double>* %6, align 32
   %add.8 = fadd <1 x double> %7, <double 1.000000e+00>
   %8 = bitcast double* %arrayidx6.8 to <1 x double>*
   store <1 x double> %add.8, <1 x double>* %8, align 32
@@ -61,7 +61,7 @@
   %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1130
   %arrayidx6.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1130
   %9 = bitcast double* %arrayidx.12 to <1 x double>*
-  %10 = load <1 x double>* %9, align 32
+  %10 = load <1 x double>, <1 x double>* %9, align 32
   %add.12 = fadd <1 x double> %10, <double 1.000000e+00>
   %11 = bitcast double* %arrayidx6.12 to <1 x double>*
   store <1 x double> %add.12, <1 x double>* %11, align 32