[opaque pointer type] Add textual IR support for explicit type parameter to load instruction

Essentially the same as the GEP change in r230786.
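
For reference, the result type, previously implied by the pointer operand's
pointee type, is now spelled explicitly as a leading operand (types here are
illustrative):

  Before:  %val = load i32* %ptr
  After:   %val = load i32, i32* %ptr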

A similar migration script can be used to update test cases, though a few more
test-case improvements/changes were required this time around (r229269-r229278):

import re
import sys

# Match the old one-type form "load [atomic] [volatile] <ty>*" and capture:
#   \1  everything up to and including the pointee type
#   \2  the pointee type itself
#   \3  an optional addrspace qualifier
#   \4  the rest of the line (the pointer operand)
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(pat.sub(r"\1, \2\3*\4", line))
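
As a quick sanity check, the rewrite can be exercised on a few representative
lines, e.g. appended to the script above or in a REPL (sample inputs chosen
for illustration, not drawn from any particular test):

# Reuses `pat` from the script above.
for s in ["%v = load volatile i8* @var_8bit",
          "%p = load i32 addrspace(1)* %q",
          "%a = load atomic i32* %x seq_cst, align 4"]:
  print(pat.sub(r"\1, \2\3*\4", s))
# %v = load volatile i8, i8* @var_8bit
# %p = load i32, i32 addrspace(1)* %q
# %a = load atomic i32, i32* %x seq_cst, align 4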

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
diff --git a/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll b/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
index 92e366a..9777d3e 100644
--- a/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
@@ -16,26 +16,26 @@
 ; promote i8 during lowering.
 
 ; match a sign-extending load 8-bit -> 32-bit
-   %val8_sext32 = load volatile i8* @var_8bit
+   %val8_sext32 = load volatile i8, i8* @var_8bit
    %val32_signed = sext i8 %val8_sext32 to i32
    store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: adrp {{x[0-9]+}}, var_8bit
 ; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match a zero-extending load volatile 8-bit -> 32-bit
-  %val8_zext32 = load volatile i8* @var_8bit
+  %val8_zext32 = load volatile i8, i8* @var_8bit
   %val32_unsigned = zext i8 %val8_zext32 to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match an any-extending load volatile 8-bit -> 32-bit
-  %val8_anyext = load volatile i8* @var_8bit
+  %val8_anyext = load volatile i8, i8* @var_8bit
   %newval8 = add i8 %val8_anyext, 1
   store volatile i8 %newval8, i8* @var_8bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; match a sign-extending load volatile 8-bit -> 64-bit
-  %val8_sext64 = load volatile i8* @var_8bit
+  %val8_sext64 = load volatile i8, i8* @var_8bit
   %val64_signed = sext i8 %val8_sext64 to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldrsb {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
@@ -43,19 +43,19 @@
 ; match a zero-extending load volatile 8-bit -> 64-bit.
 ; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
 ; of x0 so it's identical to loading to 32-bits.
-  %val8_zext64 = load volatile i8* @var_8bit
+  %val8_zext64 = load volatile i8, i8* @var_8bit
   %val64_unsigned = zext i8 %val8_zext64 to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; truncating store volatile 32-bits to 8-bits
-  %val32 = load volatile i32* @var_32bit
+  %val32 = load volatile i32, i32* @var_32bit
   %val8_trunc32 = trunc i32 %val32 to i8
   store volatile i8 %val8_trunc32, i8* @var_8bit
 ; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
 
 ; truncating store volatile 64-bits to 8-bits
-  %val64 = load volatile i64* @var_64bit
+  %val64 = load volatile i64, i64* @var_64bit
   %val8_trunc64 = trunc i64 %val64 to i8
   store volatile i8 %val8_trunc64, i8* @var_8bit
 ; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
@@ -70,26 +70,26 @@
 ; lowering.
 
 ; match a sign-extending load volatile 16-bit -> 32-bit
-  %val16_sext32 = load volatile i16* @var_16bit
+  %val16_sext32 = load volatile i16, i16* @var_16bit
   %val32_signed = sext i16 %val16_sext32 to i32
   store volatile i32 %val32_signed, i32* @var_32bit
 ; CHECK: adrp {{x[0-9]+}}, var_16bit
 ; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match a zero-extending load volatile 16-bit -> 32-bit
-  %val16_zext32 = load volatile i16* @var_16bit
+  %val16_zext32 = load volatile i16, i16* @var_16bit
   %val32_unsigned = zext i16 %val16_zext32 to i32
   store volatile i32 %val32_unsigned, i32* @var_32bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match an any-extending load volatile 16-bit -> 32-bit
-  %val16_anyext = load volatile i16* @var_16bit
+  %val16_anyext = load volatile i16, i16* @var_16bit
   %newval16 = add i16 %val16_anyext, 1
   store volatile i16 %newval16, i16* @var_16bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; match a sign-extending load volatile 16-bit -> 64-bit
-  %val16_sext64 = load volatile i16* @var_16bit
+  %val16_sext64 = load volatile i16, i16* @var_16bit
   %val64_signed = sext i16 %val16_sext64 to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
@@ -97,19 +97,19 @@
 ; match a zero-extending load volatile 16-bit -> 64-bit.
 ; This uses the fact that ldrh w0, [x0] will zero out the high 32-bits
 ; of x0 so it's identical to loading to 32-bits.
-  %val16_zext64 = load volatile i16* @var_16bit
+  %val16_zext64 = load volatile i16, i16* @var_16bit
   %val64_unsigned = zext i16 %val16_zext64 to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; truncating store volatile 32-bits to 16-bits
-  %val32 = load volatile i32* @var_32bit
+  %val32 = load volatile i32, i32* @var_32bit
   %val16_trunc32 = trunc i32 %val32 to i16
   store volatile i16 %val16_trunc32, i16* @var_16bit
 ; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
 
 ; truncating store volatile 64-bits to 16-bits
-  %val64 = load volatile i64* @var_64bit
+  %val64 = load volatile i64, i64* @var_64bit
   %val16_trunc64 = trunc i64 %val64 to i16
   store volatile i16 %val16_trunc64, i16* @var_16bit
 ; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
@@ -121,28 +121,28 @@
 ; CHECK-LABEL: ldst_32bit:
 
 ; Straight 32-bit load/store
-  %val32_noext = load volatile i32* @var_32bit
+  %val32_noext = load volatile i32, i32* @var_32bit
   store volatile i32 %val32_noext, i32* @var_32bit
 ; CHECK: adrp {{x[0-9]+}}, var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 
 ; Zero-extension to 64-bits
-  %val32_zext = load volatile i32* @var_32bit
+  %val32_zext = load volatile i32, i32* @var_32bit
   %val64_unsigned = zext i32 %val32_zext to i64
   store volatile i64 %val64_unsigned, i64* @var_64bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Sign-extension to 64-bits
-  %val32_sext = load volatile i32* @var_32bit
+  %val32_sext = load volatile i32, i32* @var_32bit
   %val64_signed = sext i32 %val32_sext to i64
   store volatile i64 %val64_signed, i64* @var_64bit
 ; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Truncation from 64-bits
-  %val64_trunc = load volatile i64* @var_64bit
+  %val64_trunc = load volatile i64, i64* @var_64bit
   %val32_trunc = trunc i64 %val64_trunc to i32
   store volatile i32 %val32_trunc, i32* @var_32bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
@@ -163,62 +163,62 @@
 
 define void @ldst_complex_offsets() {
 ; CHECK: ldst_complex_offsets
-  %arr8_addr = load volatile i8** @arr8
+  %arr8_addr = load volatile i8*, i8** @arr8
 ; CHECK: adrp {{x[0-9]+}}, arr8
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr8]
 
   %arr8_sub1_addr = getelementptr i8, i8* %arr8_addr, i64 1
-  %arr8_sub1 = load volatile i8* %arr8_sub1_addr
+  %arr8_sub1 = load volatile i8, i8* %arr8_sub1_addr
   store volatile i8 %arr8_sub1, i8* @var_8bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #1]
 
   %arr8_sub4095_addr = getelementptr i8, i8* %arr8_addr, i64 4095
-  %arr8_sub4095 = load volatile i8* %arr8_sub4095_addr
+  %arr8_sub4095 = load volatile i8, i8* %arr8_sub4095_addr
   store volatile i8 %arr8_sub4095, i8* @var_8bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #4095]
 
 
-  %arr16_addr = load volatile i16** @arr16
+  %arr16_addr = load volatile i16*, i16** @arr16
 ; CHECK: adrp {{x[0-9]+}}, arr16
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr16]
 
   %arr16_sub1_addr = getelementptr i16, i16* %arr16_addr, i64 1
-  %arr16_sub1 = load volatile i16* %arr16_sub1_addr
+  %arr16_sub1 = load volatile i16, i16* %arr16_sub1_addr
   store volatile i16 %arr16_sub1, i16* @var_16bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #2]
 
   %arr16_sub4095_addr = getelementptr i16, i16* %arr16_addr, i64 4095
-  %arr16_sub4095 = load volatile i16* %arr16_sub4095_addr
+  %arr16_sub4095 = load volatile i16, i16* %arr16_sub4095_addr
   store volatile i16 %arr16_sub4095, i16* @var_16bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #8190]
 
 
-  %arr32_addr = load volatile i32** @arr32
+  %arr32_addr = load volatile i32*, i32** @arr32
 ; CHECK: adrp {{x[0-9]+}}, arr32
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr32]
 
   %arr32_sub1_addr = getelementptr i32, i32* %arr32_addr, i64 1
-  %arr32_sub1 = load volatile i32* %arr32_sub1_addr
+  %arr32_sub1 = load volatile i32, i32* %arr32_sub1_addr
   store volatile i32 %arr32_sub1, i32* @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #4]
 
   %arr32_sub4095_addr = getelementptr i32, i32* %arr32_addr, i64 4095
-  %arr32_sub4095 = load volatile i32* %arr32_sub4095_addr
+  %arr32_sub4095 = load volatile i32, i32* %arr32_sub4095_addr
   store volatile i32 %arr32_sub4095, i32* @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #16380]
 
 
-  %arr64_addr = load volatile i64** @arr64
+  %arr64_addr = load volatile i64*, i64** @arr64
 ; CHECK: adrp {{x[0-9]+}}, arr64
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr64]
 
   %arr64_sub1_addr = getelementptr i64, i64* %arr64_addr, i64 1
-  %arr64_sub1 = load volatile i64* %arr64_sub1_addr
+  %arr64_sub1 = load volatile i64, i64* %arr64_sub1_addr
   store volatile i64 %arr64_sub1, i64* @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #8]
 
   %arr64_sub4095_addr = getelementptr i64, i64* %arr64_addr, i64 4095
-  %arr64_sub4095 = load volatile i64* %arr64_sub4095_addr
+  %arr64_sub4095 = load volatile i64, i64* %arr64_sub4095_addr
   store volatile i64 %arr64_sub4095, i64* @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #32760]
 
@@ -228,7 +228,7 @@
 define void @ldst_float() {
 ; CHECK-LABEL: ldst_float:
 
-   %valfp = load volatile float* @var_float
+   %valfp = load volatile float, float* @var_float
 ; CHECK: adrp {{x[0-9]+}}, var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_float]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
@@ -243,7 +243,7 @@
 define void @ldst_double() {
 ; CHECK-LABEL: ldst_double:
 
-   %valfp = load volatile double* @var_double
+   %valfp = load volatile double, double* @var_double
 ; CHECK: adrp {{x[0-9]+}}, var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_double]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},