[opaque pointer type] Add textual IR support for explicit type parameter to load instruction

Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around (r229269-r229278):

import sys
import re

# Matches the old one-operand form "load [atomic] [volatile] <ty>* <ptr>",
# with an optional addrspace on the pointer type. The leading (?:=|:|^)
# alternation lets the rewrite fire after '=' in IR and after ':' in
# FileCheck CHECK lines; the [[ and {{ alternatives in the operand position
# accept FileCheck captures as well as plain IR operands.
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

# Re-emit each line of stdin, splicing the loaded type in as an explicit
# first operand: "load <ty>* <ptr>" becomes "load <ty>, <ty>* <ptr>".
for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))
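
As a quick sanity check (illustrative only, not part of the original script),
feeding a pre-migration line from the diff below through the same substitution
produces the expected two-operand form:

import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

old = "  %1 = load atomic i32* %p acquire, align 4\n"
new = re.sub(pat, r"\1, \2\3*\4", old)
assert new == "  %1 = load atomic i32, i32* %p acquire, align 4\n"

Note that the pointee type (group 2) is nested at the tail of group 1, so the
replacement emits it twice: once as the new explicit result type after the
comma, and once as part of the rebuilt pointer type.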

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
diff --git a/llvm/test/CodeGen/X86/atomic_mi.ll b/llvm/test/CodeGen/X86/atomic_mi.ll
index 19e019e..7a6204f 100644
--- a/llvm/test/CodeGen/X86/atomic_mi.ll
+++ b/llvm/test/CodeGen/X86/atomic_mi.ll
@@ -103,7 +103,7 @@
 ; X32-NOT: lock
 ; X32: addb
 ; X32-NOT: movb
-  %1 = load atomic i8* %p seq_cst, align 1
+  %1 = load atomic i8, i8* %p seq_cst, align 1
   %2 = add i8 %1, 2
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -116,7 +116,7 @@
 ; X64-NOT: addw
 ; X32-LABEL: add_16
 ; X32-NOT: addw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = add i16 %1, 2
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -131,7 +131,7 @@
 ; X32-NOT: lock
 ; X32: addl
 ; X32-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = add i32 %1, 2
   store atomic i32 %2, i32* %p monotonic, align 4
   ret void
@@ -144,7 +144,7 @@
 ; X64-NOT: movq
 ;   We do not check X86-32 as it cannot do 'addq'.
 ; X32-LABEL: add_64
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = add i64 %1, 2
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -155,7 +155,7 @@
 ; X64: xchgl
 ; X32-LABEL: add_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = add i32 %1, 2
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -172,7 +172,7 @@
 ; X32-NOT: lock
 ; X32: andb
 ; X32-NOT: movb
-  %1 = load atomic i8* %p monotonic, align 1
+  %1 = load atomic i8, i8* %p monotonic, align 1
   %2 = and i8 %1, 2
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -185,7 +185,7 @@
 ; X64-NOT: andw
 ; X32-LABEL: and_16
 ; X32-NOT: andw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = and i16 %1, 2
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -200,7 +200,7 @@
 ; X32-NOT: lock
 ; X32: andl
 ; X32-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = and i32 %1, 2
   store atomic i32 %2, i32* %p release, align 4
   ret void
@@ -213,7 +213,7 @@
 ; X64-NOT: movq
 ;   We do not check X86-32 as it cannot do 'andq'.
 ; X32-LABEL: and_64
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = and i64 %1, 2
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -224,7 +224,7 @@
 ; X64: xchgl
 ; X32-LABEL: and_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = and i32 %1, 2
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -241,7 +241,7 @@
 ; X32-NOT: lock
 ; X32: orb
 ; X32-NOT: movb
-  %1 = load atomic i8* %p acquire, align 1
+  %1 = load atomic i8, i8* %p acquire, align 1
   %2 = or i8 %1, 2
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -252,7 +252,7 @@
 ; X64-NOT: orw
 ; X32-LABEL: or_16
 ; X32-NOT: orw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = or i16 %1, 2
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -267,7 +267,7 @@
 ; X32-NOT: lock
 ; X32: orl
 ; X32-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = or i32 %1, 2
   store atomic i32 %2, i32* %p release, align 4
   ret void
@@ -280,7 +280,7 @@
 ; X64-NOT: movq
 ;   We do not check X86-32 as it cannot do 'orq'.
 ; X32-LABEL: or_64
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = or i64 %1, 2
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -291,7 +291,7 @@
 ; X64: xchgl
 ; X32-LABEL: or_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = or i32 %1, 2
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -308,7 +308,7 @@
 ; X32-NOT: lock
 ; X32: xorb
 ; X32-NOT: movb
-  %1 = load atomic i8* %p acquire, align 1
+  %1 = load atomic i8, i8* %p acquire, align 1
   %2 = xor i8 %1, 2
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -319,7 +319,7 @@
 ; X64-NOT: xorw
 ; X32-LABEL: xor_16
 ; X32-NOT: xorw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = xor i16 %1, 2
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -334,7 +334,7 @@
 ; X32-NOT: lock
 ; X32: xorl
 ; X32-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = xor i32 %1, 2
   store atomic i32 %2, i32* %p release, align 4
   ret void
@@ -347,7 +347,7 @@
 ; X64-NOT: movq
 ;   We do not check X86-32 as it cannot do 'xorq'.
 ; X32-LABEL: xor_64
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = xor i64 %1, 2
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -358,7 +358,7 @@
 ; X64: xchgl
 ; X32-LABEL: xor_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = xor i32 %1, 2
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -378,7 +378,7 @@
 ; SLOW_INC-LABEL: inc_8
 ; SLOW_INC-NOT: incb
 ; SLOW_INC-NOT: movb
-  %1 = load atomic i8* %p seq_cst, align 1
+  %1 = load atomic i8, i8* %p seq_cst, align 1
   %2 = add i8 %1, 1
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -393,7 +393,7 @@
 ; X32-NOT: incw
 ; SLOW_INC-LABEL: inc_16
 ; SLOW_INC-NOT: incw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = add i16 %1, 1
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -411,7 +411,7 @@
 ; SLOW_INC-LABEL: inc_32
 ; SLOW_INC-NOT: incl
 ; SLOW_INC-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = add i32 %1, 1
   store atomic i32 %2, i32* %p monotonic, align 4
   ret void
@@ -427,7 +427,7 @@
 ; SLOW_INC-LABEL: inc_64
 ; SLOW_INC-NOT: incq
 ; SLOW_INC-NOT: movq
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = add i64 %1, 1
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -438,7 +438,7 @@
 ; X64: xchgl
 ; X32-LABEL: inc_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = add i32 %1, 1
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void
@@ -458,7 +458,7 @@
 ; SLOW_INC-LABEL: dec_8
 ; SLOW_INC-NOT: decb
 ; SLOW_INC-NOT: movb
-  %1 = load atomic i8* %p seq_cst, align 1
+  %1 = load atomic i8, i8* %p seq_cst, align 1
   %2 = sub i8 %1, 1
   store atomic i8 %2, i8* %p release, align 1
   ret void
@@ -473,7 +473,7 @@
 ; X32-NOT: decw
 ; SLOW_INC-LABEL: dec_16
 ; SLOW_INC-NOT: decw
-  %1 = load atomic i16* %p acquire, align 2
+  %1 = load atomic i16, i16* %p acquire, align 2
   %2 = sub i16 %1, 1
   store atomic i16 %2, i16* %p release, align 2
   ret void
@@ -491,7 +491,7 @@
 ; SLOW_INC-LABEL: dec_32
 ; SLOW_INC-NOT: decl
 ; SLOW_INC-NOT: movl
-  %1 = load atomic i32* %p acquire, align 4
+  %1 = load atomic i32, i32* %p acquire, align 4
   %2 = sub i32 %1, 1
   store atomic i32 %2, i32* %p monotonic, align 4
   ret void
@@ -507,7 +507,7 @@
 ; SLOW_INC-LABEL: dec_64
 ; SLOW_INC-NOT: decq
 ; SLOW_INC-NOT: movq
-  %1 = load atomic i64* %p acquire, align 8
+  %1 = load atomic i64, i64* %p acquire, align 8
   %2 = sub i64 %1, 1
   store atomic i64 %2, i64* %p release, align 8
   ret void
@@ -518,7 +518,7 @@
 ; X64: xchgl
 ; X32-LABEL: dec_32_seq_cst
 ; X32: xchgl
-  %1 = load atomic i32* %p monotonic, align 4
+  %1 = load atomic i32, i32* %p monotonic, align 4
   %2 = sub i32 %1, 1
   store atomic i32 %2, i32* %p seq_cst, align 4
   ret void