[InstCombine] don't assume 'inbounds' for bitcast pointer to GEP transform (PR43501)

https://bugs.llvm.org/show_bug.cgi?id=43501
We can't declare a GEP 'inbounds' in general. But we can salvage that
information when the source pointer has known dereferenceable bytes.
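
For illustration (these are test cases updated below), with no information
about the source pointer, the bitcast-to-GEP transform no longer adds
'inbounds':

  define float* @test28([4 x float]* %A) {
    %c = bitcast [4 x float]* %A to float*
    ; -> %c = getelementptr [4 x float], [4 x float]* %A, i64 0, i64 0
    ret float* %c
  }

but if the source pointer carries a dereferenceable attribute, it must point
into an allocated object, so the GEP keeps 'inbounds':

  define float @matching_scalar_smallest_deref(<4 x float>* dereferenceable(1) %p) {
    %bc = bitcast <4 x float>* %p to float*
    ; -> %bc = getelementptr inbounds <4 x float>, <4 x float>* %p, i64 0, i64 0
    %r = load float, float* %bc, align 16
    ret float %r
  }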

Differential Revision: https://reviews.llvm.org/D68244

llvm-svn: 373847
diff --git a/clang/test/CodeGen/aapcs-bitfield.c b/clang/test/CodeGen/aapcs-bitfield.c
index ad7a73c..8d62f10 100644
--- a/clang/test/CodeGen/aapcs-bitfield.c
+++ b/clang/test/CodeGen/aapcs-bitfield.c
@@ -8,7 +8,7 @@
 
 // LE-LABEL: @st0_check_load(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
 // LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
@@ -17,7 +17,7 @@
 //
 // BE-LABEL: @st0_check_load(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
 // BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -29,7 +29,7 @@
 
 // LE-LABEL: @st0_check_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
 // LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
@@ -38,7 +38,7 @@
 //
 // BE-LABEL: @st0_check_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
 // BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
@@ -56,7 +56,7 @@
 
 // LE-LABEL: @st1_check_load(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
 // LE-NEXT:    [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
@@ -64,7 +64,7 @@
 //
 // BE-LABEL: @st1_check_load(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 10
@@ -77,7 +77,7 @@
 
 // LE-LABEL: @st1_check_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
@@ -86,7 +86,7 @@
 //
 // BE-LABEL: @st1_check_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
@@ -151,7 +151,7 @@
 
 // LE-LABEL: @st3_check_load(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
 // LE-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
@@ -160,7 +160,7 @@
 //
 // BE-LABEL: @st3_check_load(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
 // BE-NEXT:    [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -172,7 +172,7 @@
 
 // LE-LABEL: @st3_check_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
 // LE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
@@ -181,7 +181,7 @@
 //
 // BE-LABEL: @st3_check_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST3:%.*]], %struct.st3* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 2
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
 // BE-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
@@ -199,7 +199,7 @@
 
 // LE-LABEL: @st4_check_load(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 2
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
@@ -210,7 +210,7 @@
 //
 // BE-LABEL: @st4_check_load(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
@@ -225,7 +225,7 @@
 
 // LE-LABEL: @st4_check_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -15873
 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
@@ -234,7 +234,7 @@
 //
 // BE-LABEL: @st4_check_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -125
 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 4
@@ -247,7 +247,7 @@
 
 // LE-LABEL: @st4_check_nonv_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
@@ -256,7 +256,7 @@
 //
 // BE-LABEL: @st4_check_nonv_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
@@ -323,7 +323,7 @@
 
 // LE-LABEL: @st6_check_load(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 4
@@ -342,7 +342,7 @@
 //
 // BE-LABEL: @st6_check_load(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
@@ -366,7 +366,7 @@
 
 // LE-LABEL: @st6_check_store(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
@@ -382,7 +382,7 @@
 //
 // BE-LABEL: @st6_check_store(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
@@ -492,13 +492,13 @@
 
 // LE-LABEL: @st8_check_assignment(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    store i16 -1, i16* [[TMP0]], align 4
 // LE-NEXT:    ret i32 65535
 //
 // BE-LABEL: @st8_check_assignment(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    store i16 -1, i16* [[TMP0]], align 4
 // BE-NEXT:    ret i32 65535
 //
@@ -512,14 +512,14 @@
 
 // LE-LABEL: @read_st9(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
 // LE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // LE-NEXT:    ret i32 [[BF_CAST]]
 //
 // BE-LABEL: @read_st9(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
 // BE-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
 // BE-NEXT:    ret i32 [[BF_CAST]]
@@ -530,13 +530,13 @@
 
 // LE-LABEL: @store_st9(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    store volatile i8 1, i8* [[TMP0]], align 4
 // LE-NEXT:    ret void
 //
 // BE-LABEL: @store_st9(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    store volatile i8 1, i8* [[TMP0]], align 4
 // BE-NEXT:    ret void
 //
@@ -546,7 +546,7 @@
 
 // LE-LABEL: @increment_st9(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
 // LE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
 // LE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
@@ -554,7 +554,7 @@
 //
 // BE-LABEL: @increment_st9(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
 // BE-NEXT:    [[INC:%.*]] = add i8 [[BF_LOAD]], 1
 // BE-NEXT:    store volatile i8 [[INC]], i8* [[TMP0]], align 4
@@ -571,7 +571,7 @@
 
 // LE-LABEL: @read_st10(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 7
 // LE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
@@ -580,7 +580,7 @@
 //
 // BE-LABEL: @read_st10(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 1
 // BE-NEXT:    [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
@@ -593,7 +593,7 @@
 
 // LE-LABEL: @store_st10(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -511
 // LE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 2
@@ -602,7 +602,7 @@
 //
 // BE-LABEL: @store_st10(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -32641
 // BE-NEXT:    [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
@@ -615,7 +615,7 @@
 
 // LE-LABEL: @increment_st10(
 // LE-NEXT:  entry:
-// LE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
+// LE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
 // LE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // LE-NEXT:    [[TMP1:%.*]] = add i16 [[BF_LOAD]], 2
@@ -627,7 +627,7 @@
 //
 // BE-LABEL: @increment_st10(
 // BE-NEXT:  entry:
-// BE-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
+// BE-NEXT:    [[TMP0:%.*]] = getelementptr [[STRUCT_ST10:%.*]], %struct.st10* [[M:%.*]], i32 0, i32 0
 // BE-NEXT:    [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[BF_LOAD1:%.*]] = load volatile i16, i16* [[TMP0]], align 4
 // BE-NEXT:    [[TMP1:%.*]] = add i16 [[BF_LOAD]], 128
diff --git a/clang/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp b/clang/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp
index 9567245..c99df0e 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp
@@ -20,7 +20,7 @@
 T* test2(A* x) { return &dynamic_cast<T&>(*x); }
 // CHECK-LABEL: define dso_local %struct.T* @"?test2@@YAPAUT@@PAUA@@@Z"(%struct.A* %x)
 // CHECK:        [[CAST:%.*]] = bitcast %struct.A* %x to i8*
-// CHECK-NEXT:   [[VBPTRPTR:%.*]] = getelementptr inbounds %struct.A, %struct.A* %x, i32 0, i32 0
+// CHECK-NEXT:   [[VBPTRPTR:%.*]] = getelementptr %struct.A, %struct.A* %x, i32 0, i32 0
 // CHECK-NEXT:   [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4
 // CHECK-NEXT:   [[VBOFFP:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1
 // CHECK-NEXT:   [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4
@@ -31,7 +31,7 @@
 
 T* test3(B* x) { return &dynamic_cast<T&>(*x); }
 // CHECK-LABEL: define dso_local %struct.T* @"?test3@@YAPAUT@@PAUB@@@Z"(%struct.B* %x)
-// CHECK:        [[VOIDP:%.*]] = getelementptr inbounds %struct.B, %struct.B* %x, i32 0, i32 0, i32 0
+// CHECK:        [[VOIDP:%.*]] = getelementptr %struct.B, %struct.B* %x, i32 0, i32 0, i32 0
 // CHECK-NEXT:   [[VBPTR:%.*]] = getelementptr inbounds i8, i8* [[VOIDP]], i32 4
 // CHECK-NEXT:   [[VBPTRPTR:%.*]] = bitcast i8* [[VBPTR:%.*]] to i32**
 // CHECK-NEXT:   [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4
@@ -55,7 +55,7 @@
 // CHECK:        [[CHECK:%.*]] = icmp eq %struct.A* %x, null
 // CHECK-NEXT:   br i1 [[CHECK]]
 // CHECK:        [[VOIDP:%.*]] = bitcast %struct.A* %x to i8*
-// CHECK-NEXT:   [[VBPTRPTR:%.*]] = getelementptr inbounds %struct.A, %struct.A* %x, i32 0, i32 0
+// CHECK-NEXT:   [[VBPTRPTR:%.*]] = getelementptr %struct.A, %struct.A* %x, i32 0, i32 0
 // CHECK-NEXT:   [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4
 // CHECK-NEXT:   [[VBOFFP:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1
 // CHECK-NEXT:   [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4
@@ -70,7 +70,7 @@
 // CHECK-LABEL: define dso_local %struct.T* @"?test6@@YAPAUT@@PAUB@@@Z"(%struct.B* %x)
 // CHECK:        [[CHECK:%.*]] = icmp eq %struct.B* %x, null
 // CHECK-NEXT:   br i1 [[CHECK]]
-// CHECK:        [[CAST:%.*]] = getelementptr inbounds %struct.B, %struct.B* %x, i32 0, i32 0, i32 0
+// CHECK:        [[CAST:%.*]] = getelementptr %struct.B, %struct.B* %x, i32 0, i32 0, i32 0
 // CHECK-NEXT:   [[VBPTR:%.*]] = getelementptr inbounds i8, i8* [[CAST]], i32 4
 // CHECK-NEXT:   [[VBPTRPTR:%.*]] = bitcast i8* [[VBPTR]] to i32**
 // CHECK-NEXT:   [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4
@@ -78,7 +78,7 @@
 // CHECK-NEXT:   [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4
 // CHECK-NEXT:   [[DELTA:%.*]] = add nsw i32 [[VBOFFS]], 4
 // CHECK-NEXT:   [[ADJ:%.*]] = getelementptr inbounds i8, i8* [[CAST]], i32 [[DELTA]]
-// CHECK-NEXT:   [[CALL:%.*]] = tail call i8* @__RTDynamicCast(i8* nonnull [[ADJ]], i32 [[DELTA]], i8* {{.*}}bitcast (%rtti.TypeDescriptor7* @"??_R0?AUB@@@8" to i8*), i8* {{.*}}bitcast (%rtti.TypeDescriptor7* @"??_R0?AUT@@@8" to i8*), i32 0)
+// CHECK-NEXT:   [[CALL:%.*]] = tail call i8* @__RTDynamicCast(i8* [[ADJ]], i32 [[DELTA]], i8* {{.*}}bitcast (%rtti.TypeDescriptor7* @"??_R0?AUB@@@8" to i8*), i8* {{.*}}bitcast (%rtti.TypeDescriptor7* @"??_R0?AUT@@@8" to i8*), i32 0)
 // CHECK-NEXT:   [[RES:%.*]] = bitcast i8* [[CALL]] to %struct.T*
 // CHECK-NEXT:   br label
 // CHECK:        [[RET:%.*]] = phi %struct.T*
@@ -95,7 +95,7 @@
 // CHECK:        [[CHECK:%.*]] = icmp eq %struct.A* %x, null
 // CHECK-NEXT:   br i1 [[CHECK]]
 // CHECK:        [[VOIDP:%.*]] = bitcast %struct.A* %x to i8*
-// CHECK-NEXT:   [[VBPTRPTR:%.*]] = getelementptr inbounds %struct.A, %struct.A* %x, i32 0, i32 0
+// CHECK-NEXT:   [[VBPTRPTR:%.*]] = getelementptr %struct.A, %struct.A* %x, i32 0, i32 0
 // CHECK-NEXT:   [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4
 // CHECK-NEXT:   [[VBOFFP:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1
 // CHECK-NEXT:   [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4
@@ -109,7 +109,7 @@
 // CHECK-LABEL: define dso_local i8* @"?test9@@YAPAXPAUB@@@Z"(%struct.B* %x)
 // CHECK:        [[CHECK:%.*]] = icmp eq %struct.B* %x, null
 // CHECK-NEXT:   br i1 [[CHECK]]
-// CHECK:        [[CAST:%.*]] = getelementptr inbounds %struct.B, %struct.B* %x, i32 0, i32 0, i32 0
+// CHECK:        [[CAST:%.*]] = getelementptr %struct.B, %struct.B* %x, i32 0, i32 0, i32 0
 // CHECK-NEXT:   [[VBPTR:%.*]] = getelementptr inbounds i8, i8* [[CAST]], i32 4
 // CHECK-NEXT:   [[VBPTRPTR:%.*]] = bitcast i8* [[VBPTR]] to i32**
 // CHECK-NEXT:   [[VBTBL:%.*]] = load i32*, i32** [[VBPTRPTR]], align 4
@@ -117,7 +117,7 @@
 // CHECK-NEXT:   [[VBOFFS:%.*]] = load i32, i32* [[VBOFFP]], align 4
 // CHECK-NEXT:   [[DELTA:%.*]] = add nsw i32 [[VBOFFS]], 4
 // CHECK-NEXT:   [[ADJ:%.*]] = getelementptr inbounds i8, i8* [[CAST]], i32 [[DELTA]]
-// CHECK-NEXT:   [[CALL:%.*]] = tail call i8* @__RTCastToVoid(i8* nonnull [[ADJ]])
+// CHECK-NEXT:   [[CALL:%.*]] = tail call i8* @__RTCastToVoid(i8* [[ADJ]])
 // CHECK-NEXT:   br label
 // CHECK:        [[RET:%.*]] = phi i8*
 // CHECK-NEXT:   ret i8* [[RET]]
diff --git a/clang/test/CodeGenCXX/microsoft-abi-typeid.cpp b/clang/test/CodeGenCXX/microsoft-abi-typeid.cpp
index 128f271..848e280 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-typeid.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-typeid.cpp
@@ -31,7 +31,7 @@
 // CHECK:        tail call i8* @__RTtypeid(i8* null)
 // CHECK-NEXT:   unreachable
 // CHECK:        [[THIS:%.*]] = bitcast %struct.A* [[CALL]] to i8*
-// CHECK-NEXT:   [[VBTBLP:%.*]] = getelementptr inbounds %struct.A, %struct.A* [[CALL]], i32 0, i32 0
+// CHECK-NEXT:   [[VBTBLP:%.*]] = getelementptr %struct.A, %struct.A* [[CALL]], i32 0, i32 0
 // CHECK-NEXT:   [[VBTBL:%.*]] = load i32*, i32** [[VBTBLP]], align 4
 // CHECK-NEXT:   [[VBSLOT:%.*]] = getelementptr inbounds i32, i32* [[VBTBL]], i32 1
 // CHECK-NEXT:   [[VBASE_OFFS:%.*]] = load i32, i32* [[VBSLOT]], align 4
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 74c6980..c58e63d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -2338,8 +2338,15 @@
     // If we found a path from the src to dest, create the getelementptr now.
     if (SrcElTy == DstElTy) {
       SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
-      return GetElementPtrInst::CreateInBounds(SrcPTy->getElementType(), Src,
-                                               Idxs);
+      GetElementPtrInst *GEP =
+          GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);
+
+      // If the source pointer is dereferenceable, then assume it points to an
+      // allocated object and apply "inbounds" to the GEP.
+      bool CanBeNull;
+      if (Src->getPointerDereferenceableBytes(DL, CanBeNull))
+        GEP->setIsInBounds();
+      return GEP;
     }
   }
 
diff --git a/llvm/test/Transforms/InstCombine/addrspacecast.ll b/llvm/test/Transforms/InstCombine/addrspacecast.ll
index 6caefb1..2e34f61 100644
--- a/llvm/test/Transforms/InstCombine/addrspacecast.ll
+++ b/llvm/test/Transforms/InstCombine/addrspacecast.ll
@@ -104,7 +104,7 @@
 
 define i32 @canonicalize_addrspacecast([16 x i32] addrspace(1)* %arr) {
 ; CHECK-LABEL: @canonicalize_addrspacecast(
-; CHECK-NEXT: getelementptr inbounds [16 x i32], [16 x i32] addrspace(1)* %arr, i32 0, i32 0
+; CHECK-NEXT: getelementptr [16 x i32], [16 x i32] addrspace(1)* %arr, i32 0, i32 0
 ; CHECK-NEXT: addrspacecast i32 addrspace(1)* %{{[a-zA-Z0-9]+}} to i32*
 ; CHECK-NEXT: load i32, i32*
 ; CHECK-NEXT: ret i32
diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index b6d1eda..fd35bd9 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -293,7 +293,7 @@
 
 define [4 x float]* @test27([9 x [4 x float]]* %A) {
 ; CHECK-LABEL: @test27(
-; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [9 x [4 x float]], [9 x [4 x float]]* [[A:%.*]], i64 0, i64 0
+; CHECK-NEXT:    [[C:%.*]] = getelementptr [9 x [4 x float]], [9 x [4 x float]]* [[A:%.*]], i64 0, i64 0
 ; CHECK-NEXT:    ret [4 x float]* [[C]]
 ;
   %c = bitcast [9 x [4 x float]]* %A to [4 x float]*
@@ -302,7 +302,7 @@
 
 define float* @test28([4 x float]* %A) {
 ; CHECK-LABEL: @test28(
-; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[A:%.*]], i64 0, i64 0
+; CHECK-NEXT:    [[C:%.*]] = getelementptr [4 x float], [4 x float]* [[A:%.*]], i64 0, i64 0
 ; CHECK-NEXT:    ret float* [[C]]
 ;
   %c = bitcast [4 x float]* %A to float*
diff --git a/llvm/test/Transforms/InstCombine/load-bitcast-vec.ll b/llvm/test/Transforms/InstCombine/load-bitcast-vec.ll
index e6540ee..cb1b224 100644
--- a/llvm/test/Transforms/InstCombine/load-bitcast-vec.ll
+++ b/llvm/test/Transforms/InstCombine/load-bitcast-vec.ll
@@ -67,6 +67,41 @@
   ret float %r
 }
 
+define float @matching_scalar_smallest_deref(<4 x float>* dereferenceable(1) %p) {
+; CHECK-LABEL: @matching_scalar_smallest_deref(
+; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
+; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT:    ret float [[R]]
+;
+  %bc = bitcast <4 x float>* %p to float*
+  %r = load float, float* %bc, align 16
+  ret float %r
+}
+
+define float @matching_scalar_smallest_deref_or_null(<4 x float>* dereferenceable_or_null(1) %p) {
+; CHECK-LABEL: @matching_scalar_smallest_deref_or_null(
+; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
+; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
+; CHECK-NEXT:    ret float [[R]]
+;
+  %bc = bitcast <4 x float>* %p to float*
+  %r = load float, float* %bc, align 16
+  ret float %r
+}
+
+; TODO: Is a null pointer inbounds in any address space?
+
+define float @matching_scalar_smallest_deref_or_null_addrspace(<4 x float> addrspace(4)* dereferenceable_or_null(1) %p) {
+; CHECK-LABEL: @matching_scalar_smallest_deref_or_null_addrspace(
+; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float> addrspace(4)* [[P:%.*]], i64 0, i64 0
+; CHECK-NEXT:    [[R:%.*]] = load float, float addrspace(4)* [[BC]], align 16
+; CHECK-NEXT:    ret float [[R]]
+;
+  %bc = bitcast <4 x float> addrspace(4)* %p to float addrspace(4)*
+  %r = load float, float addrspace(4)* %bc, align 16
+  ret float %r
+}
+
 define float @matching_scalar_volatile(<4 x float>* dereferenceable(16) %p) {
 ; CHECK-LABEL: @matching_scalar_volatile(
 ; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
diff --git a/llvm/test/Transforms/InstCombine/memset.ll b/llvm/test/Transforms/InstCombine/memset.ll
index 7d531f2..b994d97 100644
--- a/llvm/test/Transforms/InstCombine/memset.ll
+++ b/llvm/test/Transforms/InstCombine/memset.ll
@@ -3,7 +3,7 @@
 
 define i32 @test([1024 x i8]* %target) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[TARGET:%.*]], i64 0, i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [1024 x i8], [1024 x i8]* [[TARGET:%.*]], i64 0, i64 0
 ; CHECK-NEXT:    store i8 1, i8* [[TMP1]], align 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast [1024 x i8]* [[TARGET]] to i16*
 ; CHECK-NEXT:    store i16 257, i16* [[TMP2]], align 2
diff --git a/llvm/test/Transforms/InstCombine/unpack-fca.ll b/llvm/test/Transforms/InstCombine/unpack-fca.ll
index 3c5e417..1bfd53f 100644
--- a/llvm/test/Transforms/InstCombine/unpack-fca.ll
+++ b/llvm/test/Transforms/InstCombine/unpack-fca.ll
@@ -13,7 +13,7 @@
 
 define void @storeA(%A* %a.ptr) {
 ; CHECK-LABEL: storeA
-; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds %A, %A* %a.ptr, i64 0, i32 0
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr %A, %A* %a.ptr, i64 0, i32 0
 ; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
 ; CHECK-NEXT: ret void
   store %A { %A__vtbl* @A__vtblZ }, %A* %a.ptr, align 8
@@ -33,7 +33,7 @@
 
 define void @storeStructOfA({ %A }* %sa.ptr) {
 ; CHECK-LABEL: storeStructOfA
-; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { %A }, { %A }* %sa.ptr, i64 0, i32 0, i32 0
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr { %A }, { %A }* %sa.ptr, i64 0, i32 0, i32 0
 ; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
 ; CHECK-NEXT: ret void
   store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %sa.ptr, align 8
@@ -42,7 +42,7 @@
 
 define void @storeArrayOfA([1 x %A]* %aa.ptr) {
 ; CHECK-LABEL: storeArrayOfA
-; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds [1 x %A], [1 x %A]* %aa.ptr, i64 0, i64 0, i32 0
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr [1 x %A], [1 x %A]* %aa.ptr, i64 0, i64 0, i32 0
 ; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
 ; CHECK-NEXT: ret void
   store [1 x %A] [%A { %A__vtbl* @A__vtblZ }], [1 x %A]* %aa.ptr, align 8
@@ -60,7 +60,7 @@
 
 define void @storeStructOfArrayOfA({ [1 x %A] }* %saa.ptr) {
 ; CHECK-LABEL: storeStructOfArrayOfA
-; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { [1 x %A] }, { [1 x %A] }* %saa.ptr, i64 0, i32 0, i64 0, i32 0
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr { [1 x %A] }, { [1 x %A] }* %saa.ptr, i64 0, i32 0, i64 0, i32 0
 ; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
 ; CHECK-NEXT: ret void
   store { [1 x %A] } { [1 x %A] [%A { %A__vtbl* @A__vtblZ }] }, { [1 x %A] }* %saa.ptr, align 8
@@ -90,7 +90,7 @@
 
 define %A @loadA(%A* %a.ptr) {
 ; CHECK-LABEL: loadA
-; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds %A, %A* %a.ptr, i64 0, i32 0
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr %A, %A* %a.ptr, i64 0, i32 0
 ; CHECK-NEXT: [[LOAD:%[a-z0-9\.]+]] = load %A__vtbl*, %A__vtbl** [[GEP]], align 8
 ; CHECK-NEXT: [[IV:%[a-z0-9\.]+]] = insertvalue %A undef, %A__vtbl* [[LOAD]], 0
 ; CHECK-NEXT: ret %A [[IV]]
@@ -113,7 +113,7 @@
 
 define { %A } @loadStructOfA({ %A }* %sa.ptr) {
 ; CHECK-LABEL: loadStructOfA
-; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { %A }, { %A }* %sa.ptr, i64 0, i32 0, i32 0
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr { %A }, { %A }* %sa.ptr, i64 0, i32 0, i32 0
 ; CHECK-NEXT: [[LOAD:%[a-z0-9\.]+]] = load %A__vtbl*, %A__vtbl** [[GEP]], align 8
 ; CHECK-NEXT: [[IV1:%[a-z0-9\.]+]] = insertvalue %A undef, %A__vtbl* [[LOAD]], 0
 ; CHECK-NEXT: [[IV2:%[a-z0-9\.]+]] = insertvalue { %A } undef, %A [[IV1]], 0
@@ -124,7 +124,7 @@
 
 define [1 x %A] @loadArrayOfA([1 x %A]* %aa.ptr) {
 ; CHECK-LABEL: loadArrayOfA
-; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds [1 x %A], [1 x %A]* %aa.ptr, i64 0, i64 0, i32 0
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr [1 x %A], [1 x %A]* %aa.ptr, i64 0, i64 0, i32 0
 ; CHECK-NEXT: [[LOAD:%[a-z0-9\.]+]] = load %A__vtbl*, %A__vtbl** [[GEP]], align 8
 ; CHECK-NEXT: [[IV1:%[a-z0-9\.]+]] = insertvalue %A undef, %A__vtbl* [[LOAD]], 0
 ; CHECK-NEXT: [[IV2:%[a-z0-9\.]+]] = insertvalue [1 x %A] undef, %A [[IV1]], 0
@@ -135,7 +135,7 @@
 
 define { [1 x %A] } @loadStructOfArrayOfA({ [1 x %A] }* %saa.ptr) {
 ; CHECK-LABEL: loadStructOfArrayOfA
-; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { [1 x %A] }, { [1 x %A] }* %saa.ptr, i64 0, i32 0, i64 0, i32 0
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr { [1 x %A] }, { [1 x %A] }* %saa.ptr, i64 0, i32 0, i64 0, i32 0
 ; CHECK-NEXT: [[LOAD:%[a-z0-9\.]+]] = load %A__vtbl*, %A__vtbl** [[GEP]], align 8
 ; CHECK-NEXT: [[IV1:%[a-z0-9\.]+]] = insertvalue %A undef, %A__vtbl* [[LOAD]], 0
 ; CHECK-NEXT: [[IV2:%[a-z0-9\.]+]] = insertvalue [1 x %A] undef, %A [[IV1]], 0
@@ -147,7 +147,7 @@
 
 define { %A } @structOfA({ %A }* %sa.ptr) {
 ; CHECK-LABEL: structOfA
-; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { %A }, { %A }* %sa.ptr, i64 0, i32 0, i32 0
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr { %A }, { %A }* %sa.ptr, i64 0, i32 0, i32 0
 ; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
 ; CHECK-NEXT: ret { %A } { %A { %A__vtbl* @A__vtblZ } }
   store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %sa.ptr, align 8