; RUN: opt < %s -sroa -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"

declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
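; Note: this file uses the older memcpy intrinsic signature, in which the
; fourth argument is the guaranteed alignment of both the source and the
; destination and the fifth is the volatile flag.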

define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
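; Test that when the memcpys through an over-aligned alloca are rewritten to
; per-element loads and stores, the element at offset 0 inherits the 16-byte
; alignment while the element at offset 1 can only be assumed align 1.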
; CHECK-LABEL: @test1(
; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 0
; CHECK: %[[a0:.*]] = load i8, i8* %[[gep_a0]], align 16
; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 1
; CHECK: %[[a1:.*]] = load i8, i8* %[[gep_a1]], align 1
; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 0
; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 1
; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
; CHECK: ret void

entry:
  %alloca = alloca { i8, i8 }, align 16
  %gep_a = getelementptr { i8, i8 }, { i8, i8 }* %a, i32 0, i32 0
  %gep_alloca = getelementptr { i8, i8 }, { i8, i8 }* %alloca, i32 0, i32 0
  %gep_b = getelementptr { i8, i8 }, { i8, i8 }* %b, i32 0, i32 0

  store i8 420, i8* %gep_alloca, align 16

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_alloca, i8* %gep_a, i32 2, i32 16, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_b, i8* %gep_alloca, i32 2, i32 16, i1 false)
  ret void
}

define void @test2() {
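; Test overlapping accesses of different widths: the volatile i16 access at
; offset 1 cannot be promoted away, so its slice survives as a new i16
; alloca, while the i8 load and store at offset 2 are still rewritten.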
; CHECK-LABEL: @test2(
; CHECK: alloca i16
; CHECK: load i8, i8* %{{.*}}
; CHECK: store i8 42, i8* %{{.*}}
; CHECK: ret void

entry:
  %a = alloca { i8, i8, i8, i8 }, align 2
  %gep1 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 1
  %cast1 = bitcast i8* %gep1 to i16*
  store volatile i16 0, i16* %cast1
  %gep2 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 2
  %result = load i8, i8* %gep2
  store i8 42, i8* %gep2
  ret void
}

define void @PR13920(<2 x i64>* %a, i16* %b) {
; Test that alignments on memcpy intrinsics get propagated to loads and stores.
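; Even though the intermediate alloca is 16-byte aligned, the memcpys only
; guarantee 2-byte alignment for %a and %b, so the rewritten vector load and
; store must carry align 2.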
; CHECK-LABEL: @PR13920(
; CHECK: load <2 x i64>, <2 x i64>* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
; CHECK: ret void

entry:
  %aa = alloca <2 x i64>, align 16
  %aptr = bitcast <2 x i64>* %a to i8*
  %aaptr = bitcast <2 x i64>* %aa to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %aaptr, i8* %aptr, i32 16, i32 2, i1 false)
  %bptr = bitcast i16* %b to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
  ret void
}

define void @test3(i8* %x) {
; Test that when we promote an alloca to a type with a lower ABI alignment,
; we provide the needed explicit alignment that code using the alloca may be
; expecting. However, also check that any offset within an alloca can in turn
; reduce the alignment.
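; For %b the rewritten partition starts at offset 6; since the base alloca is
; only known to be 8-byte aligned, offset 6 guarantees no more than
; gcd(8, 6) = 2 bytes of alignment, hence the align 2 on the second alloca.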
; CHECK-LABEL: @test3(
; CHECK: alloca [22 x i8], align 8
; CHECK: alloca [18 x i8], align 2
; CHECK: ret void

entry:
  %a = alloca { i8*, i8*, i8* }
  %b = alloca { i8*, i8*, i8* }
  %a_raw = bitcast { i8*, i8*, i8* }* %a to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a_raw, i8* %x, i32 22, i32 8, i1 false)
  %b_raw = bitcast { i8*, i8*, i8* }* %b to i8*
  %b_gep = getelementptr i8, i8* %b_raw, i32 6
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_gep, i8* %x, i32 18, i32 2, i1 false)
  ret void
}

define void @test5() {
; Test that we preserve underaligned loads and stores when splitting. The use
; of volatile in this test case is just to keep the loads and stores from
; being split or promoted out of existence.
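; The double accesses at offsets 0 and 9 split the 18-byte alloca into two
; 9-byte slices (bytes 0-8 and 9-17): the i16 accesses at offsets 7 and 16
; each overlap the tail of a double, which is what widens each slice to 9
; bytes.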
;
; CHECK-LABEL: @test5(
; CHECK: alloca [9 x i8]
; CHECK: alloca [9 x i8]
; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
; CHECK: load volatile i16, i16* %{{.*}}, align 1
; CHECK: load double, double* %{{.*}}, align 1
; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
; CHECK: load volatile i16, i16* %{{.*}}, align 1
; CHECK: ret void

entry:
  %a = alloca [18 x i8]
  %raw1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1
  %weird_gep1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 7
  %weird_cast1 = bitcast i8* %weird_gep1 to i16*
  %weird_load1 = load volatile i16, i16* %weird_cast1, align 1

  %raw2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 9
  %ptr2 = bitcast i8* %raw2 to double*
  %d1 = load double, double* %ptr1, align 1
  store volatile double %d1, double* %ptr2, align 1
  %weird_gep2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 16
  %weird_cast2 = bitcast i8* %weird_gep2 to i16*
  %weird_load2 = load volatile i16, i16* %weird_cast2, align 1

  ret void
}

define void @test6() {
; Test that we promote alignment when the underlying alloca switches to one
; that innately provides it.
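; Each double slice gets its own alloca of type double, whose ABI alignment
; (8 bytes under this datalayout) already covers the align-1 accesses, so no
; explicit alignment attribute should be emitted (hence the CHECK-NOT).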
; CHECK-LABEL: @test6(
; CHECK: alloca double
; CHECK: alloca double
; CHECK-NOT: align
; CHECK: ret void

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1

  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*
  %val = load double, double* %ptr1, align 1
  store volatile double %val, double* %ptr2, align 1

  ret void
}

define void @test7(i8* %out) {
; Test that we properly compute the destination alignment when rewriting
; memcpys as direct loads or stores.
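; The memcpys below pass an alignment argument of 0, meaning nothing is known
; about their alignment, so every rewritten load and store must
; conservatively use align 1.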
; CHECK-LABEL: @test7(
; CHECK-NOT: alloca

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i32 0, i1 false)
; CHECK: %[[val2:.*]] = load double, double* %{{.*}}, align 1
; CHECK: %[[val1:.*]] = load double, double* %{{.*}}, align 1

  %val1 = load double, double* %ptr2, align 1
  %val2 = load double, double* %ptr1, align 1

  store double %val1, double* %ptr1, align 1
  store double %val2, double* %ptr2, align 1

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %out, i8* %raw1, i32 16, i32 0, i1 false)
; CHECK: store double %[[val1]], double* %{{.*}}, align 1
; CHECK: store double %[[val2]], double* %{{.*}}, align 1

  ret void
; CHECK: ret void
}