// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOCOMPAT
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-darwin10 -fclang-abi-compat=6.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6COMPAT

extern int int_source();
extern void int_sink(int x);

7namespace test0 {
8 struct A {
9 int aField;
10 int bField;
11 };
12
13 struct B {
14 int onebit : 2;
15 int twobit : 6;
16 int intField;
17 };
18
19 struct __attribute__((packed, aligned(2))) C : A, B {
20 };
21
22 // These accesses should have alignment 4 because they're at offset 0
23 // in a reference with an assumed alignment of 4.
24 // CHECK-LABEL: @_ZN5test01aERNS_1BE
25 void a(B &b) {
26 // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
27 // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
28 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
29 // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
30 // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
31 // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
32 // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
33 // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
34 // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 4
35 b.onebit = int_source();
Denis Zobninb2dc2382016-02-02 12:39:08 +000036
John McCall7f416cc2015-09-08 08:05:57 +000037 // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
38 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
39 // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
40 // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
41 // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
42 // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
43 // CHECK: call void @_Z8int_sinki(i32 [[T2]])
44 int_sink(b.onebit);
45 }
46
47 // These accesses should have alignment 2 because they're at offset 8
48 // in a reference/pointer with an assumed alignment of 2.
49 // CHECK-LABEL: @_ZN5test01bERNS_1CE
50 void b(C &c) {
51 // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
52 // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
53 // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
54 // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
55 // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
56 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
57 // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
Richard Smith4ae767b2018-04-29 04:55:46 +000058 // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
59 // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
John McCall7f416cc2015-09-08 08:05:57 +000060 // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
61 // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
62 // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
Richard Smith4ae767b2018-04-29 04:55:46 +000063 // CHECK-V6COMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 2
64 // CHECK-NOCOMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 4
John McCall7f416cc2015-09-08 08:05:57 +000065 c.onebit = int_source();
Denis Zobninb2dc2382016-02-02 12:39:08 +000066
John McCall7f416cc2015-09-08 08:05:57 +000067 // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
68 // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
69 // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
70 // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
71 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
Richard Smith4ae767b2018-04-29 04:55:46 +000072 // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
73 // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
John McCall7f416cc2015-09-08 08:05:57 +000074 // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
75 // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
76 // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
77 // CHECK: call void @_Z8int_sinki(i32 [[T2]])
78 int_sink(c.onebit);
79 }
80
81 // CHECK-LABEL: @_ZN5test01cEPNS_1CE
82 void c(C *c) {
83 // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
84 // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
85 // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
86 // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
87 // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
88 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
89 // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
Richard Smith4ae767b2018-04-29 04:55:46 +000090 // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
91 // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
John McCall7f416cc2015-09-08 08:05:57 +000092 // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
93 // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
94 // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
Richard Smith4ae767b2018-04-29 04:55:46 +000095 // CHECK-V6COMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 2
96 // CHECK-NOCOMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 4
John McCall7f416cc2015-09-08 08:05:57 +000097 c->onebit = int_source();
98
99 // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
100 // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
101 // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
102 // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
103 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
Richard Smith4ae767b2018-04-29 04:55:46 +0000104 // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
105 // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
John McCall7f416cc2015-09-08 08:05:57 +0000106 // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
107 // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
108 // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
109 // CHECK: call void @_Z8int_sinki(i32 [[T2]])
110 int_sink(c->onebit);
111 }
112
113 // These accesses should have alignment 2 because they're at offset 8
114 // in an alignment-2 variable.
115 // CHECK-LABEL: @_ZN5test01dEv
116 void d() {
Richard Smith4ae767b2018-04-29 04:55:46 +0000117 // CHECK-V6COMPAT: [[C_P:%.*]] = alloca [[C:%.*]], align 2
118 // CHECK-NOCOMPAT: [[C_P:%.*]] = alloca [[C:%.*]], align 4
John McCall7f416cc2015-09-08 08:05:57 +0000119 C c;
120
NAKAMURA Takumic88d2fa2015-09-08 09:31:04 +0000121 // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
John McCall7f416cc2015-09-08 08:05:57 +0000122 // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
123 // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
124 // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
125 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
126 // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
Richard Smith4ae767b2018-04-29 04:55:46 +0000127 // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
128 // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
John McCall7f416cc2015-09-08 08:05:57 +0000129 // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
130 // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
131 // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
Richard Smith4ae767b2018-04-29 04:55:46 +0000132 // CHECK-V6COMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 2
133 // CHECK-NOCOMPAT: store i8 [[T2]], i8* [[FIELD_P]], align 4
John McCall7f416cc2015-09-08 08:05:57 +0000134 c.onebit = int_source();
135
136 // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
137 // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
138 // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
139 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
Richard Smith4ae767b2018-04-29 04:55:46 +0000140 // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
141 // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
John McCall7f416cc2015-09-08 08:05:57 +0000142 // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
143 // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
144 // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
145 // CHECK: call void @_Z8int_sinki(i32 [[T2]])
146 int_sink(c.onebit);
147 }
148
149 // These accesses should have alignment 8 because they're at offset 8
150 // in an alignment-16 variable.
151 // CHECK-LABEL: @_ZN5test01eEv
152 void e() {
153 // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 16
154 __attribute__((aligned(16))) C c;
155
John McCalle78e08a2015-09-08 09:33:33 +0000156 // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
John McCall7f416cc2015-09-08 08:05:57 +0000157 // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
158 // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
159 // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
160 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
161 // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
162 // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
163 // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
164 // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
165 // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
166 // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 8
167 c.onebit = int_source();
168
169 // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
170 // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
171 // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
172 // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
173 // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
174 // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
175 // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
176 // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
177 // CHECK: call void @_Z8int_sinki(i32 [[T2]])
178 int_sink(c.onebit);
179 }
180}
namespace test1 {
  struct Array {
    int elts[4];
  };

  struct A {
    __attribute__((aligned(16))) Array aArray;
  };

  struct B : virtual A {
    void *bPointer; // puts bArray at offset 16
    Array bArray;
  };

  struct C : virtual A { // must be viable as primary base
    // Non-empty, nv-size not a multiple of 16.
    void *cPointer1;
    void *cPointer2;
  };

  // Proof of concept that the non-virtual components of B do not have
  // to be 16-byte-aligned.
  struct D : C, B {};

  // For the following tests, we want to assign into a variable whose
  // alignment is high enough that it will absolutely not be the
  // constraint on the memcpy alignment.
  typedef __attribute__((aligned(64))) Array AlignedArray;

  // CHECK-LABEL: @_ZN5test11aERNS_1AE
  void a(A &a) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY:%.*]], align 64
    // CHECK: [[A_P:%.*]] = load [[A:%.*]]*, [[A]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    AlignedArray result = a.aArray;
  }

  // CHECK-LABEL: @_ZN5test11bERNS_1BE
  void b(B &b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
    // CHECK: [[VPTR_P:%.*]] = bitcast [[B]]* [[B_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 8
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
    // CHECK: [[T0:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    AlignedArray result = b.aArray;
  }

  // CHECK-LABEL: @_ZN5test11cERNS_1BE
  void c(B &b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
    AlignedArray result = b.bArray;
  }

  // CHECK-LABEL: @_ZN5test11dEPNS_1BE
  void d(B *b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
    AlignedArray result = b->bArray;
  }

  // CHECK-LABEL: @_ZN5test11eEv
  void e() {
    // CHECK: [[B_P:%.*]] = alloca [[B]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    B b;
    AlignedArray result = b.bArray;
  }

  // CHECK-LABEL: @_ZN5test11fEv
  void f() {
    // TODO: we should devirtualize this derived-to-base conversion.
    // CHECK: [[D_P:%.*]] = alloca [[D:%.*]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[VPTR_P:%.*]] = bitcast [[D]]* [[D_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 16
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
    D d;
    AlignedArray result = d.aArray;
  }

  // CHECK-LABEL: @_ZN5test11gEv
  void g() {
    // CHECK: [[D_P:%.*]] = alloca [[D]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 24
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
    D d;
    AlignedArray result = d.bArray;
  }
}
310}