// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-darwin10 | FileCheck %s

extern int int_source();
extern void int_sink(int x);

namespace test0 {
  struct A {
    int aField;
    int bField;
  };

  struct B {
    int onebit : 2;
    int twobit : 6;
    int intField;
  };

  struct __attribute__((packed, aligned(2))) C : A, B {
  };

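  // Layout sketch for C, matching the offsets assumed by the checks below:
  // the A subobject (aField, bField) occupies bytes 0-7, B's storage starts
  // at offset 8, so the byte holding the 'onebit'/'twobit' bit-fields sits
  // at offset 8 within C. Because of aligned(2), alignof(C) is only 2.
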
  // These accesses should have alignment 4 because they're at offset 0
  // in a reference with an assumed alignment of 4.
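  // (alignof(B) is 4, so a B& is assumed to be 4-aligned, and the byte
  // containing the bit-fields is at offset 0 within B.)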
  // CHECK-LABEL: @_ZN5test01aERNS_1BE
  void a(B &b) {
    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 4
    b.onebit = int_source();

    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(b.onebit);
  }

  // These accesses should have alignment 2 because they're at offset 8
  // in a reference/pointer with an assumed alignment of 2.
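  // (An address that is only known to be 2-aligned stays only 2-aligned
  // after adding 8, so nothing stronger than align 2 can be emitted.)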
  // CHECK-LABEL: @_ZN5test01bERNS_1CE
  void b(C &c) {
    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 2
    c.onebit = int_source();

    // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c.onebit);
  }

  // CHECK-LABEL: @_ZN5test01cEPNS_1CE
  void c(C *c) {
    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 2
    c->onebit = int_source();

    // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c->onebit);
  }

  // These accesses should have alignment 2 because they're at offset 8
  // in an alignment-2 variable.
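  // (A plain local C gets its natural alignment, which is alignof(C) == 2,
  // so the arithmetic is the same as in the reference/pointer cases above.)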
  // CHECK-LABEL: @_ZN5test01dEv
  void d() {
    // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 2
    C c;

    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 2
    c.onebit = int_source();

    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c.onebit);
  }

  // These accesses should have alignment 8 because they're at offset 8
  // in an alignment-16 variable.
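  // (From a 16-aligned object, the address at offset 8 is 8-aligned but not
  // 16-aligned, so align 8 is the best the compiler can claim.)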
  // CHECK-LABEL: @_ZN5test01eEv
  void e() {
    // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 16
    __attribute__((aligned(16))) C c;

    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 8
    c.onebit = int_source();

    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c.onebit);
  }
}

namespace test1 {
  struct Array {
    int elts[4];
  };

  struct A {
    __attribute__((aligned(16))) Array aArray;
  };

  struct B : virtual A {
    void *bPointer; // puts bArray at offset 16
    Array bArray;
  };

  struct C : virtual A { // must be viable as primary base
    // Non-empty, nv-size not a multiple of 16.
    void *cPointer1;
    void *cPointer2;
  };

  // Proof of concept that the non-virtual components of B do not have
  // to be 16-byte-aligned.
  struct D : C, B {};
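  // Rough layout of D under the Itanium ABI, matching the offsets checked
  // below: C is the primary base at offset 0 (vptr plus two pointers,
  // nv-size 24), the non-virtual part of B follows at offset 24, and the
  // virtual A base is placed later at a 16-aligned offset. B::bArray
  // therefore lands at offset 24 + 16 = 40 within D, which is only
  // 8-aligned even though D itself is 16-aligned.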

  // For the following tests, we want to assign into a variable whose
  // alignment is high enough that it will absolutely not be the
  // constraint on the memcpy alignment.
  typedef __attribute__((aligned(64))) Array AlignedArray;
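  // (Since the destination is forced to alignment 64, the alignment argument
  // on each memcpy below reflects only what can be proven about the source.)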

  // CHECK-LABEL: @_ZN5test11aERNS_1AE
  void a(A &a) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY:%.*]], align 64
    // CHECK: [[A_P:%.*]] = load [[A:%.*]]*, [[A]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
    AlignedArray result = a.aArray;
  }

  // CHECK-LABEL: @_ZN5test11bERNS_1BE
  void b(B &b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
    // CHECK: [[VPTR_P:%.*]] = bitcast [[B]]* [[B_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 8
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
    // CHECK: [[T0:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
    AlignedArray result = b.aArray;
  }

  // CHECK-LABEL: @_ZN5test11cERNS_1BE
  void c(B &b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 8, i1 false)
    AlignedArray result = b.bArray;
  }

  // CHECK-LABEL: @_ZN5test11dEPNS_1BE
  void d(B *b) {
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 8, i1 false)
    AlignedArray result = b->bArray;
  }

  // CHECK-LABEL: @_ZN5test11eEv
  void e() {
    // CHECK: [[B_P:%.*]] = alloca [[B]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
    B b;
    AlignedArray result = b.bArray;
  }

  // CHECK-LABEL: @_ZN5test11fEv
  void f() {
    // TODO: we should devirtualize this derived-to-base conversion.
    // CHECK: [[D_P:%.*]] = alloca [[D:%.*]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[VPTR_P:%.*]] = bitcast [[D]]* [[D_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 16
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
    D d;
    AlignedArray result = d.aArray;
  }

  // CHECK-LABEL: @_ZN5test11gEv
  void g() {
    // CHECK: [[D_P:%.*]] = alloca [[D]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 24
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 8, i1 false)
    D d;
    AlignedArray result = d.bArray;
  }
298}