; RUN: llc -mcpu=pwr7 -O0 -disable-fp-elim < %s | FileCheck %s

; FIXME: The code generation for packed structs is very poor because the
; PowerPC target wrongly rejects all unaligned loads. This test case will
; need to be revised when that is fixed.
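;
; The IR below is consistent with C source along the following lines. This
; sketch is a readability aid reconstructed from the IR, not the actual
; source the test was generated from; struct and member names are
; assumptions, and the remaining s*/t* pairs follow the %struct.*
; definitions below:
;
;   struct s3 { short a; char b; };                         /* natural */
;   struct t3 { short a; char b; } __attribute__((packed)); /* packed  */
;
;   int callee1(int z1, int z2, int z3, int z4, int z5, int z6, int z7,
;               int z8, struct s1 v1, struct s2 v2, struct s3 v3,
;               struct s4 v4, struct s5 v5, struct s6 v6, struct s7 v7) {
;     return v1.a + v2.a + v3.a + v4.a + v5.a + v6.a + v7.a;
;   }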

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

%struct.s1 = type { i8 }
%struct.s2 = type { i16 }
%struct.s4 = type { i32 }
%struct.t1 = type { i8 }
%struct.t3 = type <{ i16, i8 }>
%struct.t5 = type <{ i32, i8 }>
%struct.t6 = type <{ i32, i16 }>
%struct.t7 = type <{ i32, i16, i8 }>
%struct.s3 = type { i16, i8 }
%struct.s5 = type { i32, i8 }
%struct.s6 = type { i32, i16 }
%struct.s7 = type { i32, i16, i8 }
%struct.t2 = type <{ i16 }>
%struct.t4 = type <{ i32 }>
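; Note: the %struct.s* types use natural member alignment, while the
; %struct.t* types (<{ ... }>) are packed, so all of their members are
; only 1-byte aligned.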

@caller1.p1 = private unnamed_addr constant %struct.s1 { i8 1 }, align 1
@caller1.p2 = private unnamed_addr constant %struct.s2 { i16 2 }, align 2
@caller1.p3 = private unnamed_addr constant { i16, i8, i8 } { i16 4, i8 8, i8 undef }, align 2
@caller1.p4 = private unnamed_addr constant %struct.s4 { i32 16 }, align 4
@caller1.p5 = private unnamed_addr constant { i32, i8, [3 x i8] } { i32 32, i8 64, [3 x i8] undef }, align 4
@caller1.p6 = private unnamed_addr constant { i32, i16, [2 x i8] } { i32 128, i16 256, [2 x i8] undef }, align 4
@caller1.p7 = private unnamed_addr constant { i32, i16, i8, i8 } { i32 512, i16 1024, i8 -3, i8 undef }, align 4
@caller2.p1 = private unnamed_addr constant %struct.t1 { i8 1 }, align 1
@caller2.p2 = private unnamed_addr constant { i16 } { i16 2 }, align 1
@caller2.p3 = private unnamed_addr constant %struct.t3 <{ i16 4, i8 8 }>, align 1
@caller2.p4 = private unnamed_addr constant { i32 } { i32 16 }, align 1
@caller2.p5 = private unnamed_addr constant %struct.t5 <{ i32 32, i8 64 }>, align 1
@caller2.p6 = private unnamed_addr constant %struct.t6 <{ i32 128, i16 256 }>, align 1
@caller2.p7 = private unnamed_addr constant %struct.t7 <{ i32 512, i16 1024, i8 -3 }>, align 1
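; The initializers for the padded s3/s5/s6/s7 parameters use literal struct
; types (e.g. { i16, i8, i8 }) so that the tail padding shows up as explicit
; undef members.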

define i32 @caller1() nounwind {
entry:
  %p1 = alloca %struct.s1, align 1
  %p2 = alloca %struct.s2, align 2
  %p3 = alloca %struct.s3, align 2
  %p4 = alloca %struct.s4, align 4
  %p5 = alloca %struct.s5, align 4
  %p6 = alloca %struct.s6, align 4
  %p7 = alloca %struct.s7, align 4
  %0 = bitcast %struct.s1* %p1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
  %1 = bitcast %struct.s2* %p2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false)
  %2 = bitcast %struct.s3* %p3 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i32 2, i1 false)
  %3 = bitcast %struct.s4* %p4 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i32 4, i1 false)
  %4 = bitcast %struct.s5* %p5 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i32 4, i1 false)
  %5 = bitcast %struct.s6* %p6 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i32 4, i1 false)
  %6 = bitcast %struct.s7* %p7 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i32 4, i1 false)
  %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7)
  ret i32 %call

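; The byval arguments are stored into the parameter save area of the
; outgoing call. Assuming the 64-bit PowerPC ELF ABI layout (save area at
; 48(1), with the eight i32 arguments occupying 48..111), the structs get
; one doubleword each starting at offset 112, and aggregates smaller than
; a doubleword are right-justified on this big-endian target: hence s1 at
; 119, s2 at 126, s3 at 132, s4 at 140, and the 8-byte s5/s6/s7 at
; 144/152/160.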
; CHECK: stb {{[0-9]+}}, 119(1)
; CHECK: sth {{[0-9]+}}, 126(1)
; CHECK: stw {{[0-9]+}}, 132(1)
; CHECK: stw {{[0-9]+}}, 140(1)
; CHECK: std {{[0-9]+}}, 144(1)
; CHECK: std {{[0-9]+}}, 152(1)
; CHECK: std {{[0-9]+}}, 160(1)
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind {
entry:
  %z1.addr = alloca i32, align 4
  %z2.addr = alloca i32, align 4
  %z3.addr = alloca i32, align 4
  %z4.addr = alloca i32, align 4
  %z5.addr = alloca i32, align 4
  %z6.addr = alloca i32, align 4
  %z7.addr = alloca i32, align 4
  %z8.addr = alloca i32, align 4
  store i32 %z1, i32* %z1.addr, align 4
  store i32 %z2, i32* %z2.addr, align 4
  store i32 %z3, i32* %z3.addr, align 4
  store i32 %z4, i32* %z4.addr, align 4
  store i32 %z5, i32* %z5.addr, align 4
  store i32 %z6, i32* %z6.addr, align 4
  store i32 %z7, i32* %z7.addr, align 4
  store i32 %z8, i32* %z8.addr, align 4
  %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0
  %0 = load i8* %a, align 1
  %conv = zext i8 %0 to i32
  %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0
  %1 = load i16* %a1, align 2
  %conv2 = sext i16 %1 to i32
  %add = add nsw i32 %conv, %conv2
  %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0
  %2 = load i16* %a3, align 2
  %conv4 = sext i16 %2 to i32
  %add5 = add nsw i32 %add, %conv4
  %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0
  %3 = load i32* %a6, align 4
  %add7 = add nsw i32 %add5, %3
  %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0
  %4 = load i32* %a8, align 4
  %add9 = add nsw i32 %add7, %4
  %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0
  %5 = load i32* %a10, align 4
  %add11 = add nsw i32 %add9, %5
  %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0
  %6 = load i32* %a12, align 4
  %add13 = add nsw i32 %add11, %6
  ret i32 %add13

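; The fields are reloaded from the same right-justified doubleword slots
; used by @caller1; at -O0 each byval argument is read back from the
; parameter save area individually.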
; CHECK: lha {{[0-9]+}}, 126(1)
; CHECK: lbz {{[0-9]+}}, 119(1)
; CHECK: lha {{[0-9]+}}, 132(1)
; CHECK: lwz {{[0-9]+}}, 140(1)
; CHECK: lwz {{[0-9]+}}, 144(1)
; CHECK: lwz {{[0-9]+}}, 152(1)
; CHECK: lwz {{[0-9]+}}, 160(1)
}

define i32 @caller2() nounwind {
entry:
  %p1 = alloca %struct.t1, align 1
  %p2 = alloca %struct.t2, align 1
  %p3 = alloca %struct.t3, align 1
  %p4 = alloca %struct.t4, align 1
  %p5 = alloca %struct.t5, align 1
  %p6 = alloca %struct.t6, align 1
  %p7 = alloca %struct.t7, align 1
  %0 = bitcast %struct.t1* %p1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
  %1 = bitcast %struct.t2* %p2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false)
  %2 = bitcast %struct.t3* %p3 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i32 1, i1 false)
  %3 = bitcast %struct.t4* %p4 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i32 1, i1 false)
  %4 = bitcast %struct.t5* %p5 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i32 1, i1 false)
  %5 = bitcast %struct.t6* %p6 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i32 1, i1 false)
  %6 = bitcast %struct.t7* %p7 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i32 1, i1 false)
  %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7)
  ret i32 %call

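; For the packed t* structs only 1-byte alignment is known, so (per the
; FIXME above) the copies into the parameter save area are emitted
; piecewise as stb/sth/stw instead of wide stores. The sub-doubleword
; aggregates are again right-justified: e.g. the 3-byte t3 occupies
; 133..135 and the 7-byte t7 occupies 161..167.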
; CHECK: stb {{[0-9]+}}, 119(1)
; CHECK: sth {{[0-9]+}}, 126(1)
; CHECK: stb {{[0-9]+}}, 135(1)
; CHECK: sth {{[0-9]+}}, 133(1)
; CHECK: stw {{[0-9]+}}, 140(1)
; CHECK: stb {{[0-9]+}}, 151(1)
; CHECK: stw {{[0-9]+}}, 147(1)
; CHECK: sth {{[0-9]+}}, 158(1)
; CHECK: stw {{[0-9]+}}, 154(1)
; CHECK: stb {{[0-9]+}}, 167(1)
; CHECK: sth {{[0-9]+}}, 165(1)
; CHECK: stw {{[0-9]+}}, 161(1)
}

define internal i32 @callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind {
entry:
  %z1.addr = alloca i32, align 4
  %z2.addr = alloca i32, align 4
  %z3.addr = alloca i32, align 4
  %z4.addr = alloca i32, align 4
  %z5.addr = alloca i32, align 4
  %z6.addr = alloca i32, align 4
  %z7.addr = alloca i32, align 4
  %z8.addr = alloca i32, align 4
  store i32 %z1, i32* %z1.addr, align 4
  store i32 %z2, i32* %z2.addr, align 4
  store i32 %z3, i32* %z3.addr, align 4
  store i32 %z4, i32* %z4.addr, align 4
  store i32 %z5, i32* %z5.addr, align 4
  store i32 %z6, i32* %z6.addr, align 4
  store i32 %z7, i32* %z7.addr, align 4
  store i32 %z8, i32* %z8.addr, align 4
  %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0
  %0 = load i8* %a, align 1
  %conv = zext i8 %0 to i32
  %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0
  %1 = load i16* %a1, align 1
  %conv2 = sext i16 %1 to i32
  %add = add nsw i32 %conv, %conv2
  %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0
  %2 = load i16* %a3, align 1
  %conv4 = sext i16 %2 to i32
  %add5 = add nsw i32 %add, %conv4
  %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0
  %3 = load i32* %a6, align 1
  %add7 = add nsw i32 %add5, %3
  %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0
  %4 = load i32* %a8, align 1
  %add9 = add nsw i32 %add7, %4
  %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0
  %5 = load i32* %a10, align 1
  %add11 = add nsw i32 %add9, %5
  %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0
  %6 = load i32* %a12, align 1
  %add13 = add nsw i32 %add11, %6
  ret i32 %add13

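; Likewise, the loads of the packed fields are split into byte and
; halfword pieces because only 1-byte alignment is known; the offsets
; match the right-justified slots filled in @caller2.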
; CHECK: lbz {{[0-9]+}}, 149(1)
; CHECK: lbz {{[0-9]+}}, 150(1)
; CHECK: lbz {{[0-9]+}}, 147(1)
; CHECK: lbz {{[0-9]+}}, 148(1)
; CHECK: lbz {{[0-9]+}}, 133(1)
; CHECK: lbz {{[0-9]+}}, 134(1)
; CHECK: lha {{[0-9]+}}, 126(1)
; CHECK: lbz {{[0-9]+}}, 119(1)
; CHECK: lwz {{[0-9]+}}, 140(1)
; CHECK: lhz {{[0-9]+}}, 154(1)
; CHECK: lhz {{[0-9]+}}, 156(1)
; CHECK: lbz {{[0-9]+}}, 163(1)
; CHECK: lbz {{[0-9]+}}, 164(1)
; CHECK: lbz {{[0-9]+}}, 161(1)
; CHECK: lbz {{[0-9]+}}, 162(1)
}