; RUN: llc -O0 -disable-fp-elim < %s | FileCheck %s

; This test is currently disabled because the checked instructions
; may appear in random order.

; XFAIL: *

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

; Naturally aligned (s*) and packed (t*) struct types passed byval below.
%struct.s1 = type { i8 }
%struct.s2 = type { i16 }
%struct.s4 = type { i32 }
%struct.t1 = type { i8 }
%struct.t3 = type <{ i16, i8 }>
%struct.t5 = type <{ i32, i8 }>
%struct.t6 = type <{ i32, i16 }>
%struct.t7 = type <{ i32, i16, i8 }>
%struct.s3 = type { i16, i8 }
%struct.s5 = type { i32, i8 }
%struct.s6 = type { i32, i16 }
%struct.s7 = type { i32, i16, i8 }
%struct.t2 = type <{ i16 }>
%struct.t4 = type <{ i32 }>

; Constant initializers memcpy'd into the locals each caller passes byval.
@caller1.p1 = private unnamed_addr constant %struct.s1 { i8 1 }, align 1
@caller1.p2 = private unnamed_addr constant %struct.s2 { i16 2 }, align 2
@caller1.p3 = private unnamed_addr constant { i16, i8, i8 } { i16 4, i8 8, i8 undef }, align 2
@caller1.p4 = private unnamed_addr constant %struct.s4 { i32 16 }, align 4
@caller1.p5 = private unnamed_addr constant { i32, i8, [3 x i8] } { i32 32, i8 64, [3 x i8] undef }, align 4
@caller1.p6 = private unnamed_addr constant { i32, i16, [2 x i8] } { i32 128, i16 256, [2 x i8] undef }, align 4
@caller1.p7 = private unnamed_addr constant { i32, i16, i8, i8 } { i32 512, i16 1024, i8 -3, i8 undef }, align 4
@caller2.p1 = private unnamed_addr constant %struct.t1 { i8 1 }, align 1
@caller2.p2 = private unnamed_addr constant { i16 } { i16 2 }, align 1
@caller2.p3 = private unnamed_addr constant %struct.t3 <{ i16 4, i8 8 }>, align 1
@caller2.p4 = private unnamed_addr constant { i32 } { i32 16 }, align 1
@caller2.p5 = private unnamed_addr constant %struct.t5 <{ i32 32, i8 64 }>, align 1
@caller2.p6 = private unnamed_addr constant %struct.t6 <{ i32 128, i16 256 }>, align 1
@caller2.p7 = private unnamed_addr constant %struct.t7 <{ i32 512, i16 1024, i8 -3 }>, align 1

; Copies the seven naturally aligned structs (s1..s7) into locals and
; passes them all byval; CHECKs pin the loads of the register-passed args.
define i32 @caller1() nounwind {
entry:
  %p1 = alloca %struct.s1, align 1
  %p2 = alloca %struct.s2, align 2
  %p3 = alloca %struct.s3, align 2
  %p4 = alloca %struct.s4, align 4
  %p5 = alloca %struct.s5, align 4
  %p6 = alloca %struct.s6, align 4
  %p7 = alloca %struct.s7, align 4
  %0 = bitcast %struct.s1* %p1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
  %1 = bitcast %struct.s2* %p2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false)
  %2 = bitcast %struct.s3* %p3 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i32 2, i1 false)
  %3 = bitcast %struct.s4* %p4 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i32 4, i1 false)
  %4 = bitcast %struct.s5* %p5 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i32 4, i1 false)
  %5 = bitcast %struct.s6* %p6 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i32 4, i1 false)
  %6 = bitcast %struct.s7* %p7 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i32 4, i1 false)
  %call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7)
  ret i32 %call

; CHECK: ld 9, 128(31)
; CHECK: ld 8, 136(31)
; CHECK: ld 7, 144(31)
; CHECK: lwz 6, 152(31)
; CHECK: lwz 5, 160(31)
; CHECK: lhz 4, 168(31)
; CHECK: lbz 3, 176(31)
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; Loads the first field of each byval aligned struct and sums them;
; CHECKs pin the home-area stores and the subsequent reloads.
define internal i32 @callee1(%struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind {
entry:
  %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0
  %0 = load i8* %a, align 1
  %conv = zext i8 %0 to i32
  %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0
  %1 = load i16* %a1, align 2
  %conv2 = sext i16 %1 to i32
  %add = add nsw i32 %conv, %conv2
  %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0
  %2 = load i16* %a3, align 2
  %conv4 = sext i16 %2 to i32
  %add5 = add nsw i32 %add, %conv4
  %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0
  %3 = load i32* %a6, align 4
  %add7 = add nsw i32 %add5, %3
  %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0
  %4 = load i32* %a8, align 4
  %add9 = add nsw i32 %add7, %4
  %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0
  %5 = load i32* %a10, align 4
  %add11 = add nsw i32 %add9, %5
  %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0
  %6 = load i32* %a12, align 4
  %add13 = add nsw i32 %add11, %6
  ret i32 %add13

; CHECK: std 9, 96(1)
; CHECK: std 8, 88(1)
; CHECK: std 7, 80(1)
; CHECK: stw 6, 72(1)
; CHECK: stw 5, 64(1)
; CHECK: sth 4, 58(1)
; CHECK: stb 3, 51(1)
; CHECK: lha {{[0-9]+}}, 58(1)
; CHECK: lbz {{[0-9]+}}, 51(1)
; CHECK: lha {{[0-9]+}}, 64(1)
; CHECK: lwz {{[0-9]+}}, 72(1)
; CHECK: lwz {{[0-9]+}}, 80(1)
; CHECK: lwz {{[0-9]+}}, 88(1)
; CHECK: lwz {{[0-9]+}}, 96(1)
}

; Same as @caller1 but with the packed (t*) structs, all with align 1;
; CHECKs pin the piecewise stores used to assemble the register images.
define i32 @caller2() nounwind {
entry:
  %p1 = alloca %struct.t1, align 1
  %p2 = alloca %struct.t2, align 1
  %p3 = alloca %struct.t3, align 1
  %p4 = alloca %struct.t4, align 1
  %p5 = alloca %struct.t5, align 1
  %p6 = alloca %struct.t6, align 1
  %p7 = alloca %struct.t7, align 1
  %0 = bitcast %struct.t1* %p1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
  %1 = bitcast %struct.t2* %p2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false)
  %2 = bitcast %struct.t3* %p3 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i32 1, i1 false)
  %3 = bitcast %struct.t4* %p4 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i32 1, i1 false)
  %4 = bitcast %struct.t5* %p5 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i32 1, i1 false)
  %5 = bitcast %struct.t6* %p6 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i32 1, i1 false)
  %6 = bitcast %struct.t7* %p7 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i32 1, i1 false)
  %call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7)
  ret i32 %call

; CHECK: stb {{[0-9]+}}, 71(1)
; CHECK: sth {{[0-9]+}}, 69(1)
; CHECK: stb {{[0-9]+}}, 87(1)
; CHECK: stw {{[0-9]+}}, 83(1)
; CHECK: sth {{[0-9]+}}, 94(1)
; CHECK: stw {{[0-9]+}}, 90(1)
; CHECK: stb {{[0-9]+}}, 103(1)
; CHECK: sth {{[0-9]+}}, 101(1)
; CHECK: stw {{[0-9]+}}, 97(1)
; CHECK: ld 9, 96(1)
; CHECK: ld 8, 88(1)
; CHECK: ld 7, 80(1)
; CHECK: lwz 6, 152(31)
; CHECK: ld 5, 64(1)
; CHECK: lhz 4, 168(31)
; CHECK: lbz 3, 176(31)
}

; Loads the first field of each byval packed struct and sums them;
; CHECKs pin the shifts that position the packed fields plus the reloads.
define internal i32 @callee2(%struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind {
entry:
  %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0
  %0 = load i8* %a, align 1
  %conv = zext i8 %0 to i32
  %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0
  %1 = load i16* %a1, align 1
  %conv2 = sext i16 %1 to i32
  %add = add nsw i32 %conv, %conv2
  %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0
  %2 = load i16* %a3, align 1
  %conv4 = sext i16 %2 to i32
  %add5 = add nsw i32 %add, %conv4
  %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0
  %3 = load i32* %a6, align 1
  %add7 = add nsw i32 %add5, %3
  %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0
  %4 = load i32* %a8, align 1
  %add9 = add nsw i32 %add7, %4
  %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0
  %5 = load i32* %a10, align 1
  %add11 = add nsw i32 %add9, %5
  %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0
  %6 = load i32* %a12, align 1
  %add13 = add nsw i32 %add11, %6
  ret i32 %add13

; CHECK: sldi 9, 9, 8
; CHECK: sldi 8, 8, 16
; CHECK: sldi 7, 7, 24
; CHECK: sldi 5, 5, 40
; CHECK: stw 6, 72(1)
; CHECK: sth 4, 58(1)
; CHECK: stb 3, 51(1)
; CHECK: std 9, 96(1)
; CHECK: std 8, 88(1)
; CHECK: std 7, 80(1)
; CHECK: std 5, 64(1)
; CHECK: lha {{[0-9]+}}, 58(1)
; CHECK: lbz {{[0-9]+}}, 51(1)
; CHECK: lha {{[0-9]+}}, 64(1)
; CHECK: lwz {{[0-9]+}}, 72(1)
; CHECK: lwz {{[0-9]+}}, 80(1)
; CHECK: lwz {{[0-9]+}}, 88(1)
; CHECK: lwz {{[0-9]+}}, 96(1)
}