; RUN: llc -O3 -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
; RUN: llc -O3 -mcpu=pwr8 -disable-ppc-vsx-swap-removal -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck -check-prefix=NOOPTSWAP %s

; This test was generated from the following source:
;
; #define N 4096
; int ca[N] __attribute__((aligned(16)));
; int cb[N] __attribute__((aligned(16)));
; int cc[N] __attribute__((aligned(16)));
; int cd[N] __attribute__((aligned(16)));
;
; void foo ()
; {
;   int i;
;   for (i = 0; i < N; i++) {
;     ca[i] = (cb[i] + cc[i]) * cd[i];
;   }
; }

; 16-byte-aligned int arrays matching the C source above; alignment lets the
; vectorizer use aligned <4 x i32> loads/stores.
@cb = common global [4096 x i32] zeroinitializer, align 16
@cc = common global [4096 x i32] zeroinitializer, align 16
@cd = common global [4096 x i32] zeroinitializer, align 16
@ca = common global [4096 x i32] zeroinitializer, align 16

; foo(): ca[i] = (cb[i] + cc[i]) * cd[i] for i in [0, 4096), vectorized to
; <4 x i32> and manually unrolled by a factor of 4 (four identical
; load/load/add/load/mul/store groups per iteration, indices stepping by 4).
define void @foo() {
entry:
  br label %vector.body

vector.body:
  %index = phi i64 [ 0, %entry ], [ %index.next.3, %vector.body ]
  ; --- unrolled iteration 0 ---
  %0 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 16
  %2 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index
  %3 = bitcast i32* %2 to <4 x i32>*
  %wide.load13 = load <4 x i32>, <4 x i32>* %3, align 16
  %4 = add nsw <4 x i32> %wide.load13, %wide.load
  %5 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index
  %6 = bitcast i32* %5 to <4 x i32>*
  %wide.load14 = load <4 x i32>, <4 x i32>* %6, align 16
  %7 = mul nsw <4 x i32> %4, %wide.load14
  %8 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index
  %9 = bitcast i32* %8 to <4 x i32>*
  store <4 x i32> %7, <4 x i32>* %9, align 16
  %index.next = add nuw nsw i64 %index, 4
  ; --- unrolled iteration 1 ---
  %10 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next
  %11 = bitcast i32* %10 to <4 x i32>*
  %wide.load.1 = load <4 x i32>, <4 x i32>* %11, align 16
  %12 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next
  %13 = bitcast i32* %12 to <4 x i32>*
  %wide.load13.1 = load <4 x i32>, <4 x i32>* %13, align 16
  %14 = add nsw <4 x i32> %wide.load13.1, %wide.load.1
  %15 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next
  %16 = bitcast i32* %15 to <4 x i32>*
  %wide.load14.1 = load <4 x i32>, <4 x i32>* %16, align 16
  %17 = mul nsw <4 x i32> %14, %wide.load14.1
  %18 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next
  %19 = bitcast i32* %18 to <4 x i32>*
  store <4 x i32> %17, <4 x i32>* %19, align 16
  %index.next.1 = add nuw nsw i64 %index.next, 4
  ; --- unrolled iteration 2 ---
  %20 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.1
  %21 = bitcast i32* %20 to <4 x i32>*
  %wide.load.2 = load <4 x i32>, <4 x i32>* %21, align 16
  %22 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.1
  %23 = bitcast i32* %22 to <4 x i32>*
  %wide.load13.2 = load <4 x i32>, <4 x i32>* %23, align 16
  %24 = add nsw <4 x i32> %wide.load13.2, %wide.load.2
  %25 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.1
  %26 = bitcast i32* %25 to <4 x i32>*
  %wide.load14.2 = load <4 x i32>, <4 x i32>* %26, align 16
  %27 = mul nsw <4 x i32> %24, %wide.load14.2
  %28 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.1
  %29 = bitcast i32* %28 to <4 x i32>*
  store <4 x i32> %27, <4 x i32>* %29, align 16
  %index.next.2 = add nuw nsw i64 %index.next.1, 4
  ; --- unrolled iteration 3 ---
  %30 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.2
  %31 = bitcast i32* %30 to <4 x i32>*
  %wide.load.3 = load <4 x i32>, <4 x i32>* %31, align 16
  %32 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.2
  %33 = bitcast i32* %32 to <4 x i32>*
  %wide.load13.3 = load <4 x i32>, <4 x i32>* %33, align 16
  %34 = add nsw <4 x i32> %wide.load13.3, %wide.load.3
  %35 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.2
  %36 = bitcast i32* %35 to <4 x i32>*
  %wide.load14.3 = load <4 x i32>, <4 x i32>* %36, align 16
  %37 = mul nsw <4 x i32> %34, %wide.load14.3
  %38 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.2
  %39 = bitcast i32* %38 to <4 x i32>*
  store <4 x i32> %37, <4 x i32>* %39, align 16
  %index.next.3 = add nuw nsw i64 %index.next.2, 4
  %40 = icmp eq i64 %index.next.3, 4096
  br i1 %40, label %for.end, label %vector.body

for.end:
  ret void
}


; With swap removal enabled (default), no swap/permute instructions should
; remain: each unrolled group is three lxvd2x loads, an add, a mul, and a
; stxvd2x store.
; CHECK-LABEL: @foo
; CHECK-NOT: xxpermdi
; CHECK-NOT: xxswapd

; CHECK: lxvd2x
; CHECK: lxvd2x
; CHECK-DAG: lxvd2x
; CHECK-DAG: vadduwm
; CHECK: vmuluwm
; CHECK: stxvd2x

; CHECK: lxvd2x
; CHECK: lxvd2x
; CHECK-DAG: lxvd2x
; CHECK-DAG: vadduwm
; CHECK: vmuluwm
; CHECK: stxvd2x

; CHECK: lxvd2x
; CHECK: lxvd2x
; CHECK-DAG: lxvd2x
; CHECK-DAG: vadduwm
; CHECK: vmuluwm
; CHECK: stxvd2x

; CHECK: lxvd2x
; CHECK: lxvd2x
; CHECK-DAG: lxvd2x
; CHECK-DAG: vadduwm
; CHECK: vmuluwm
; CHECK: stxvd2x


; With -disable-ppc-vsx-swap-removal, each little-endian lxvd2x/stxvd2x is
; paired with an xxswapd to restore element order.
; NOOPTSWAP-LABEL: @foo

; NOOPTSWAP: lxvd2x
; NOOPTSWAP-DAG: lxvd2x
; NOOPTSWAP-DAG: lxvd2x
; NOOPTSWAP-DAG: xxswapd
; NOOPTSWAP-DAG: xxswapd
; NOOPTSWAP-DAG: xxswapd
; NOOPTSWAP-DAG: vadduwm
; NOOPTSWAP: vmuluwm
; NOOPTSWAP: xxswapd
; NOOPTSWAP-DAG: xxswapd
; NOOPTSWAP-DAG: xxswapd
; NOOPTSWAP-DAG: stxvd2x
; NOOPTSWAP-DAG: stxvd2x
; NOOPTSWAP: stxvd2x