; RUN: llc < %s -march=cellspu > %t1.s
; RUN: grep nand %t1.s | count 90
; RUN: grep and %t1.s | count 94
; RUN: grep xsbh %t1.s | count 2
; RUN: grep xshw %t1.s | count 4

; CellSPU legalization is over-sensitive to Legalize's traversal order.
; XFAIL: *

target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"

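; Each pair of functions below computes NAND as (xor (and x, y), -1), with
; the AND operands swapped between the two variants so that instruction
; selection is exercised on both commuted forms of the pattern.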
define <4 x i32> @nand_v4i32_1(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = and <4 x i32> %arg2, %arg1         ; <<4 x i32>> [#uses=1]
        %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
        ret <4 x i32> %B
}

define <4 x i32> @nand_v4i32_2(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = and <4 x i32> %arg1, %arg2         ; <<4 x i32>> [#uses=1]
        %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
        ret <4 x i32> %B
}

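; Same NAND pattern on <8 x i16> vectors.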
define <8 x i16> @nand_v8i16_1(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = and <8 x i16> %arg2, %arg1         ; <<8 x i16>> [#uses=1]
        %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
                                 i16 -1, i16 -1, i16 -1, i16 -1 >
        ret <8 x i16> %B
}

define <8 x i16> @nand_v8i16_2(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = and <8 x i16> %arg1, %arg2         ; <<8 x i16>> [#uses=1]
        %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
                                 i16 -1, i16 -1, i16 -1, i16 -1 >
        ret <8 x i16> %B
}

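; Same NAND pattern on <16 x i8> vectors.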
define <16 x i8> @nand_v16i8_1(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = and <16 x i8> %arg2, %arg1         ; <<16 x i8>> [#uses=1]
        %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1 >
        ret <16 x i8> %B
}

define <16 x i8> @nand_v16i8_2(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = and <16 x i8> %arg1, %arg2         ; <<16 x i8>> [#uses=1]
        %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1 >
        ret <16 x i8> %B
}

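; Scalar i32 NAND.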
define i32 @nand_i32_1(i32 %arg1, i32 %arg2) {
        %A = and i32 %arg2, %arg1               ; <i32> [#uses=1]
        %B = xor i32 %A, -1                     ; <i32> [#uses=1]
        ret i32 %B
}

define i32 @nand_i32_2(i32 %arg1, i32 %arg2) {
        %A = and i32 %arg1, %arg2               ; <i32> [#uses=1]
        %B = xor i32 %A, -1                     ; <i32> [#uses=1]
        ret i32 %B
}

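; Scalar i16 NAND with sign-extended arguments and result. Sign-extending the
; i16 result (xshw, halfword -> word) likely accounts for part of the xshw
; count in the RUN lines above.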
define signext i16 @nand_i16_1(i16 signext %arg1, i16 signext %arg2) {
        %A = and i16 %arg2, %arg1               ; <i16> [#uses=1]
        %B = xor i16 %A, -1                     ; <i16> [#uses=1]
        ret i16 %B
}

define signext i16 @nand_i16_2(i16 signext %arg1, i16 signext %arg2) {
        %A = and i16 %arg1, %arg2               ; <i16> [#uses=1]
        %B = xor i16 %A, -1                     ; <i16> [#uses=1]
        ret i16 %B
}

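; Scalar i16 NAND, zero-extended variants.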
define zeroext i16 @nand_i16u_1(i16 zeroext %arg1, i16 zeroext %arg2) {
        %A = and i16 %arg2, %arg1               ; <i16> [#uses=1]
        %B = xor i16 %A, -1                     ; <i16> [#uses=1]
        ret i16 %B
}

define zeroext i16 @nand_i16u_2(i16 zeroext %arg1, i16 zeroext %arg2) {
        %A = and i16 %arg1, %arg2               ; <i16> [#uses=1]
        %B = xor i16 %A, -1                     ; <i16> [#uses=1]
        ret i16 %B
}

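; Scalar i8 NAND, zero-extended variants.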
define zeroext i8 @nand_i8u_1(i8 zeroext %arg1, i8 zeroext %arg2) {
        %A = and i8 %arg2, %arg1                ; <i8> [#uses=1]
        %B = xor i8 %A, -1                      ; <i8> [#uses=1]
        ret i8 %B
}

define zeroext i8 @nand_i8u_2(i8 zeroext %arg1, i8 zeroext %arg2) {
        %A = and i8 %arg1, %arg2                ; <i8> [#uses=1]
        %B = xor i8 %A, -1                      ; <i8> [#uses=1]
        ret i8 %B
}

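; Scalar i8 NAND with sign-extended arguments and result. Sign-extending the
; i8 result presumably produces the xsbh (byte -> halfword) matches counted
; above, each followed by an xshw (halfword -> word).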
define signext i8 @nand_i8_1(i8 signext %arg1, i8 signext %arg2) {
        %A = and i8 %arg2, %arg1                ; <i8> [#uses=1]
        %B = xor i8 %A, -1                      ; <i8> [#uses=1]
        ret i8 %B
}

define signext i8 @nand_i8_2(i8 signext %arg1, i8 signext %arg2) {
        %A = and i8 %arg1, %arg2                ; <i8> [#uses=1]
        %B = xor i8 %A, -1                      ; <i8> [#uses=1]
        ret i8 %B
}

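; Scalar i8 NAND with no extension attributes.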
define i8 @nand_i8_3(i8 %arg1, i8 %arg2) {
        %A = and i8 %arg2, %arg1                ; <i8> [#uses=1]
        %B = xor i8 %A, -1                      ; <i8> [#uses=1]
        ret i8 %B
}

define i8 @nand_i8_4(i8 %arg1, i8 %arg2) {
        %A = and i8 %arg1, %arg2                ; <i8> [#uses=1]
        %B = xor i8 %A, -1                      ; <i8> [#uses=1]
        ret i8 %B
}