- Restore some i8 functionality in CellSPU: and-with-immediate on i8 now
  selects ANDBI again (and_ops.ll instruction counts updated accordingly).
- New test case: nand.ll, covering the and-plus-invert pattern that should
  lower to SPU's NAND instruction for vector, i32, i16, and i8 operands
  (signext, zeroext, and plain).
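
For reference, the pattern nand.ll exercises is an "and" whose result is
immediately inverted with an all-ones xor; the SPU backend should fold the
pair into a single nand instruction. A minimal illustrative sketch (the
function name @nand_example is hypothetical, not part of the test):

    define i32 @nand_example(i32 %a, i32 %b) {
      %conj = and i32 %a, %b     ; a & b
      %inv = xor i32 %conj, -1   ; ~(a & b) -- should select one SPU nand
      ret i32 %inv
    }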
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@45130 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/CellSPU/and_ops.ll b/test/CodeGen/CellSPU/and_ops.ll
index 5c88d7e..f23355e 100644
--- a/test/CodeGen/CellSPU/and_ops.ll
+++ b/test/CodeGen/CellSPU/and_ops.ll
@@ -1,9 +1,9 @@
; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep and %t1.s | count 227
+; RUN: grep and %t1.s | count 232
; RUN: grep andc %t1.s | count 85
; RUN: grep andi %t1.s | count 36
-; RUN: grep andhi %t1.s | count 31
-; RUN: grep andbi %t1.s | count 1
+; RUN: grep andhi %t1.s | count 30
+; RUN: grep andbi %t1.s | count 4

; AND instruction generation:
define <4 x i32> @and_v4i32_1(<4 x i32> %arg1, <4 x i32> %arg2) {
@@ -258,13 +258,19 @@
}

define i8 @and_u8(i8 zeroext %in) zeroext {
- ; ANDI generated:
- %tmp37 = and i8 %in, 37 ; <i8> [#uses=1]
+ ; ANDBI generated:
+ %tmp37 = and i8 %in, 37
ret i8 %tmp37
}

-define i8 @and_i8(i8 signext %in) signext {
- ; ANDHI generated
- %tmp38 = and i8 %in, 37 ; <i8> [#uses=1]
+define i8 @and_sext8(i8 signext %in) signext {
+ ; ANDBI generated
+ %tmp38 = and i8 %in, 37
+ ret i8 %tmp38
+}
+
+define i8 @and_i8(i8 %in) {
+ ; ANDBI generated
+ %tmp38 = and i8 %in, 205
ret i8 %tmp38
}
diff --git a/test/CodeGen/CellSPU/nand.ll b/test/CodeGen/CellSPU/nand.ll
new file mode 100644
index 0000000..091f4b2
--- /dev/null
+++ b/test/CodeGen/CellSPU/nand.ll
@@ -0,0 +1,119 @@
+; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
+; RUN: grep nand %t1.s | count 90
+; RUN: grep and %t1.s | count 94
+; RUN: grep xsbh %t1.s | count 2
+; RUN: grep xshw %t1.s | count 4
+
+define <4 x i32> @nand_v4i32_1(<4 x i32> %arg1, <4 x i32> %arg2) {
+ %A = and <4 x i32> %arg2, %arg1 ; <<4 x i32>> [#uses=1]
+ %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ ret <4 x i32> %B
+}
+
+define <4 x i32> @nand_v4i32_2(<4 x i32> %arg1, <4 x i32> %arg2) {
+ %A = and <4 x i32> %arg1, %arg2 ; <<4 x i32>> [#uses=1]
+ %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ ret <4 x i32> %B
+}
+
+define <8 x i16> @nand_v8i16_1(<8 x i16> %arg1, <8 x i16> %arg2) {
+ %A = and <8 x i16> %arg2, %arg1 ; <<8 x i16>> [#uses=1]
+ %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1 >
+ ret <8 x i16> %B
+}
+
+define <8 x i16> @nand_v8i16_2(<8 x i16> %arg1, <8 x i16> %arg2) {
+ %A = and <8 x i16> %arg1, %arg2 ; <<8 x i16>> [#uses=1]
+ %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1 >
+ ret <8 x i16> %B
+}
+
+define <16 x i8> @nand_v16i8_1(<16 x i8> %arg1, <16 x i8> %arg2) {
+ %A = and <16 x i8> %arg2, %arg1 ; <<16 x i8>> [#uses=1]
+ %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1 >
+ ret <16 x i8> %B
+}
+
+define <16 x i8> @nand_v16i8_2(<16 x i8> %arg1, <16 x i8> %arg2) {
+ %A = and <16 x i8> %arg1, %arg2 ; <<16 x i8>> [#uses=1]
+ %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1 >
+ ret <16 x i8> %B
+}
+
+define i32 @nand_i32_1(i32 %arg1, i32 %arg2) {
+ %A = and i32 %arg2, %arg1 ; <i32> [#uses=1]
+ %B = xor i32 %A, -1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @nand_i32_2(i32 %arg1, i32 %arg2) {
+ %A = and i32 %arg1, %arg2 ; <i32> [#uses=1]
+ %B = xor i32 %A, -1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i16 @nand_i16_1(i16 signext %arg1, i16 signext %arg2) signext {
+ %A = and i16 %arg2, %arg1 ; <i16> [#uses=1]
+ %B = xor i16 %A, -1 ; <i16> [#uses=1]
+ ret i16 %B
+}
+
+define i16 @nand_i16_2(i16 signext %arg1, i16 signext %arg2) signext {
+ %A = and i16 %arg1, %arg2 ; <i16> [#uses=1]
+ %B = xor i16 %A, -1 ; <i16> [#uses=1]
+ ret i16 %B
+}
+
+define i16 @nand_i16u_1(i16 zeroext %arg1, i16 zeroext %arg2) zeroext {
+ %A = and i16 %arg2, %arg1 ; <i16> [#uses=1]
+ %B = xor i16 %A, -1 ; <i16> [#uses=1]
+ ret i16 %B
+}
+
+define i16 @nand_i16u_2(i16 zeroext %arg1, i16 zeroext %arg2) zeroext {
+ %A = and i16 %arg1, %arg2 ; <i16> [#uses=1]
+ %B = xor i16 %A, -1 ; <i16> [#uses=1]
+ ret i16 %B
+}
+
+define i8 @nand_i8u_1(i8 zeroext %arg1, i8 zeroext %arg2) zeroext {
+ %A = and i8 %arg2, %arg1 ; <i8> [#uses=1]
+ %B = xor i8 %A, -1 ; <i8> [#uses=1]
+ ret i8 %B
+}
+
+define i8 @nand_i8u_2(i8 zeroext %arg1, i8 zeroext %arg2) zeroext {
+ %A = and i8 %arg1, %arg2 ; <i8> [#uses=1]
+ %B = xor i8 %A, -1 ; <i8> [#uses=1]
+ ret i8 %B
+}
+
+define i8 @nand_i8_1(i8 signext %arg1, i8 signext %arg2) signext {
+ %A = and i8 %arg2, %arg1 ; <i8> [#uses=1]
+ %B = xor i8 %A, -1 ; <i8> [#uses=1]
+ ret i8 %B
+}
+
+define i8 @nand_i8_2(i8 signext %arg1, i8 signext %arg2) signext {
+ %A = and i8 %arg1, %arg2 ; <i8> [#uses=1]
+ %B = xor i8 %A, -1 ; <i8> [#uses=1]
+ ret i8 %B
+}
+
+define i8 @nand_i8_3(i8 %arg1, i8 %arg2) {
+ %A = and i8 %arg2, %arg1 ; <i8> [#uses=1]
+ %B = xor i8 %A, -1 ; <i8> [#uses=1]
+ ret i8 %B
+}
+
+define i8 @nand_i8_4(i8 %arg1, i8 %arg2) {
+ %A = and i8 %arg1, %arg2 ; <i8> [#uses=1]
+ %B = xor i8 %A, -1 ; <i8> [#uses=1]
+ ret i8 %B
+}