; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
; RUN: grep nand %t1.s | count 90
; RUN: grep and %t1.s | count 94
; RUN: grep xsbh %t1.s | count 2
; RUN: grep xshw %t1.s | count 4
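
; This test checks that CellSPU selects its nand instruction for the
; ~(a & b) pattern, across vector and scalar integer types. Each case
; appears twice with the operands of the 'and' commuted (the _1/_2
; pairs) so that pattern matching is exercised for both operand orders.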
target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"

define <4 x i32> @nand_v4i32_1(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = and <4 x i32> %arg2, %arg1      ; <<4 x i32>> [#uses=1]
        %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
        ret <4 x i32> %B
}

define <4 x i32> @nand_v4i32_2(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = and <4 x i32> %arg1, %arg2      ; <<4 x i32>> [#uses=1]
        %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
        ret <4 x i32> %B
}

define <8 x i16> @nand_v8i16_1(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = and <8 x i16> %arg2, %arg1      ; <<8 x i16>> [#uses=1]
        %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
                                 i16 -1, i16 -1, i16 -1, i16 -1 >
        ret <8 x i16> %B
}

define <8 x i16> @nand_v8i16_2(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = and <8 x i16> %arg1, %arg2      ; <<8 x i16>> [#uses=1]
        %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
                                 i16 -1, i16 -1, i16 -1, i16 -1 >
        ret <8 x i16> %B
}

define <16 x i8> @nand_v16i8_1(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = and <16 x i8> %arg2, %arg1      ; <<16 x i8>> [#uses=1]
        %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1 >
        ret <16 x i8> %B
}

define <16 x i8> @nand_v16i8_2(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = and <16 x i8> %arg1, %arg2      ; <<16 x i8>> [#uses=1]
        %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1 >
        ret <16 x i8> %B
}

define i32 @nand_i32_1(i32 %arg1, i32 %arg2) {
        %A = and i32 %arg2, %arg1            ; <i32> [#uses=1]
        %B = xor i32 %A, -1                  ; <i32> [#uses=1]
        ret i32 %B
}

define i32 @nand_i32_2(i32 %arg1, i32 %arg2) {
        %A = and i32 %arg1, %arg2            ; <i32> [#uses=1]
        %B = xor i32 %A, -1                  ; <i32> [#uses=1]
        ret i32 %B
}

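; Note: the scalar i8/i16 cases below presumably account for the xsbh/xshw
; counts in the RUN lines, assuming SPU's xsbh (extend sign byte to halfword)
; and xshw (extend sign halfword to word) sign-extend the narrow results:
; one xshw for each signext i16 return, and an xsbh/xshw pair for each
; signext i8 return.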
define i16 @nand_i16_1(i16 signext %arg1, i16 signext %arg2) signext {
        %A = and i16 %arg2, %arg1            ; <i16> [#uses=1]
        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
        ret i16 %B
}

define i16 @nand_i16_2(i16 signext %arg1, i16 signext %arg2) signext {
        %A = and i16 %arg1, %arg2            ; <i16> [#uses=1]
        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
        ret i16 %B
}

define i16 @nand_i16u_1(i16 zeroext %arg1, i16 zeroext %arg2) zeroext {
        %A = and i16 %arg2, %arg1            ; <i16> [#uses=1]
        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
        ret i16 %B
}

define i16 @nand_i16u_2(i16 zeroext %arg1, i16 zeroext %arg2) zeroext {
        %A = and i16 %arg1, %arg2            ; <i16> [#uses=1]
        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
        ret i16 %B
}

define i8 @nand_i8u_1(i8 zeroext %arg1, i8 zeroext %arg2) zeroext {
        %A = and i8 %arg2, %arg1             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

define i8 @nand_i8u_2(i8 zeroext %arg1, i8 zeroext %arg2) zeroext {
        %A = and i8 %arg1, %arg2             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

define i8 @nand_i8_1(i8 signext %arg1, i8 signext %arg2) signext {
        %A = and i8 %arg2, %arg1             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

define i8 @nand_i8_2(i8 signext %arg1, i8 signext %arg2) signext {
        %A = and i8 %arg1, %arg2             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

define i8 @nand_i8_3(i8 %arg1, i8 %arg2) {
        %A = and i8 %arg2, %arg1             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

define i8 @nand_i8_4(i8 %arg1, i8 %arg2) {
        %A = and i8 %arg1, %arg2             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}