; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
; RUN: grep selb %t1.s | count 160
; RUN: grep and %t1.s | count 2
; RUN: grep xsbh %t1.s | count 1
; RUN: grep xshw %t1.s | count 2
target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"

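; This test exercises instruction selection for the SPU "select bits" (selb)
; instruction, which computes (rA & ~rC) | (rB & rC): each result bit is taken
; from rB where the corresponding control-mask bit in rC is set, and from rA
; where it is clear. Each function below spells out that idiom in LLVM IR with
; the and/xor/or operands commuted in a different order, and each permutation
; should still be matched to a single selb.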
define <16 x i8> @selb_v16i8_1(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %A = xor <16 x i8> %arg3, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %A, %arg1            ; <<16 x i8>> [#uses=1]
        %C = and <16 x i8> %arg2, %arg3         ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %B, %C                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_11(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %A = xor <16 x i8> %arg3, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %arg1, %A            ; <<16 x i8>> [#uses=1]
        %C = and <16 x i8> %arg3, %arg2         ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %B, %C                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_12(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %A = xor <16 x i8> %arg3, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %arg1, %A            ; <<16 x i8>> [#uses=1]
        %C = and <16 x i8> %arg2, %arg3         ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %B, %C                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_13(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %A = xor <16 x i8> %arg3, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %A, %arg1            ; <<16 x i8>> [#uses=1]
        %C = and <16 x i8> %arg2, %arg3         ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %B, %C                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_2(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %A = xor <16 x i8> %arg1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %A, %arg2            ; <<16 x i8>> [#uses=1]
        %C = and <16 x i8> %arg3, %arg1         ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %B, %C                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_21(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %A = xor <16 x i8> %arg1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %arg2, %A            ; <<16 x i8>> [#uses=1]
        %C = and <16 x i8> %arg3, %arg1         ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %B, %C                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_3(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %A = xor <16 x i8> %arg2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %A, %arg1            ; <<16 x i8>> [#uses=1]
        %C = and <16 x i8> %arg3, %arg2         ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %B, %C                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_4(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %C = and <16 x i8> %arg3, %arg2         ; <<16 x i8>> [#uses=1]
        %A = xor <16 x i8> %arg2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %A, %arg1            ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %B, %C                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_41(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %C = and <16 x i8> %arg2, %arg3         ; <<16 x i8>> [#uses=1]
        %A = xor <16 x i8> %arg2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %arg1, %A            ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %C, %B                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_42(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %C = and <16 x i8> %arg2, %arg3         ; <<16 x i8>> [#uses=1]
        %A = xor <16 x i8> %arg2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %A, %arg1            ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %C, %B                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

define <16 x i8> @selb_v16i8_5(<16 x i8> %arg1, <16 x i8> %arg2, <16 x i8> %arg3) {
        %C = and <16 x i8> %arg2, %arg1         ; <<16 x i8>> [#uses=1]
        %A = xor <16 x i8> %arg1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %A, %arg3            ; <<16 x i8>> [#uses=1]
        %D = or <16 x i8> %B, %C                ; <<16 x i8>> [#uses=1]
        ret <16 x i8> %D
}

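; The same commuted variants repeated at <8 x i16>: the selb match should not
; depend on the element width of the mask.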
define <8 x i16> @selb_v8i16_1(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %A = xor <8 x i16> %arg3, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %A, %arg1            ; <<8 x i16>> [#uses=1]
        %C = and <8 x i16> %arg2, %arg3         ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %B, %C                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_11(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %A = xor <8 x i16> %arg3, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %arg1, %A            ; <<8 x i16>> [#uses=1]
        %C = and <8 x i16> %arg3, %arg2         ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %B, %C                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_12(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %A = xor <8 x i16> %arg3, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %arg1, %A            ; <<8 x i16>> [#uses=1]
        %C = and <8 x i16> %arg2, %arg3         ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %B, %C                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_13(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %A = xor <8 x i16> %arg3, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %A, %arg1            ; <<8 x i16>> [#uses=1]
        %C = and <8 x i16> %arg2, %arg3         ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %B, %C                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_2(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %A = xor <8 x i16> %arg1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %A, %arg2            ; <<8 x i16>> [#uses=1]
        %C = and <8 x i16> %arg3, %arg1         ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %B, %C                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_21(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %A = xor <8 x i16> %arg1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %arg2, %A            ; <<8 x i16>> [#uses=1]
        %C = and <8 x i16> %arg3, %arg1         ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %B, %C                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_3(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %A = xor <8 x i16> %arg2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %A, %arg1            ; <<8 x i16>> [#uses=1]
        %C = and <8 x i16> %arg3, %arg2         ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %B, %C                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_4(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %C = and <8 x i16> %arg3, %arg2         ; <<8 x i16>> [#uses=1]
        %A = xor <8 x i16> %arg2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %A, %arg1            ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %B, %C                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_41(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %C = and <8 x i16> %arg2, %arg3         ; <<8 x i16>> [#uses=1]
        %A = xor <8 x i16> %arg2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %arg1, %A            ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %C, %B                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_42(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %C = and <8 x i16> %arg2, %arg3         ; <<8 x i16>> [#uses=1]
        %A = xor <8 x i16> %arg2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %A, %arg1            ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %C, %B                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

define <8 x i16> @selb_v8i16_5(<8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
        %C = and <8 x i16> %arg2, %arg1         ; <<8 x i16>> [#uses=1]
        %A = xor <8 x i16> %arg1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1 >
        %B = and <8 x i16> %A, %arg3            ; <<8 x i16>> [#uses=1]
        %D = or <8 x i16> %B, %C                ; <<8 x i16>> [#uses=1]
        ret <8 x i16> %D
}

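; <4 x i32> variants of the same idiom, with the inverted mask built from
; %arg3.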
define <4 x i32> @selb_v4i32_1(<4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
        %tmpnot = xor <4 x i32> %arg3, < i32 -1, i32 -1, i32 -1, i32 -1 >       ; <<4 x i32>> [#uses=1]
        %tmp2 = and <4 x i32> %tmpnot, %arg1    ; <<4 x i32>> [#uses=1]
        %tmp5 = and <4 x i32> %arg2, %arg3      ; <<4 x i32>> [#uses=1]
        %tmp6 = or <4 x i32> %tmp2, %tmp5       ; <<4 x i32>> [#uses=1]
        ret <4 x i32> %tmp6
}

define <4 x i32> @selb_v4i32_2(<4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
        %tmpnot = xor <4 x i32> %arg3, < i32 -1, i32 -1, i32 -1, i32 -1 >       ; <<4 x i32>> [#uses=1]
        %tmp2 = and <4 x i32> %tmpnot, %arg1    ; <<4 x i32>> [#uses=1]
        %tmp5 = and <4 x i32> %arg2, %arg3      ; <<4 x i32>> [#uses=1]
        %tmp6 = or <4 x i32> %tmp2, %tmp5       ; <<4 x i32>> [#uses=1]
        ret <4 x i32> %tmp6
}

define <4 x i32> @selb_v4i32_3(<4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
        %tmpnot = xor <4 x i32> %arg3, < i32 -1, i32 -1, i32 -1, i32 -1 >       ; <<4 x i32>> [#uses=1]
        %tmp2 = and <4 x i32> %tmpnot, %arg1    ; <<4 x i32>> [#uses=1]
        %tmp5 = and <4 x i32> %arg3, %arg2      ; <<4 x i32>> [#uses=1]
        %tmp6 = or <4 x i32> %tmp2, %tmp5       ; <<4 x i32>> [#uses=1]
        ret <4 x i32> %tmp6
}

define <4 x i32> @selb_v4i32_4(<4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
        %tmp2 = and <4 x i32> %arg3, %arg2      ; <<4 x i32>> [#uses=1]
        %tmp3not = xor <4 x i32> %arg3, < i32 -1, i32 -1, i32 -1, i32 -1 >      ; <<4 x i32>> [#uses=1]
        %tmp5 = and <4 x i32> %tmp3not, %arg1   ; <<4 x i32>> [#uses=1]
        %tmp6 = or <4 x i32> %tmp2, %tmp5       ; <<4 x i32>> [#uses=1]
        ret <4 x i32> %tmp6
}

define <4 x i32> @selb_v4i32_5(<4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
        %tmp2 = and <4 x i32> %arg3, %arg2      ; <<4 x i32>> [#uses=1]
        %tmp3not = xor <4 x i32> %arg3, < i32 -1, i32 -1, i32 -1, i32 -1 >      ; <<4 x i32>> [#uses=1]
        %tmp5 = and <4 x i32> %tmp3not, %arg1   ; <<4 x i32>> [#uses=1]
        %tmp6 = or <4 x i32> %tmp2, %tmp5       ; <<4 x i32>> [#uses=1]
        ret <4 x i32> %tmp6
}

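; Scalar versions of the same idiom. The sign-extended i8/i16 cases are
; presumably where the xsbh (extend sign byte to halfword) and xshw (extend
; sign halfword to word) instructions counted by the RUN lines come from.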
define i32 @selb_i32(i32 %arg1, i32 %arg2, i32 %arg3) {
        %tmp1not = xor i32 %arg3, -1            ; <i32> [#uses=1]
        %tmp3 = and i32 %tmp1not, %arg1         ; <i32> [#uses=1]
        %tmp6 = and i32 %arg3, %arg2            ; <i32> [#uses=1]
        %tmp7 = or i32 %tmp3, %tmp6             ; <i32> [#uses=1]
        ret i32 %tmp7
}

define i16 @selb_i16(i16 signext %arg1, i16 signext %arg2, i16 signext %arg3) signext {
        %tmp3 = and i16 %arg3, %arg1            ; <i16> [#uses=1]
        %tmp4not = xor i16 %arg3, -1            ; <i16> [#uses=1]
        %tmp6 = and i16 %tmp4not, %arg2         ; <i16> [#uses=1]
        %retval1011 = or i16 %tmp3, %tmp6       ; <i16> [#uses=1]
        ret i16 %retval1011
}

define i16 @selb_i16u(i16 zeroext %arg1, i16 zeroext %arg2, i16 zeroext %arg3) zeroext {
        %tmp3 = and i16 %arg3, %arg1            ; <i16> [#uses=1]
        %tmp4not = xor i16 %arg3, -1            ; <i16> [#uses=1]
        %tmp6 = and i16 %tmp4not, %arg2         ; <i16> [#uses=1]
        %retval1011 = or i16 %tmp3, %tmp6       ; <i16> [#uses=1]
        ret i16 %retval1011
}

define i8 @selb_i8u(i8 zeroext %arg1, i8 zeroext %arg2, i8 zeroext %arg3) zeroext {
        %tmp3 = and i8 %arg3, %arg1             ; <i8> [#uses=1]
        %tmp4not = xor i8 %arg3, -1             ; <i8> [#uses=1]
        %tmp6 = and i8 %tmp4not, %arg2          ; <i8> [#uses=1]
        %retval1011 = or i8 %tmp3, %tmp6        ; <i8> [#uses=1]
        ret i8 %retval1011
}

define i8 @selb_i8(i8 signext %arg1, i8 signext %arg2, i8 signext %arg3) signext {
        %tmp3 = and i8 %arg3, %arg1             ; <i8> [#uses=1]
        %tmp4not = xor i8 %arg3, -1             ; <i8> [#uses=1]
        %tmp6 = and i8 %tmp4not, %arg2          ; <i8> [#uses=1]
        %retval1011 = or i8 %tmp3, %tmp6        ; <i8> [#uses=1]
        ret i8 %retval1011
}