; RUN: llc -mtriple=arm -mattr=+neon %s -o - | FileCheck %s

; Check codegen for 64-bit icmp operations, which don't directly map to any
; instruction.
| 6 | define <2 x i64> @vne(<2 x i64>* %A, <2 x i64>* %B) nounwind { |
| 7 | ;CHECK-LABEL: vne: |
| 8 | ;CHECK: vceq.i32 |
| 9 | ;CHECK-NEXT: vrev64.32 |
| 10 | ;CHECK-NEXT: vand |
| 11 | ;CHECK-NEXT: vmvn |
| 12 | ;CHECK-NEXT: vmov |
| 13 | ;CHECK-NEXT: vmov |
| 14 | ;CHECK-NEXT: mov pc, lr |
| 15 | %tmp1 = load <2 x i64>, <2 x i64>* %A |
| 16 | %tmp2 = load <2 x i64>, <2 x i64>* %B |
| 17 | %tmp3 = icmp ne <2 x i64> %tmp1, %tmp2 |
| 18 | %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> |
| 19 | ret <2 x i64> %tmp4 |
| 20 | } |
| 21 | |
| 22 | define <2 x i64> @veq(<2 x i64>* %A, <2 x i64>* %B) nounwind { |
| 23 | ;CHECK-LABEL: veq: |
| 24 | ;CHECK: vceq.i32 |
| 25 | ;CHECK-NEXT: vrev64.32 |
| 26 | ;CHECK-NEXT: vand |
| 27 | ;CHECK-NEXT: vmov |
| 28 | ;CHECK-NEXT: vmov |
| 29 | ;CHECK-NEXT: mov pc, lr |
| 30 | %tmp1 = load <2 x i64>, <2 x i64>* %A |
| 31 | %tmp2 = load <2 x i64>, <2 x i64>* %B |
| 32 | %tmp3 = icmp eq <2 x i64> %tmp1, %tmp2 |
| 33 | %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> |
| 34 | ret <2 x i64> %tmp4 |
| 35 | } |
| 36 | |
; FIXME: We currently generate terrible code for this.
; (ATop < BTop) | ((ATop == BTop) & (ABottom < BBottom))
; would come out to roughly 6 instructions, but we currently
; scalarize it.
| 41 | define <2 x i64> @vult(<2 x i64>* %A, <2 x i64>* %B) nounwind { |
| 42 | ;CHECK-LABEL: vult: |
| 43 | ;CHECK: subs |
| 44 | ;CHECK: sbcs |
| 45 | ;CHECK: subs |
| 46 | ;CHECK: sbcs |
| 47 | %tmp1 = load <2 x i64>, <2 x i64>* %A |
| 48 | %tmp2 = load <2 x i64>, <2 x i64>* %B |
| 49 | %tmp3 = icmp ult <2 x i64> %tmp1, %tmp2 |
| 50 | %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> |
| 51 | ret <2 x i64> %tmp4 |
| 52 | } |