; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-eabi -mattr=neon | FileCheck %s

; Scalar case: sext(i1 x) + 1 is 0 when x=1 and 1 when x=0, i.e. 1 - x,
; so this should fold to a single reverse-subtract (rsb r0, r0, #1).
define i32 @sext_inc(i1 zeroext %x) {
; CHECK-LABEL: sext_inc:
; CHECK:       @ BB#0:
; CHECK-NEXT:    rsb r0, r0, #1
; CHECK-NEXT:    mov pc, lr
  %ext = sext i1 %x to i32
  %add = add i32 %ext, 1
  ret i32 %add
}
| 13 | |
; Vector version of the fold above. Per the checked output, codegen does not
; yet fold this to 1 - zext(x): it materializes the sign extension with a
; shift-left by 31 followed by an arithmetic shift right (done via vshl with
; a negated shift count), then adds the splatted 1.
define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
; CHECK-LABEL: sext_inc_vec:
; CHECK:       @ BB#0:
; CHECK-NEXT:    vmov d16, r0, r1
; CHECK-NEXT:    vmov.i32 q9, #0x1f
; CHECK-NEXT:    vmov.i32 q10, #0x1
; CHECK-NEXT:    vmovl.u16 q8, d16
; CHECK-NEXT:    vneg.s32 q9, q9
; CHECK-NEXT:    vshl.i32 q8, q8, #31
; CHECK-NEXT:    vshl.s32 q8, q8, q9
; CHECK-NEXT:    vadd.i32 q8, q8, q10
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %ext = sext <4 x i1> %x to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}
| 32 | |
; Same add-of-sext pattern, but the i1 vector comes from a signed-greater-than
; compare. The checked output keeps the compare (vcgt.s32, which produces
; all-ones/all-zeros lanes, i.e. the sext result directly) followed by the
; add of splatted 1.
define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK:       @ BB#0:
; CHECK-NEXT:    mov r12, sp
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vmov.i32 q10, #0x1
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vcgt.s32 q8, q9, q8
; CHECK-NEXT:    vadd.i32 q8, q8, q10
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %cmp = icmp sgt <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}
| 51 | |
; Same pattern with an inequality compare. NEON has no vector "compare not
; equal", so per the checked output the ne is lowered as vceq followed by
; vmvn (bitwise not) before the add of splatted 1.
define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpne_sext_inc_vec:
; CHECK:       @ BB#0:
; CHECK-NEXT:    mov r12, sp
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vceq.i32 q8, q9, q8
; CHECK-NEXT:    vmov.i32 q9, #0x1
; CHECK-NEXT:    vmvn q8, q8
; CHECK-NEXT:    vadd.i32 q8, q8, q9
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %cmp = icmp ne <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}
| 71 | |