; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=bdver4 | FileCheck %s

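; bdver4 (AMD Excavator) implements XOP, so vector rotates can lower to vprotd.

; A uniform rotate pattern (lshr by 1 | shl by 31) combines to a single XOP
; rotate: vprotd $31 (rotate left by 31) is equivalent to a rotate right by 1.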
define <4 x i32> @rot_v4i32_splat(<4 x i32> %x) {
; CHECK-LABEL: rot_v4i32_splat:
; CHECK:       # BB#0:
; CHECK-NEXT:    vprotd $31, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}

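; With non-uniform rotate amounts the pattern is not matched to vprotd; it
; lowers to per-element variable shifts (vpsrlvd/vpsllvd) combined with vpor.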
define <4 x i32> @rot_v4i32_non_splat(<4 x i32> %x) {
; CHECK-LABEL: rot_v4i32_non_splat:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
; CHECK-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT:    vpor %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  %2 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}

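; Masking each side of the uniform rotate (4294901760 = 0xFFFF0000 splat on the
; lshr half, <0,0xFFFF0000,0,0xFFFF0000> on the shl half) should not block the
; combine: the rotate still forms and the masks fold into a single vpand.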
define <4 x i32> @rot_v4i32_splat_2masks(<4 x i32> %x) {
; CHECK-LABEL: rot_v4i32_splat_2masks:
; CHECK:       # BB#0:
; CHECK-NEXT:    vprotd $31, %xmm0, %xmm0
; CHECK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %2 = and <4 x i32> %1, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>

  %3 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %4 = and <4 x i32> %3, <i32 0, i32 4294901760, i32 0, i32 4294901760>
  %5 = or <4 x i32> %2, %4
  ret <4 x i32> %5
}

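; The same masked pattern with non-uniform amounts forms no rotate: the shifts
; are masked by word blends against a zeroed register (vpblendw with vpxor)
; and then combined with vpor.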
define <4 x i32> @rot_v4i32_non_splat_2masks(<4 x i32> %x) {
; CHECK-LABEL: rot_v4i32_non_splat_2masks:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
; CHECK-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3],xmm2[4,5,6],xmm0[7]
; CHECK-NEXT:    vpor %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  %2 = and <4 x i32> %1, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>

  %3 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
  %4 = and <4 x i32> %3, <i32 0, i32 4294901760, i32 0, i32 4294901760>
  %5 = or <4 x i32> %2, %4
  ret <4 x i32> %5
}