; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=XOP
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512

; fold (rot (rot x, c1), c2) -> rot x, c1+c2
; Per-lane rotate amounts (right by 1,2,3,4 then 12,13,14,15) combine into a
; single rotate, so XOP emits one vprotd with a constant-pool operand.
define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
; XOP-LABEL: combine_vec_rot_rot:
; XOP:       # BB#0:
; XOP-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX512-LABEL: combine_vec_rot_rot:
; AVX512:       # BB#0:
; AVX512-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
; AVX512-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
; AVX512-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  %2 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
  %3 = or <4 x i32> %1, %2
  %4 = lshr <4 x i32> %3, <i32 12, i32 13, i32 14, i32 15>
  %5 = shl <4 x i32> %3, <i32 20, i32 19, i32 18, i32 17>
  %6 = or <4 x i32> %4, %5
  ret <4 x i32> %6
}

; fold (rot (rot x, c1), c2) -> rot x, c1+c2 — splat case.
; Rotate right by 3 then by 22 combines to rotate right by 25 (= rotate left
; by 7), so XOP emits a single immediate-form vprotd $7.
define <4 x i32> @combine_vec_rot_rot_splat(<4 x i32> %x) {
; XOP-LABEL: combine_vec_rot_rot_splat:
; XOP:       # BB#0:
; XOP-NEXT:    vprotd $7, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX512-LABEL: combine_vec_rot_rot_splat:
; AVX512:       # BB#0:
; AVX512-NEXT:    vpsrld $3, %xmm0, %xmm1
; AVX512-NEXT:    vpslld $29, %xmm0, %xmm0
; AVX512-NEXT:    vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vpsrld $22, %xmm0, %xmm1
; AVX512-NEXT:    vpslld $10, %xmm0, %xmm0
; AVX512-NEXT:    vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %x, <i32 29, i32 29, i32 29, i32 29>
  %3 = or <4 x i32> %1, %2
  %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
  %5 = shl <4 x i32> %3, <i32 10, i32 10, i32 10, i32 10>
  %6 = or <4 x i32> %4, %5
  ret <4 x i32> %6
}

; fold (rot (rot x, c1), c2) -> rot x, c1+c2 — amounts summing to the bit
; width. Rotate right by 1 then by 31 is a rotate by 32, i.e. the identity,
; so XOP folds the whole sequence away and returns %x unchanged.
define <4 x i32> @combine_vec_rot_rot_splat_zero(<4 x i32> %x) {
; XOP-LABEL: combine_vec_rot_rot_splat_zero:
; XOP:       # BB#0:
; XOP-NEXT:    retq
;
; AVX512-LABEL: combine_vec_rot_rot_splat_zero:
; AVX512:       # BB#0:
; AVX512-NEXT:    vpsrld $1, %xmm0, %xmm1
; AVX512-NEXT:    vpslld $31, %xmm0, %xmm0
; AVX512-NEXT:    vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vpsrld $31, %xmm0, %xmm1
; AVX512-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %3 = or <4 x i32> %1, %2
  %4 = lshr <4 x i32> %3, <i32 31, i32 31, i32 31, i32 31>
  %5 = shl <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1>
  %6 = or <4 x i32> %4, %5
  ret <4 x i32> %6
}