; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

; Tests BuildUREMEqFold for 4 x i32 splat vectors with odd divisor.
; See urem-seteq.ll for justification behind constants emitted.
define <4 x i32> @test_urem_odd_vec_i32(<4 x i32> %X) nounwind readnone {
; CHECK-LABEL: test_urem_odd_vec_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #52429
; CHECK-NEXT:    movk w8, #52428, lsl #16
; CHECK-NEXT:    dup v2.4s, w8
; CHECK-NEXT:    umull2 v3.2d, v0.4s, v2.4s
; CHECK-NEXT:    umull v2.2d, v0.2s, v2.2s
; CHECK-NEXT:    uzp2 v2.4s, v2.4s, v3.4s
; CHECK-NEXT:    movi v1.4s, #5
; CHECK-NEXT:    ushr v2.4s, v2.4s, #2
; CHECK-NEXT:    mls v0.4s, v2.4s, v1.4s
; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
; CHECK-NEXT:    movi v1.4s, #1
; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %urem = urem <4 x i32> %X, <i32 5, i32 5, i32 5, i32 5>
  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 0, i32 0, i32 0>
  %ret = zext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %ret
}

; Like test_urem_odd_vec_i32, but with 8 x i16 vectors.
define <8 x i16> @test_urem_odd_vec_i16(<8 x i16> %X) nounwind readnone {
; CHECK-LABEL: test_urem_odd_vec_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #52429
; CHECK-NEXT:    dup v2.8h, w8
; CHECK-NEXT:    umull2 v3.4s, v0.8h, v2.8h
; CHECK-NEXT:    umull v2.4s, v0.4h, v2.4h
; CHECK-NEXT:    uzp2 v2.8h, v2.8h, v3.8h
; CHECK-NEXT:    movi v1.8h, #5
; CHECK-NEXT:    ushr v2.8h, v2.8h, #2
; CHECK-NEXT:    mls v0.8h, v2.8h, v1.8h
; CHECK-NEXT:    cmeq v0.8h, v0.8h, #0
; CHECK-NEXT:    movi v1.8h, #1
; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %urem = urem <8 x i16> %X, <i16 5, i16 5, i16 5, i16 5,
                              i16 5, i16 5, i16 5, i16 5>
  %cmp = icmp eq <8 x i16> %urem, <i16 0, i16 0, i16 0, i16 0,
                                   i16 0, i16 0, i16 0, i16 0>
  %ret = zext <8 x i1> %cmp to <8 x i16>
  ret <8 x i16> %ret
}

; Tests BuildUREMEqFold for 4 x i32 splat vectors with even divisor.
; The expected behavior is that the fold is _not_ applied
; because it requires a ROTR in the even case, which has to be expanded.
define <4 x i32> @test_urem_even_vec_i32(<4 x i32> %X) nounwind readnone {
; CHECK-LABEL: test_urem_even_vec_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #9363
; CHECK-NEXT:    movk w8, #37449, lsl #16
; CHECK-NEXT:    ushr v1.4s, v0.4s, #1
; CHECK-NEXT:    dup v3.4s, w8
; CHECK-NEXT:    umull2 v4.2d, v1.4s, v3.4s
; CHECK-NEXT:    umull v1.2d, v1.2s, v3.2s
; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v4.4s
; CHECK-NEXT:    movi v2.4s, #14
; CHECK-NEXT:    ushr v1.4s, v1.4s, #2
; CHECK-NEXT:    mls v0.4s, v1.4s, v2.4s
; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
; CHECK-NEXT:    movi v1.4s, #1
; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %urem = urem <4 x i32> %X, <i32 14, i32 14, i32 14, i32 14>
  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 0, i32 0, i32 0>
  %ret = zext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %ret
}

; Like test_urem_even_vec_i32, but with 8 x i16 vectors.
; i16 is not legal for ROTR on AArch64, but ROTR also cannot be promoted to i32,
; so this would crash if BuildUREMEqFold was applied.
define <8 x i16> @test_urem_even_vec_i16(<8 x i16> %X) nounwind readnone {
; CHECK-LABEL: test_urem_even_vec_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #18725
; CHECK-NEXT:    ushr v1.8h, v0.8h, #1
; CHECK-NEXT:    dup v3.8h, w8
; CHECK-NEXT:    umull2 v4.4s, v1.8h, v3.8h
; CHECK-NEXT:    umull v1.4s, v1.4h, v3.4h
; CHECK-NEXT:    uzp2 v1.8h, v1.8h, v4.8h
; CHECK-NEXT:    movi v2.8h, #14
; CHECK-NEXT:    ushr v1.8h, v1.8h, #1
; CHECK-NEXT:    mls v0.8h, v1.8h, v2.8h
; CHECK-NEXT:    cmeq v0.8h, v0.8h, #0
; CHECK-NEXT:    movi v1.8h, #1
; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %urem = urem <8 x i16> %X, <i16 14, i16 14, i16 14, i16 14,
                              i16 14, i16 14, i16 14, i16 14>
  %cmp = icmp eq <8 x i16> %urem, <i16 0, i16 0, i16 0, i16 0,
                                   i16 0, i16 0, i16 0, i16 0>
  %ret = zext <8 x i1> %cmp to <8 x i16>
  ret <8 x i16> %ret
}

; We should not proceed with this fold if the divisor is 1 or -1
define <4 x i32> @test_urem_one_vec(<4 x i32> %X) nounwind readnone {
; CHECK-LABEL: test_urem_one_vec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v0.4s, #1
; CHECK-NEXT:    ret
  %urem = urem <4 x i32> %X, <i32 1, i32 1, i32 1, i32 1>
  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 0, i32 0, i32 0>
  %ret = zext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %ret
}

; BuildUREMEqFold does not work when the only odd factor of the divisor is 1.
; This ensures we don't touch powers of two.
define <4 x i32> @test_urem_pow2_vec(<4 x i32> %X) nounwind readnone {
; CHECK-LABEL: test_urem_pow2_vec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v1.4s, #15
; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
; CHECK-NEXT:    movi v1.4s, #1
; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %urem = urem <4 x i32> %X, <i32 16, i32 16, i32 16, i32 16>
  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 0, i32 0, i32 0>
  %ret = zext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %ret
}