; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s

; Check that we recognize this idiom for rotation too:
; a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))

6define i32 @rotate_left_32(i32 %a, i32 %b) {
7; CHECK-LABEL: rotate_left_32:
Adam Nemetd4e56072014-03-12 21:20:55 +00008; CHECK-NOT: and
Adam Nemet5117f5d2014-03-07 23:56:28 +00009; CHECK: roll
10entry:
11 %and = and i32 %b, 31
12 %shl = shl i32 %a, %and
13 %0 = sub i32 0, %b
14 %and3 = and i32 %0, 31
15 %shr = lshr i32 %a, %and3
16 %or = or i32 %shl, %shr
17 ret i32 %or
18}
19
20define i32 @rotate_right_32(i32 %a, i32 %b) {
21; CHECK-LABEL: rotate_right_32:
Adam Nemetd4e56072014-03-12 21:20:55 +000022; CHECK-NOT: and
Adam Nemet5117f5d2014-03-07 23:56:28 +000023; CHECK: rorl
24entry:
25 %and = and i32 %b, 31
26 %shl = lshr i32 %a, %and
27 %0 = sub i32 0, %b
28 %and3 = and i32 %0, 31
29 %shr = shl i32 %a, %and3
30 %or = or i32 %shl, %shr
31 ret i32 %or
32}
33
34define i64 @rotate_left_64(i64 %a, i64 %b) {
35; CHECK-LABEL: rotate_left_64:
Adam Nemetd4e56072014-03-12 21:20:55 +000036; CHECK-NOT: and
Adam Nemet5117f5d2014-03-07 23:56:28 +000037; CHECK: rolq
38entry:
39 %and = and i64 %b, 63
40 %shl = shl i64 %a, %and
41 %0 = sub i64 0, %b
42 %and3 = and i64 %0, 63
43 %shr = lshr i64 %a, %and3
44 %or = or i64 %shl, %shr
45 ret i64 %or
46}
47
48define i64 @rotate_right_64(i64 %a, i64 %b) {
49; CHECK-LABEL: rotate_right_64:
Adam Nemetd4e56072014-03-12 21:20:55 +000050; CHECK-NOT: and
Adam Nemet5117f5d2014-03-07 23:56:28 +000051; CHECK: rorq
52entry:
53 %and = and i64 %b, 63
54 %shl = lshr i64 %a, %and
55 %0 = sub i64 0, %b
56 %and3 = and i64 %0, 63
57 %shr = shl i64 %a, %and3
58 %or = or i64 %shl, %shr
59 ret i64 %or
60}

; Also check mem operand.

64define void @rotate_left_m32(i32 *%pa, i32 %b) {
65; CHECK-LABEL: rotate_left_m32:
Adam Nemetd4e56072014-03-12 21:20:55 +000066; CHECK-NOT: and
Adam Nemet5117f5d2014-03-07 23:56:28 +000067; CHECK: roll
68; no store:
69; CHECK-NOT: mov
70entry:
David Blaikiea79ac142015-02-27 21:17:42 +000071 %a = load i32, i32* %pa, align 16
Adam Nemet5117f5d2014-03-07 23:56:28 +000072 %and = and i32 %b, 31
73 %shl = shl i32 %a, %and
74 %0 = sub i32 0, %b
75 %and3 = and i32 %0, 31
76 %shr = lshr i32 %a, %and3
77 %or = or i32 %shl, %shr
78 store i32 %or, i32* %pa, align 32
79 ret void
80}
81
82define void @rotate_right_m32(i32 *%pa, i32 %b) {
83; CHECK-LABEL: rotate_right_m32:
Adam Nemetd4e56072014-03-12 21:20:55 +000084; CHECK-NOT: and
Adam Nemet5117f5d2014-03-07 23:56:28 +000085; CHECK: rorl
86; no store:
87; CHECK-NOT: mov
88entry:
David Blaikiea79ac142015-02-27 21:17:42 +000089 %a = load i32, i32* %pa, align 16
Adam Nemet5117f5d2014-03-07 23:56:28 +000090 %and = and i32 %b, 31
91 %shl = lshr i32 %a, %and
92 %0 = sub i32 0, %b
93 %and3 = and i32 %0, 31
94 %shr = shl i32 %a, %and3
95 %or = or i32 %shl, %shr
96 store i32 %or, i32* %pa, align 32
97 ret void
98}
99
100define void @rotate_left_m64(i64 *%pa, i64 %b) {
101; CHECK-LABEL: rotate_left_m64:
Adam Nemetd4e56072014-03-12 21:20:55 +0000102; CHECK-NOT: and
Adam Nemet5117f5d2014-03-07 23:56:28 +0000103; CHECK: rolq
104; no store:
105; CHECK-NOT: mov
106entry:
David Blaikiea79ac142015-02-27 21:17:42 +0000107 %a = load i64, i64* %pa, align 16
Adam Nemet5117f5d2014-03-07 23:56:28 +0000108 %and = and i64 %b, 63
109 %shl = shl i64 %a, %and
110 %0 = sub i64 0, %b
111 %and3 = and i64 %0, 63
112 %shr = lshr i64 %a, %and3
113 %or = or i64 %shl, %shr
114 store i64 %or, i64* %pa, align 64
115 ret void
116}
117
118define void @rotate_right_m64(i64 *%pa, i64 %b) {
119; CHECK-LABEL: rotate_right_m64:
Adam Nemetd4e56072014-03-12 21:20:55 +0000120; CHECK-NOT: and
Adam Nemet5117f5d2014-03-07 23:56:28 +0000121; CHECK: rorq
122; no store:
123; CHECK-NOT: mov
124entry:
David Blaikiea79ac142015-02-27 21:17:42 +0000125 %a = load i64, i64* %pa, align 16
Adam Nemet5117f5d2014-03-07 23:56:28 +0000126 %and = and i64 %b, 63
127 %shl = lshr i64 %a, %and
128 %0 = sub i64 0, %b
129 %and3 = and i64 %0, 63
130 %shr = shl i64 %a, %and3
131 %or = or i64 %shl, %shr
132 store i64 %or, i64* %pa, align 64
133 ret void
134}