; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s

; Check that we recognize this idiom for rotation too:
;   a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))

6define i32 @rotate_left_32(i32 %a, i32 %b) {
7; CHECK-LABEL: rotate_left_32:
8; CHECK: roll
9entry:
10 %and = and i32 %b, 31
11 %shl = shl i32 %a, %and
12 %0 = sub i32 0, %b
13 %and3 = and i32 %0, 31
14 %shr = lshr i32 %a, %and3
15 %or = or i32 %shl, %shr
16 ret i32 %or
17}
18
; rotr32: (a >> (b & 31)) | (a << (-b & 31)) should lower to a single rorl.
define i32 @rotate_right_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_right_32:
; CHECK: rorl
entry:
  %and = and i32 %b, 31
  %shr = lshr i32 %a, %and
  %0 = sub i32 0, %b
  %and3 = and i32 %0, 31
  %shl = shl i32 %a, %and3
  %or = or i32 %shr, %shl
  ret i32 %or
}

; rotl64: same idiom with a 64-bit mask (63) should lower to a single rolq.
define i64 @rotate_left_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_left_64:
; CHECK: rolq
entry:
  %and = and i64 %b, 63
  %shl = shl i64 %a, %and
  %0 = sub i64 0, %b
  %and3 = and i64 %0, 63
  %shr = lshr i64 %a, %and3
  %or = or i64 %shl, %shr
  ret i64 %or
}

; rotr64: same idiom with a 64-bit mask (63) should lower to a single rorq.
define i64 @rotate_right_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_right_64:
; CHECK: rorq
entry:
  %and = and i64 %b, 63
  %shr = lshr i64 %a, %and
  %0 = sub i64 0, %b
  %and3 = and i64 %0, 63
  %shl = shl i64 %a, %and3
  %or = or i64 %shr, %shl
  ret i64 %or
}

; Also check mem operand.

; Memory-operand rotl32: the load/rotate/store read-modify-write should fold
; into a single memory-operand roll, with no separate mov for the load/store.
define void @rotate_left_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_left_m32:
; CHECK: roll
; no store:
; CHECK-NOT: mov
entry:
  %a = load i32* %pa, align 16
  %and = and i32 %b, 31
  %shl = shl i32 %a, %and
  %0 = sub i32 0, %b
  %and3 = and i32 %0, 31
  %shr = lshr i32 %a, %and3
  %or = or i32 %shl, %shr
  store i32 %or, i32* %pa, align 32
  ret void
}

; Memory-operand rotr32: should fold into a single memory-operand rorl,
; with no separate mov for the load/store.
define void @rotate_right_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_right_m32:
; CHECK: rorl
; no store:
; CHECK-NOT: mov
entry:
  %a = load i32* %pa, align 16
  %and = and i32 %b, 31
  %shr = lshr i32 %a, %and
  %0 = sub i32 0, %b
  %and3 = and i32 %0, 31
  %shl = shl i32 %a, %and3
  %or = or i32 %shr, %shl
  store i32 %or, i32* %pa, align 32
  ret void
}

; Memory-operand rotl64: should fold into a single memory-operand rolq,
; with no separate mov for the load/store.
define void @rotate_left_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_left_m64:
; CHECK: rolq
; no store:
; CHECK-NOT: mov
entry:
  %a = load i64* %pa, align 16
  %and = and i64 %b, 63
  %shl = shl i64 %a, %and
  %0 = sub i64 0, %b
  %and3 = and i64 %0, 63
  %shr = lshr i64 %a, %and3
  %or = or i64 %shl, %shr
  store i64 %or, i64* %pa, align 64
  ret void
}

; Memory-operand rotr64: should fold into a single memory-operand rorq,
; with no separate mov for the load/store.
define void @rotate_right_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_right_m64:
; CHECK: rorq
; no store:
; CHECK-NOT: mov
entry:
  %a = load i64* %pa, align 16
  %and = and i64 %b, 63
  %shr = lshr i64 %a, %and
  %0 = sub i64 0, %b
  %and3 = and i64 %0, 63
  %shl = shl i64 %a, %and3
  %or = or i64 %shr, %shl
  store i64 %or, i64* %pa, align 64
  ret void
}