; RUN: llvm-as -o - %s | llc -march=cellspu -f -o %t1.s
; RUN: grep rot %t1.s | count 85
; RUN: grep roth %t1.s | count 8
; RUN: grep roti.*5 %t1.s | count 1
; RUN: grep roti.*27 %t1.s | count 1
; RUN: grep rothi.*5 %t1.s | count 2
; RUN: grep rothi.*11 %t1.s | count 1
; RUN: grep rothi.*,.3 %t1.s | count 1
; RUN: grep andhi %t1.s | count 4
; RUN: grep shlhi %t1.s | count 4
; XFAIL: *

;; FIXME: ROTR hasn't been implemented in CellSPU! It's marked as a "legal"
;; operation, but if used, the code generator complains that it can't
;; be selected.

target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"

; Vector rotates are not currently supported in gcc or llvm assembly. These are
; not tested; a commented-out illustrative sketch follows.
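;
; Purely illustrative, hypothetical sketch (kept commented out and untested
; here, so it cannot perturb the instruction counts checked by the RUN lines
; above): if vector shifts were usable, a <4 x i32> rotate-left would follow
; the same shl/lshr/or idiom as the scalar functions below.
;
;   define <4 x i32> @rotl_v4i32_sketch(<4 x i32> %arg1, <4 x i32> %amt) {
;     %inv = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %amt
;     %B = shl <4 x i32> %arg1, %amt
;     %C = lshr <4 x i32> %arg1, %inv
;     %D = or <4 x i32> %B, %C
;     ret <4 x i32> %D
;   }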

; 32-bit rotates:
define i32 @rotl32_1a(i32 %arg1, i8 %arg2) {
        %tmp1 = zext i8 %arg2 to i32            ; <i32> [#uses=1]
        %B = shl i32 %arg1, %tmp1               ; <i32> [#uses=1]
        %arg22 = sub i8 32, %arg2               ; <i8> [#uses=1]
        %tmp2 = zext i8 %arg22 to i32           ; <i32> [#uses=1]
        %C = lshr i32 %arg1, %tmp2              ; <i32> [#uses=1]
        %D = or i32 %B, %C                      ; <i32> [#uses=1]
        ret i32 %D
}

define i32 @rotl32_1b(i32 %arg1, i16 %arg2) {
        %tmp1 = zext i16 %arg2 to i32           ; <i32> [#uses=1]
        %B = shl i32 %arg1, %tmp1               ; <i32> [#uses=1]
        %arg22 = sub i16 32, %arg2              ; <i16> [#uses=1]
        %tmp2 = zext i16 %arg22 to i32          ; <i32> [#uses=1]
        %C = lshr i32 %arg1, %tmp2              ; <i32> [#uses=1]
        %D = or i32 %B, %C                      ; <i32> [#uses=1]
        ret i32 %D
}

define i32 @rotl32_2(i32 %arg1, i32 %arg2) {
        %B = shl i32 %arg1, %arg2               ; <i32> [#uses=1]
        %tmp1 = sub i32 32, %arg2               ; <i32> [#uses=1]
        %C = lshr i32 %arg1, %tmp1              ; <i32> [#uses=1]
        %D = or i32 %B, %C                      ; <i32> [#uses=1]
        ret i32 %D
}

define i32 @rotl32_3(i32 %arg1, i32 %arg2) {
        %tmp1 = sub i32 32, %arg2               ; <i32> [#uses=1]
        %B = shl i32 %arg1, %arg2               ; <i32> [#uses=1]
        %C = lshr i32 %arg1, %tmp1              ; <i32> [#uses=1]
        %D = or i32 %B, %C                      ; <i32> [#uses=1]
        ret i32 %D
}

define i32 @rotl32_4(i32 %arg1, i32 %arg2) {
        %tmp1 = sub i32 32, %arg2               ; <i32> [#uses=1]
        %C = lshr i32 %arg1, %tmp1              ; <i32> [#uses=1]
        %B = shl i32 %arg1, %arg2               ; <i32> [#uses=1]
        %D = or i32 %B, %C                      ; <i32> [#uses=1]
        ret i32 %D
}

define i32 @rotr32_1(i32 %A, i8 %Amt) {
        %tmp1 = zext i8 %Amt to i32             ; <i32> [#uses=1]
        %B = lshr i32 %A, %tmp1                 ; <i32> [#uses=1]
        %Amt2 = sub i8 32, %Amt                 ; <i8> [#uses=1]
        %tmp2 = zext i8 %Amt2 to i32            ; <i32> [#uses=1]
        %C = shl i32 %A, %tmp2                  ; <i32> [#uses=1]
        %D = or i32 %B, %C                      ; <i32> [#uses=1]
        ret i32 %D
}

define i32 @rotr32_2(i32 %A, i8 %Amt) {
        %Amt2 = sub i8 32, %Amt                 ; <i8> [#uses=1]
        %tmp1 = zext i8 %Amt to i32             ; <i32> [#uses=1]
        %B = lshr i32 %A, %tmp1                 ; <i32> [#uses=1]
        %tmp2 = zext i8 %Amt2 to i32            ; <i32> [#uses=1]
        %C = shl i32 %A, %tmp2                  ; <i32> [#uses=1]
        %D = or i32 %B, %C                      ; <i32> [#uses=1]
        ret i32 %D
}

; Rotate left with immediate
define i32 @rotli32(i32 %A) {
        %B = shl i32 %A, 5                      ; <i32> [#uses=1]
        %C = lshr i32 %A, 27                    ; <i32> [#uses=1]
        %D = or i32 %B, %C                      ; <i32> [#uses=1]
        ret i32 %D
}

; Rotate right with immediate
define i32 @rotri32(i32 %A) {
        %B = lshr i32 %A, 5                     ; <i32> [#uses=1]
        %C = shl i32 %A, 27                     ; <i32> [#uses=1]
        %D = or i32 %B, %C                      ; <i32> [#uses=1]
        ret i32 %D
}

; 16-bit rotates:
define i16 @rotr16_1(i16 %arg1, i8 %arg) {
        %tmp1 = zext i8 %arg to i16             ; <i16> [#uses=1]
        %B = lshr i16 %arg1, %tmp1              ; <i16> [#uses=1]
        %arg2 = sub i8 16, %arg                 ; <i8> [#uses=1]
        %tmp2 = zext i8 %arg2 to i16            ; <i16> [#uses=1]
        %C = shl i16 %arg1, %tmp2               ; <i16> [#uses=1]
        %D = or i16 %B, %C                      ; <i16> [#uses=1]
        ret i16 %D
}

define i16 @rotr16_2(i16 %arg1, i16 %arg) {
        %B = lshr i16 %arg1, %arg               ; <i16> [#uses=1]
        %tmp1 = sub i16 16, %arg                ; <i16> [#uses=1]
        %C = shl i16 %arg1, %tmp1               ; <i16> [#uses=1]
        %D = or i16 %B, %C                      ; <i16> [#uses=1]
        ret i16 %D
}

define i16 @rotli16(i16 %A) {
        %B = shl i16 %A, 5                      ; <i16> [#uses=1]
        %C = lshr i16 %A, 11                    ; <i16> [#uses=1]
        %D = or i16 %B, %C                      ; <i16> [#uses=1]
        ret i16 %D
}

define i16 @rotri16(i16 %A) {
        %B = lshr i16 %A, 5                     ; <i16> [#uses=1]
        %C = shl i16 %A, 11                     ; <i16> [#uses=1]
        %D = or i16 %B, %C                      ; <i16> [#uses=1]
        ret i16 %D
}

define i8 @rotl8(i8 %A, i8 %Amt) {
        %B = shl i8 %A, %Amt                    ; <i8> [#uses=1]
        %Amt2 = sub i8 8, %Amt                  ; <i8> [#uses=1]
        %C = lshr i8 %A, %Amt2                  ; <i8> [#uses=1]
        %D = or i8 %B, %C                       ; <i8> [#uses=1]
        ret i8 %D
}

define i8 @rotr8(i8 %A, i8 %Amt) {
        %B = lshr i8 %A, %Amt                   ; <i8> [#uses=1]
        %Amt2 = sub i8 8, %Amt                  ; <i8> [#uses=1]
        %C = shl i8 %A, %Amt2                   ; <i8> [#uses=1]
        %D = or i8 %B, %C                       ; <i8> [#uses=1]
        ret i8 %D
}

define i8 @rotli8(i8 %A) {
        %B = shl i8 %A, 5                       ; <i8> [#uses=1]
        %C = lshr i8 %A, 3                      ; <i8> [#uses=1]
        %D = or i8 %B, %C                       ; <i8> [#uses=1]
        ret i8 %D
}

define i8 @rotri8(i8 %A) {
        %B = lshr i8 %A, 5                      ; <i8> [#uses=1]
        %C = shl i8 %A, 3                       ; <i8> [#uses=1]
        %D = or i8 %B, %C                       ; <i8> [#uses=1]
        ret i8 %D
}