; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=powerpc64le-- | FileCheck %s

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i64 @llvm.fshl.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

declare i8 @llvm.fshr.i8(i8, i8, i8)
declare i16 @llvm.fshr.i16(i16, i16, i16)
declare i32 @llvm.fshr.i32(i32, i32, i32)
declare i64 @llvm.fshr.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

; When first 2 operands match, it's a rotate.
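; As a semantic reminder (hand-written; not from update_llc_test_checks.py):
; fshl(a, b, c) concatenates { a : b }, shifts left by c modulo the
; bitwidth, and keeps the top half, so fshl(x, x, c) is rotl(x, c).
; For i8: fshl(x, x, 3) == (x << 3) | (x >> 5),
; e.g. 0b10010110 -> 0b10110100.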

define i8 @rotl_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotl_i8_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rlwinm 4, 3, 27, 0, 31
; CHECK-NEXT:    rlwimi 4, 3, 3, 0, 28
; CHECK-NEXT:    mr 3, 4
; CHECK-NEXT:    blr
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}

define i64 @rotl_i64_const_shift(i64 %x) {
; CHECK-LABEL: rotl_i64_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rotldi 3, 3, 3
; CHECK-NEXT:    blr
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 3)
  ret i64 %f
}

; When first 2 operands match, it's a rotate (by variable amount).
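; There is no 16-bit rotate instruction, so the expansion below computes
;   (x << (z & 15)) | ((x & 0xffff) >> (-z & 15))
; with both shift counts masked into range so any %z is handled.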

define i16 @rotl_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotl_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    neg 5, 4
; CHECK-NEXT:    clrlwi 6, 3, 16
; CHECK-NEXT:    rlwinm 4, 4, 0, 28, 31
; CHECK-NEXT:    clrlwi 5, 5, 28
; CHECK-NEXT:    slw 3, 3, 4
; CHECK-NEXT:    srw 4, 6, 5
; CHECK-NEXT:    or 3, 3, 4
; CHECK-NEXT:    blr
  %f = call i16 @llvm.fshl.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}

define i32 @rotl_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotl_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rlwnm 3, 3, 4, 0, 31
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}

define i64 @rotl_i64(i64 %x, i64 %z) {
; CHECK-LABEL: rotl_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rldcl 3, 3, 4, 0
; CHECK-NEXT:    blr
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}

; Vector rotate.
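; vslw/vsrw use only the low 5 bits of each shift element, so the
; right-shift amount can be formed as 0 - z (vsubuwm from a zeroed
; register) without masking: (-z) mod 32 == 32 - z for nonzero z.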

define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK-LABEL: rotl_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xxlxor 36, 36, 36
; CHECK-NEXT:    vslw 5, 2, 3
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vsrw 2, 2, 3
; CHECK-NEXT:    xxlor 34, 37, 34
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}

; Vector rotate by constant splat amount.
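; vspltisw only encodes immediates in [-16, 15], so the complementary
; shift amount 29 (32 - 3) is materialized as 13 - (-16) with vsubuwm.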

define <4 x i32> @rotl_v4i32_const_shift(<4 x i32> %x) {
; CHECK-LABEL: rotl_v4i32_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 3, -16
; CHECK-NEXT:    vspltisw 4, 13
; CHECK-NEXT:    vspltisw 5, 3
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vslw 4, 2, 5
; CHECK-NEXT:    vsrw 2, 2, 3
; CHECK-NEXT:    xxlor 34, 36, 34
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}

; Repeat everything for funnel shift right.
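; Likewise as a reminder: fshr(a, b, c) keeps the bottom half of
; { a : b } >> (c mod bitwidth), so fshr(x, x, 3) is rotr(x, 3), or
; equivalently rotl(x, bitwidth - 3); hence the left-rotate count of 29
; in rotr_i32_const_shift below.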

define i8 @rotr_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotr_i8_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rlwinm 4, 3, 29, 0, 31
; CHECK-NEXT:    rlwimi 4, 3, 5, 0, 26
; CHECK-NEXT:    mr 3, 4
; CHECK-NEXT:    blr
  %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}

define i32 @rotr_i32_const_shift(i32 %x) {
; CHECK-LABEL: rotr_i32_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rlwinm 3, 3, 29, 0, 31
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 3)
  ret i32 %f
}

; When first 2 operands match, it's a rotate (by variable amount).
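; This mirrors the rotl_i16 expansion with the shift directions swapped:
;   ((x & 0xffff) >> (z & 15)) | (x << (-z & 15))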

define i16 @rotr_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotr_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    neg 5, 4
; CHECK-NEXT:    clrlwi 6, 3, 16
; CHECK-NEXT:    rlwinm 4, 4, 0, 28, 31
; CHECK-NEXT:    clrlwi 5, 5, 28
; CHECK-NEXT:    srw 4, 6, 4
; CHECK-NEXT:    slw 3, 3, 5
; CHECK-NEXT:    or 3, 3, 4
; CHECK-NEXT:    blr
  %f = call i16 @llvm.fshr.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}

define i32 @rotr_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotr_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    neg 4, 4
; CHECK-NEXT:    rlwnm 3, 3, 4, 0, 31
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}

define i64 @rotr_i64(i64 %x, i64 %z) {
; CHECK-LABEL: rotr_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    neg 4, 4
; CHECK-NEXT:    rldcl 3, 3, 4, 0
; CHECK-NEXT:    blr
  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}

; Vector rotate.

define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK-LABEL: rotr_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xxlxor 36, 36, 36
; CHECK-NEXT:    vsrw 5, 2, 3
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vslw 2, 2, 3
; CHECK-NEXT:    xxlor 34, 34, 37
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}

; Vector rotate by constant splat amount.

define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) {
; CHECK-LABEL: rotr_v4i32_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 3, -16
; CHECK-NEXT:    vspltisw 4, 13
; CHECK-NEXT:    vspltisw 5, 3
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vsrw 4, 2, 5
; CHECK-NEXT:    vslw 2, 2, 3
; CHECK-NEXT:    xxlor 34, 34, 36
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}

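; The shift amount of a funnel shift is taken modulo the bitwidth, so a
; shift by exactly the bitwidth is a rotate by 0, and these calls fold
; to a bare blr.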

define i32 @rotl_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotl_i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}

define i32 @rotr_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotr_i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}

define <4 x i32> @rotl_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotl_v4i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}

define <4 x i32> @rotr_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotr_v4i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}