; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s

; Check that we recognize this idiom for rotation too:
;    a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))

; Rotate a left by (b mod 32): (a << (b & 31)) | (a >> ((0 - b) & 31)).
; The masks should be folded into a single roll; no explicit and remains.
define i32 @rotate_left_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_left_32:
; CHECK-NOT: and
; CHECK: roll
entry:
  %amt = and i32 %b, 31
  %hi = shl i32 %a, %amt
  %negb = sub i32 0, %b
  %negamt = and i32 %negb, 31
  %lo = lshr i32 %a, %negamt
  %rot = or i32 %hi, %lo
  ret i32 %rot
}
|  | 19 |  | 
; Rotate a right by (b mod 32): (a >> (b & 31)) | (a << ((0 - b) & 31)).
; Value names fixed: the low part comes from lshr and the high part from shl
; (the previous names had them swapped).
define i32 @rotate_right_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_right_32:
; CHECK-NOT: and
; CHECK: rorl
entry:
  %amt = and i32 %b, 31
  %lo = lshr i32 %a, %amt
  %negb = sub i32 0, %b
  %negamt = and i32 %negb, 31
  %hi = shl i32 %a, %negamt
  %rot = or i32 %lo, %hi
  ret i32 %rot
}
|  | 33 |  | 
; 64-bit rotate left: (a << (b & 63)) | (a >> ((0 - b) & 63)) -> rolq.
define i64 @rotate_left_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_left_64:
; CHECK-NOT: and
; CHECK: rolq
entry:
  %amt = and i64 %b, 63
  %hi = shl i64 %a, %amt
  %negb = sub i64 0, %b
  %negamt = and i64 %negb, 63
  %lo = lshr i64 %a, %negamt
  %rot = or i64 %hi, %lo
  ret i64 %rot
}
|  | 47 |  | 
; 64-bit rotate right: (a >> (b & 63)) | (a << ((0 - b) & 63)) -> rorq.
; Value names fixed: the low part comes from lshr and the high part from shl
; (the previous names had them swapped).
define i64 @rotate_right_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_right_64:
; CHECK-NOT: and
; CHECK: rorq
entry:
  %amt = and i64 %b, 63
  %lo = lshr i64 %a, %amt
  %negb = sub i64 0, %b
  %negamt = and i64 %negb, 63
  %hi = shl i64 %a, %negamt
  %rot = or i64 %lo, %hi
  ret i64 %rot
}
|  | 61 |  | 
; Also check that the rotate can be performed directly on a memory operand.

; Memory form of 32-bit rotate left: load, rotate, store should fold into a
; single roll with a memory operand, with no separate mov for the store.
define void @rotate_left_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_left_m32:
; CHECK-NOT: and
; CHECK: roll
; no store:
; CHECK-NOT: mov
entry:
  %val = load i32, i32* %pa, align 16
  %amt = and i32 %b, 31
  %hi = shl i32 %val, %amt
  %negb = sub i32 0, %b
  %negamt = and i32 %negb, 31
  %lo = lshr i32 %val, %negamt
  %rot = or i32 %hi, %lo
  store i32 %rot, i32* %pa, align 32
  ret void
}
|  | 81 |  | 
; Memory form of 32-bit rotate right: should fold into a single rorl with a
; memory operand. Value names fixed: the low part comes from lshr and the
; high part from shl (the previous names had them swapped).
define void @rotate_right_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_right_m32:
; CHECK-NOT: and
; CHECK: rorl
; no store:
; CHECK-NOT: mov
entry:
  %val = load i32, i32* %pa, align 16
  %amt = and i32 %b, 31
  %lo = lshr i32 %val, %amt
  %negb = sub i32 0, %b
  %negamt = and i32 %negb, 31
  %hi = shl i32 %val, %negamt
  %rot = or i32 %lo, %hi
  store i32 %rot, i32* %pa, align 32
  ret void
}
|  | 99 |  | 
; Memory form of 64-bit rotate left: should fold into a single rolq with a
; memory operand, with no separate mov for the store.
define void @rotate_left_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_left_m64:
; CHECK-NOT: and
; CHECK: rolq
; no store:
; CHECK-NOT: mov
entry:
  %val = load i64, i64* %pa, align 16
  %amt = and i64 %b, 63
  %hi = shl i64 %val, %amt
  %negb = sub i64 0, %b
  %negamt = and i64 %negb, 63
  %lo = lshr i64 %val, %negamt
  %rot = or i64 %hi, %lo
  store i64 %rot, i64* %pa, align 64
  ret void
}
|  | 117 |  | 
; Memory form of 64-bit rotate right: should fold into a single rorq with a
; memory operand. Value names fixed: the low part comes from lshr and the
; high part from shl (the previous names had them swapped).
define void @rotate_right_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_right_m64:
; CHECK-NOT: and
; CHECK: rorq
; no store:
; CHECK-NOT: mov
entry:
  %val = load i64, i64* %pa, align 16
  %amt = and i64 %b, 63
  %lo = lshr i64 %val, %amt
  %negb = sub i64 0, %b
  %negamt = and i64 %negb, 63
  %hi = shl i64 %val, %negamt
  %rot = or i64 %lo, %hi
  store i64 %rot, i64* %pa, align 64
  ret void
}