; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone

declare {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone

; fold (ssub x, 0) -> x
define i32 @combine_ssub_zero(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_ssub_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    movl %edi, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_ssub_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    movl %edi, %eax
; AVX-NEXT:    retq
  %1 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a0, i32 zeroinitializer)
  %2 = extractvalue {i32, i1} %1, 0
  %3 = extractvalue {i32, i1} %1, 1
  %4 = select i1 %3, i32 %a1, i32 %2
  ret i32 %4
}

define <4 x i32> @combine_vec_ssub_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_ssub_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ssub_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = call {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
  ret <4 x i32> %4
}

; fold (usub x, 0) -> x
define i32 @combine_usub_zero(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_usub_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    movl %edi, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_usub_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    movl %edi, %eax
; AVX-NEXT:    retq
  %1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a0, i32 zeroinitializer)
  %2 = extractvalue {i32, i1} %1, 0
  %3 = extractvalue {i32, i1} %1, 1
  %4 = select i1 %3, i32 %a1, i32 %2
  ret i32 %4
}

define <4 x i32> @combine_vec_usub_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_usub_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    pminud %xmm0, %xmm0
; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE-NEXT:    blendvps %xmm0, %xmm2, %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_usub_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vpminud %xmm0, %xmm0, %xmm2
; AVX-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm2
; AVX-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
  ret <4 x i32> %4
}

; fold (ssub x, x) -> 0
define i32 @combine_ssub_self(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_ssub_self:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_ssub_self:
; AVX:       # %bb.0:
; AVX-NEXT:    xorl %eax, %eax
; AVX-NEXT:    retq
  %1 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a0, i32 %a0)
  %2 = extractvalue {i32, i1} %1, 0
  %3 = extractvalue {i32, i1} %1, 1
  %4 = select i1 %3, i32 %a1, i32 %2
  ret i32 %4
}

define <4 x i32> @combine_vec_ssub_self(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_ssub_self:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ssub_self:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a0)
  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
  ret <4 x i32> %4
}

; fold (usub x, x) -> 0
define i32 @combine_usub_self(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_usub_self:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_usub_self:
; AVX:       # %bb.0:
; AVX-NEXT:    xorl %eax, %eax
; AVX-NEXT:    retq
  %1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a0, i32 %a0)
  %2 = extractvalue {i32, i1} %1, 0
  %3 = extractvalue {i32, i1} %1, 1
  %4 = select i1 %3, i32 %a1, i32 %2
  ret i32 %4
}

define <4 x i32> @combine_vec_usub_self(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_usub_self:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psubd %xmm0, %xmm2
; SSE-NEXT:    pminud %xmm2, %xmm0
; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE-NEXT:    blendvps %xmm0, %xmm2, %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_usub_self:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsubd %xmm0, %xmm0, %xmm2
; AVX-NEXT:    vpminud %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a0)
  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
  ret <4 x i32> %4
}

; fold (usub -1, x) -> (xor x, -1) + no borrow
define i32 @combine_usub_negone(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_usub_negone:
; SSE:       # %bb.0:
; SSE-NEXT:    movl %edi, %eax
; SSE-NEXT:    notl %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_usub_negone:
; AVX:       # %bb.0:
; AVX-NEXT:    movl %edi, %eax
; AVX-NEXT:    notl %eax
; AVX-NEXT:    retq
  %1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 -1, i32 %a0)
  %2 = extractvalue {i32, i1} %1, 0
  %3 = extractvalue {i32, i1} %1, 1
  %4 = select i1 %3, i32 %a1, i32 %2
  ret i32 %4
}

define <4 x i32> @combine_vec_usub_negone(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_usub_negone:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm0, %xmm2
; SSE-NEXT:    pminud %xmm2, %xmm0
; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE-NEXT:    blendvps %xmm0, %xmm2, %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_usub_negone:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpminud %xmm2, %xmm0, %xmm2
; AVX-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm2
; AVX-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a0)
  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
  ret <4 x i32> %4
}