; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

; FIXME: Div/rem by zero is undef.
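; The four scalar tests below currently emit a real divide by a zeroed
; register (idivl/divl %ecx), which traps at run time; since div/rem by
; zero is undefined, they could be folded away (e.g. to undef) instead.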

define i32 @srem0(i32 %x) {
; CHECK-LABEL: srem0:
; CHECK: # BB#0:
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: retq
  %rem = srem i32 %x, 0
  ret i32 %rem
}

define i32 @urem0(i32 %x) {
; CHECK-LABEL: urem0:
; CHECK: # BB#0:
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: retq
  %rem = urem i32 %x, 0
  ret i32 %rem
}

define i32 @sdiv0(i32 %x) {
; CHECK-LABEL: sdiv0:
; CHECK: # BB#0:
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: retq
  %div = sdiv i32 %x, 0
  ret i32 %div
}

define i32 @udiv0(i32 %x) {
; CHECK-LABEL: udiv0:
; CHECK: # BB#0:
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: retq
  %div = udiv i32 %x, 0
  ret i32 %div
}

; FIXME: Div/rem by a zero vector is undef.
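; The vector versions are scalarized: each lane is extracted and divided by
; the same zeroed register, so every lane performs a trapping divide by zero.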

define <4 x i32> @srem_vec0(<4 x i32> %x) {
; CHECK-LABEL: srem_vec0:
; CHECK: # BB#0:
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; CHECK-NEXT: movd %xmm1, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %edx, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; CHECK-NEXT: movd %xmm2, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %edx, %xmm2
; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %edx, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %edx, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
  %rem = srem <4 x i32> %x, zeroinitializer
  ret <4 x i32> %rem
}

define <4 x i32> @urem_vec0(<4 x i32> %x) {
; CHECK-LABEL: urem_vec0:
; CHECK: # BB#0:
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; CHECK-NEXT: movd %xmm1, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %edx, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; CHECK-NEXT: movd %xmm2, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %edx, %xmm2
; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %edx, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %edx, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
  %rem = urem <4 x i32> %x, zeroinitializer
  ret <4 x i32> %rem
}

define <4 x i32> @sdiv_vec0(<4 x i32> %x) {
; CHECK-LABEL: sdiv_vec0:
; CHECK: # BB#0:
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; CHECK-NEXT: movd %xmm1, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; CHECK-NEXT: movd %xmm2, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %eax, %xmm2
; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
  %div = sdiv <4 x i32> %x, zeroinitializer
  ret <4 x i32> %div
}

define <4 x i32> @udiv_vec0(<4 x i32> %x) {
; CHECK-LABEL: udiv_vec0:
; CHECK: # BB#0:
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; CHECK-NEXT: movd %xmm1, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; CHECK-NEXT: movd %xmm2, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %eax, %xmm2
; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
  %div = udiv <4 x i32> %x, zeroinitializer
  ret <4 x i32> %div
}