; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s -check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s

; mul24 and mad24 are affected

; FUNC-LABEL: @test_mul_v2i32
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32> addrspace(1) * %in
  %b = load <2 x i32> addrspace(1) * %b_ptr
  %result = mul <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: @v_mul_v4i32
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32> addrspace(1) * %in
  %b = load <4 x i32> addrspace(1) * %b_ptr
  %result = mul <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

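; Only the low 32 bits of the product are used here, so the 64-bit multiply
; should be reduced to a single 32-bit scalar multiply.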
; FUNC-LABEL: @s_trunc_i64_mul_to_i32
; SI: S_LOAD_DWORD
; SI: S_LOAD_DWORD
; SI: S_MUL_I32
; SI: BUFFER_STORE_DWORD
define void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
  %mul = mul i64 %b, %a
  %trunc = trunc i64 %mul to i32
  store i32 %trunc, i32 addrspace(1)* %out, align 8
  ret void
}

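; Same as above, but with the operands loaded from memory, so the low 32-bit
; multiply should be done in the VALU.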
; FUNC-LABEL: @v_trunc_i64_mul_to_i32
; SI: S_LOAD_DWORD
; SI: S_LOAD_DWORD
; SI: V_MUL_LO_I32
; SI: BUFFER_STORE_DWORD
define void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
  %a = load i64 addrspace(1)* %aptr, align 8
  %b = load i64 addrspace(1)* %bptr, align 8
  %mul = mul i64 %b, %a
  %trunc = trunc i64 %mul to i32
  store i32 %trunc, i32 addrspace(1)* %out, align 8
  ret void
}

; This 64-bit multiply should just use MUL_HI and MUL_LO, since the top
; 32 bits of both arguments are sign bits.
; FUNC-LABEL: @mul64_sext_c
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; SI-DAG: S_MUL_I32
; SI-DAG: V_MUL_HI_I32
define void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
entry:
  %0 = sext i32 %in to i64
  %1 = mul i64 %0, 80
  store i64 %1, i64 addrspace(1)* %out
  ret void
}

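; Same as above, but the sign-extended value is loaded rather than passed as a
; scalar argument, so both halves of the multiply should be done in the VALU.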
; FUNC-LABEL: @v_mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; SI-DAG: V_MUL_LO_I32
; SI-DAG: V_MUL_HI_I32
; SI: S_ENDPGM
define void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %val = load i32 addrspace(1)* %in, align 4
  %ext = sext i32 %val to i64
  %mul = mul i64 %ext, 80
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

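; 9 is a valid inline immediate, so it should appear directly as a multiply
; operand instead of being materialized in a register first.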
; FUNC-LABEL: @v_mul64_sext_inline_imm:
; SI-DAG: V_MUL_LO_I32 v{{[0-9]+}}, 9, v{{[0-9]+}}
; SI-DAG: V_MUL_HI_I32 v{{[0-9]+}}, 9, v{{[0-9]+}}
; SI: S_ENDPGM
define void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %val = load i32 addrspace(1)* %in, align 4
  %ext = sext i32 %val to i64
  %mul = mul i64 %ext, 9
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

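; Both operands are scalar arguments, so the multiply should be done on the
; SALU and the result copied into a VGPR for the store.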
; FUNC-LABEL: @s_mul_i32:
; SI: S_LOAD_DWORD [[SRC0:s[0-9]+]],
; SI: S_LOAD_DWORD [[SRC1:s[0-9]+]],
; SI: S_MUL_I32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; SI: BUFFER_STORE_DWORD [[VRESULT]],
; SI: S_ENDPGM
define void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
  %mul = mul i32 %a, %b
  store i32 %mul, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @v_mul_i32
; SI: V_MUL_LO_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
  %a = load i32 addrspace(1)* %in
  %b = load i32 addrspace(1)* %b_ptr
  %result = mul i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; A standard 64-bit multiply. The expansion should be around 6 instructions.
; It would be difficult to match the expansion correctly without writing
; a really complicated list of FileCheck expressions. I don't want
; to confuse people who may 'break' this test with a correct optimization,
; so this test just uses FUNC-LABEL to make sure the compiler does not
; crash with a 'failed to select' error.

; FUNC-LABEL: @s_mul_i64:
define void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
  %mul = mul i64 %a, %b
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @v_mul_i64
; SI: V_MUL_LO_I32
define void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %a = load i64 addrspace(1)* %aptr, align 8
  %b = load i64 addrspace(1)* %bptr, align 8
  %mul = mul i64 %a, %b
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

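; Make sure a 32-bit multiply inside a branch is still selected correctly.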
; FUNC-LABEL: @mul32_in_branch
; SI: V_MUL_LO_I32
define void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
entry:
  %0 = icmp eq i32 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i32 addrspace(1)* %in
  br label %endif

else:
  %2 = mul i32 %a, %b
  br label %endif

endif:
  %3 = phi i32 [%1, %if], [%2, %else]
  store i32 %3, i32 addrspace(1)* %out
  ret void
}

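; Same as above, but with a 64-bit multiply in the branch; the expansion should
; still include the low and high 32-bit multiplies.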
; FUNC-LABEL: @mul64_in_branch
; SI-DAG: V_MUL_LO_I32
; SI-DAG: V_MUL_HI_U32
; SI: S_ENDPGM
define void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i64 addrspace(1)* %in
  br label %endif

else:
  %2 = mul i64 %a, %b
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, i64 addrspace(1)* %out
  ret void
}