; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s

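; The SGPR operand should be folded directly into the VALU add as a source
; operand rather than first being copied into a VGPR.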
; CHECK-LABEL: {{^}}fold_sgpr:
; CHECK: v_add_i32_e32 v{{[0-9]+}}, vcc, s
define amdgpu_kernel void @fold_sgpr(i32 addrspace(1)* %out, i32 %fold) {
entry:
  %tmp0 = icmp ne i32 %fold, 0
  br i1 %tmp0, label %if, label %endif

if:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %offset = add i32 %fold, %id
  %tmp1 = getelementptr i32, i32 addrspace(1)* %out, i32 %offset
  store i32 0, i32 addrspace(1)* %tmp1
  br label %endif

endif:
  ret void
}

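; The constant expression 3 + 2 folds to 5, an inline constant, which should
; be folded directly into the VALU or.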
; CHECK-LABEL: {{^}}fold_imm:
; CHECK: v_or_b32_e32 v{{[0-9]+}}, 5
define amdgpu_kernel void @fold_imm(i32 addrspace(1)* %out, i32 %cmp) {
entry:
  %fold = add i32 3, 2
  %tmp0 = icmp ne i32 %cmp, 0
  br i1 %tmp0, label %if, label %endif

if:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %val = or i32 %id, %fold
  store i32 %val, i32 addrspace(1)* %out
  br label %endif

endif:
  ret void
}

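; The 64-bit add of 1 should be lowered to an s_add_u32/s_addc_u32 pair with
; the constant folded as an inline operand, without first materializing it
; via s_mov_b64.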
; CHECK-LABEL: {{^}}fold_64bit_constant_add:
; CHECK-NOT: s_mov_b64
; FIXME: It would be better if we could use v_add here and drop the extra
; v_mov_b32 instructions.
; CHECK-DAG: s_add_u32 [[LO:s[0-9]+]], s{{[0-9]+}}, 1
; CHECK-DAG: s_addc_u32 [[HI:s[0-9]+]], s{{[0-9]+}}, 0
; CHECK-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], [[LO]]
; CHECK-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[HI]]
; CHECK: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}},

define amdgpu_kernel void @fold_64bit_constant_add(i64 addrspace(1)* %out, i32 %cmp, i64 %val) {
entry:
  %tmp0 = add i64 %val, 1
  store i64 %tmp0, i64 addrspace(1)* %out
  ret void
}

; Inline constants should always be folded.

; CHECK-LABEL: {{^}}vector_inline:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}

define amdgpu_kernel void @vector_inline(<4 x i32> addrspace(1)* %out) {
entry:
  %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp0, 1
  %tmp2 = add i32 %tmp0, 2
  %tmp3 = add i32 %tmp0, 3
  %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
  %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
  %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
  %tmp4 = xor <4 x i32> <i32 5, i32 5, i32 5, i32 5>, %vec3
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
  ret void
}

; Immediates with one use should be folded.
; CHECK-LABEL: {{^}}imm_one_use:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}

define amdgpu_kernel void @imm_one_use(i32 addrspace(1)* %out) {
entry:
  %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = xor i32 %tmp0, 100
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
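
; A 32-bit literal with multiple uses should be materialized once in an SGPR
; and reused by each VALU xor rather than folded into every instruction.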
; CHECK-LABEL: {{^}}vector_imm:
; CHECK: s_movk_i32 [[IMM:s[0-9]+]], 0x64
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}

define amdgpu_kernel void @vector_imm(<4 x i32> addrspace(1)* %out) {
entry:
  %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp0, 1
  %tmp2 = add i32 %tmp0, 2
  %tmp3 = add i32 %tmp0, 3
  %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
  %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
  %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
  %tmp4 = xor <4 x i32> <i32 100, i32 100, i32 100, i32 100>, %vec3
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
  ret void
}

; A subregister use operand should not be tied.
; CHECK-LABEL: {{^}}no_fold_tied_subregister:
; CHECK: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; CHECK: v_mac_f32_e32 v[[LO]], 0x41200000, v[[HI]]
; CHECK: buffer_store_dword v[[LO]]
define amdgpu_kernel void @no_fold_tied_subregister() {
  %tmp1 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
  %tmp2 = extractelement <2 x float> %tmp1, i32 0
  %tmp3 = extractelement <2 x float> %tmp1, i32 1
  %tmp4 = fmul float %tmp3, 10.0
  %tmp5 = fadd float %tmp4, %tmp2
  store volatile float %tmp5, float addrspace(1)* undef
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #0

attributes #0 = { nounwind readnone }