; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s

; A uniform (SGPR) value should be folded directly into the VALU add as its
; scalar operand, rather than being copied into a VGPR first — the CHECK
; pattern requires an 's' register operand on v_add_i32_e32.
; CHECK-LABEL: {{^}}fold_sgpr:
; CHECK: v_add_i32_e32 v{{[0-9]+}}, vcc, s
define void @fold_sgpr(i32 addrspace(1)* %out, i32 %fold) {
entry:
  %tmp0 = icmp ne i32 %fold, 0
  br i1 %tmp0, label %if, label %endif

if:
  ; %fold (uniform) + %id (divergent thread id) forces a mixed SGPR/VGPR add.
  %id = call i32 @llvm.r600.read.tidig.x()
  %offset = add i32 %fold, %id
  %tmp1 = getelementptr i32, i32 addrspace(1)* %out, i32 %offset
  store i32 0, i32 addrspace(1)* %tmp1
  br label %endif

endif:
  ret void
}
| 20 | |
; The constant expression (3 + 2) folds to the inline immediate 5, which the
; operand folder should place directly into the OR instead of materializing
; it in a register.
; CHECK-LABEL: {{^}}fold_imm:
; CHECK: v_or_b32_e32 v{{[0-9]+}}, 5
define void @fold_imm(i32 addrspace(1)* %out, i32 %cmp) {
entry:
  ; Constant-folds to 5 — an inline constant on this target.
  %fold = add i32 3, 2
  %tmp0 = icmp ne i32 %cmp, 0
  br i1 %tmp0, label %if, label %endif

if:
  %id = call i32 @llvm.r600.read.tidig.x()
  %val = or i32 %id, %fold
  store i32 %val, i32 addrspace(1)* %out
  br label %endif

endif:
  ret void
}
| 38 | |
; A 64-bit add of the constant 1 should be split into s_add_u32/s_addc_u32
; on the scalar unit without first materializing the constant via s_mov_b64.
; CHECK-LABEL: {{^}}fold_64bit_constant_add:
; CHECK-NOT: s_mov_b64
; FIXME: It would be better if we could use v_add here and drop the extra
; v_mov_b32 instructions.
; CHECK-DAG: s_add_u32 [[LO:s[0-9]+]], s{{[0-9]+}}, 1
; CHECK-DAG: s_addc_u32 [[HI:s[0-9]+]], s{{[0-9]+}}, 0
; CHECK-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], [[LO]]
; CHECK-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[HI]]
; CHECK: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}},

; NOTE(review): %cmp is unused in the body — presumably kept so the argument
; layout (and thus the SGPRs the CHECK lines match) stays fixed; confirm
; before removing.
define void @fold_64bit_constant_add(i64 addrspace(1)* %out, i32 %cmp, i64 %val) {
entry:
  %tmp0 = add i64 %val, 1
  store i64 %tmp0, i64 addrspace(1)* %out
  ret void
}
| 55 | |
; Inline constants should always be folded.

; The splat <5, 5, 5, 5> is an inline constant, so each of the four scalarized
; XORs should carry the literal 5 directly as an operand.
; CHECK-LABEL: {{^}}vector_inline:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}

define void @vector_inline(<4 x i32> addrspace(1)* %out) {
entry:
  ; Build a divergent 4-element vector from the thread id so the xor must
  ; execute on the VALU per lane.
  %tmp0 = call i32 @llvm.r600.read.tidig.x()
  %tmp1 = add i32 %tmp0, 1
  %tmp2 = add i32 %tmp0, 2
  %tmp3 = add i32 %tmp0, 3
  %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
  %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
  %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
  %tmp4 = xor <4 x i32> <i32 5, i32 5, i32 5, i32 5>, %vec3
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
  ret void
}
| 78 | |
; Immediates with one use should be folded
; 100 (0x64) is not an inline constant, but with a single use it should still
; be folded as a literal operand rather than loaded into a register.
; CHECK-LABEL: {{^}}imm_one_use:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}

define void @imm_one_use(i32 addrspace(1)* %out) {
entry:
  %tmp0 = call i32 @llvm.r600.read.tidig.x()
  %tmp1 = xor i32 %tmp0, 100
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
; Unlike the single-use case above, 0x64 used by four lanes should be
; materialized once into an SGPR (s_movk_i32) and that register reused by
; all four XORs.
; CHECK-LABEL: {{^}}vector_imm:
; CHECK: s_movk_i32 [[IMM:s[0-9]+]], 0x64
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}

define void @vector_imm(<4 x i32> addrspace(1)* %out) {
entry:
  ; Same divergent-vector setup as @vector_inline, but with a non-inline
  ; splat constant <100 x4>.
  %tmp0 = call i32 @llvm.r600.read.tidig.x()
  %tmp1 = add i32 %tmp0, 1
  %tmp2 = add i32 %tmp0, 2
  %tmp3 = add i32 %tmp0, 3
  %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
  %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
  %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
  %tmp4 = xor <4 x i32> <i32 100, i32 100, i32 100, i32 100>, %vec3
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
  ret void
}
Tom Stellard | 26cc18d | 2015-01-07 22:18:27 +0000 | [diff] [blame] | 111 | |
; Work-item id intrinsic used above to produce a per-lane (divergent) value.
declare i32 @llvm.r600.read.tidig.x() #0
attributes #0 = { readnone }