; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mattr=+vgpr-spilling -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s

; XXX - Why does it like to use vcc?
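; Check that m0 is spilled and restored across the branch: either to a lane of
; a VGPR via v_writelane_b32/v_readlane_b32 (TOVGPR), or to scratch memory via
; buffer_store_dword/buffer_load_dword (TOVMEM).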
; GCN-LABEL: {{^}}spill_m0:
; TOSMEM: s_mov_b32 s88, SCRATCH_RSRC_DWORD0

; GCN: s_cmp_lg_u32

; TOVGPR: s_mov_b32 vcc_hi, m0
; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], vcc_hi, 0

; TOVMEM: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], m0
; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Spill
; TOVMEM: s_waitcnt vmcnt(0)
; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: [[ENDIF]]:
; TOVGPR: v_readlane_b32 vcc_hi, [[SPILL_VREG]], 0
; TOVGPR: s_mov_b32 m0, vcc_hi

; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Reload
; TOVMEM: s_waitcnt vmcnt(0)
; TOVMEM: v_readfirstlane_b32 vcc_hi, [[RELOAD_VREG]]
; TOVMEM: s_mov_b32 m0, vcc_hi

; GCN: s_add_i32 m0, m0, 1
define void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
  %cmp0 = icmp eq i32 %cond, 0
  br i1 %cmp0, label %if, label %endif

if:
  call void asm sideeffect "v_nop", ""() #0
  br label %endif

endif:
  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{M0}"(i32 %m0) #0
  store i32 %foo, i32 addrspace(1)* %out
  ret void
}

@lds = internal addrspace(3) global [64 x float] undef

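; Check that m0 is not clobbered by spill restore code (no v_readlane_b32 into
; m0) in a shader that also uses LDS and interpolation.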
; GCN-LABEL: {{^}}spill_m0_lds:
; GCN-NOT: v_readlane_b32 m0
define amdgpu_ps void @spill_m0_lds(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg) #0 {
main_body:
  %4 = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %3)
  %cmp = fcmp ueq float 0.0, %4
  br i1 %cmp, label %if, label %else

if:
  %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
  %lds_data = load float, float addrspace(3)* %lds_ptr
  br label %endif

else:
  %interp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %3)
  br label %endif

endif:
  %export = phi float [%lds_data, %if], [%interp, %else]
  %5 = call i32 @llvm.SI.packf16(float %export, float %export)
  %6 = bitcast i32 %5 to float
  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %6, float %6, float %6, float %6)
  ret void
}

declare float @llvm.SI.fs.constant(i32, i32, i32) readnone

declare i32 @llvm.SI.packf16(float, float) readnone

declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)

attributes #0 = { nounwind }