; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=1 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOSMEM -check-prefix=GCN %s

; XXX - Why does it like to use vcc?

; GCN-LABEL: {{^}}spill_m0:
; TOSMEM: s_mov_b32 s[[LO:[0-9]+]], SCRATCH_RSRC_DWORD0
; TOSMEM: s_mov_b32 s[[HI:[0-9]+]], 0xe80000

; GCN-DAG: s_cmp_lg_u32

; TOVGPR-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], 0

; TOVMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVMEM-DAG: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]]
; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4 ; 4-byte Folded Spill
; TOVMEM: s_waitcnt vmcnt(0)

; TOSMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOSMEM: s_add_u32 m0, s3, 0x100{{$}}
; TOSMEM-NOT: [[M0_COPY]]
; TOSMEM: s_buffer_store_dword [[M0_COPY]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Spill
; TOSMEM: s_waitcnt lgkmcnt(0)

; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: [[ENDIF]]:
; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], 0
; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]

; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4 ; 4-byte Folded Reload
; TOVMEM: s_waitcnt vmcnt(0)
; TOVMEM: v_readfirstlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]]
; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]

; TOSMEM: s_add_u32 m0, s3, 0x100{{$}}
; TOSMEM: s_buffer_load_dword [[M0_RESTORE:s[0-9]+]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Reload
; TOSMEM-NOT: [[M0_RESTORE]]
; TOSMEM: s_mov_b32 m0, [[M0_RESTORE]]

; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
Matt Arsenault3dbeefa2017-03-21 21:39:51 +000046define amdgpu_kernel void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
Matt Arsenault2510a312016-09-03 06:57:55 +000047entry:
48 %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
49 %cmp0 = icmp eq i32 %cond, 0
50 br i1 %cmp0, label %if, label %endif
51
52if:
53 call void asm sideeffect "v_nop", ""() #0
54 br label %endif
55
56endif:
57 %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{M0}"(i32 %m0) #0
58 store i32 %foo, i32 addrspace(1)* %out
59 ret void
60}
61
62@lds = internal addrspace(3) global [64 x float] undef
63
Matt Arsenault640c44b2016-11-29 19:39:53 +000064; m0 is killed, so it isn't necessary during the entry block spill to preserve it
65; GCN-LABEL: {{^}}spill_kill_m0_lds:
Marek Olsak79c05872016-11-25 17:37:09 +000066; GCN: s_mov_b32 m0, s6
67; GCN: v_interp_mov_f32
68
Matt Arsenault640c44b2016-11-29 19:39:53 +000069; TOSMEM-NOT: s_m0
Matt Arsenault707780b2017-02-22 21:05:25 +000070; TOSMEM: s_add_u32 m0, s7, 0x100
Marek Olsak79c05872016-11-25 17:37:09 +000071; TOSMEM-NEXT: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +000072; FIXME: RegScavenger::isRegUsed() always returns true if m0 is reserved, so we have to save and restore it
73; FIXME-TOSMEM-NOT: m0
Marek Olsak79c05872016-11-25 17:37:09 +000074
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +000075; FIXME-TOSMEM-NOT: m0
Matt Arsenault707780b2017-02-22 21:05:25 +000076; TOSMEM: s_add_u32 m0, s7, 0x200
Matt Arsenaultc47701c2016-12-02 00:54:45 +000077; TOSMEM: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +000078; FIXME-TOSMEM-NOT: m0
Marek Olsak79c05872016-11-25 17:37:09 +000079
80; TOSMEM: s_mov_b64 exec,
81; TOSMEM: s_cbranch_execz
82; TOSMEM: s_branch
83
84; TOSMEM: BB{{[0-9]+_[0-9]+}}:
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +000085; TOSMEM: s_add_u32 m0, s7, 0x200
Matt Arsenaultc47701c2016-12-02 00:54:45 +000086; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload
Marek Olsak79c05872016-11-25 17:37:09 +000087
88
Matt Arsenault2510a312016-09-03 06:57:55 +000089; GCN-NOT: v_readlane_b32 m0
Marek Olsak79c05872016-11-25 17:37:09 +000090; GCN-NOT: s_buffer_store_dword m0
91; GCN-NOT: s_buffer_load_dword m0
Matt Arsenaultd2c8a332017-02-16 02:01:13 +000092define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %m0) #0 {
Matt Arsenault2510a312016-09-03 06:57:55 +000093main_body:
Matt Arsenaultd2c8a332017-02-16 02:01:13 +000094 %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
Matt Arsenault640c44b2016-11-29 19:39:53 +000095 %cmp = fcmp ueq float 0.000000e+00, %tmp
Matt Arsenault2510a312016-09-03 06:57:55 +000096 br i1 %cmp, label %if, label %else
97
Matt Arsenault640c44b2016-11-29 19:39:53 +000098if: ; preds = %main_body
Matt Arsenault2510a312016-09-03 06:57:55 +000099 %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
100 %lds_data = load float, float addrspace(3)* %lds_ptr
101 br label %endif
102
Matt Arsenault640c44b2016-11-29 19:39:53 +0000103else: ; preds = %main_body
Matt Arsenaultd2c8a332017-02-16 02:01:13 +0000104 %interp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
Matt Arsenault640c44b2016-11-29 19:39:53 +0000105 br label %endif
106
107endif: ; preds = %else, %if
108 %export = phi float [ %lds_data, %if ], [ %interp, %else ]
Matt Arsenault1f17c662017-02-22 00:27:34 +0000109 %tmp4 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %export, float %export)
110 call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp4, <2 x half> %tmp4, i1 true, i1 true) #0
Matt Arsenault640c44b2016-11-29 19:39:53 +0000111 ret void
112}
113
114; Force save and restore of m0 during SMEM spill
115; GCN-LABEL: {{^}}m0_unavailable_spill:
116
117; GCN: ; def m0, 1
118
119; GCN: s_mov_b32 m0, s2
120; GCN: v_interp_mov_f32
121
122; GCN: ; clobber m0
123
124; TOSMEM: s_mov_b32 vcc_hi, m0
Matt Arsenault707780b2017-02-22 21:05:25 +0000125; TOSMEM: s_add_u32 m0, s3, 0x100
Matt Arsenaultc47701c2016-12-02 00:54:45 +0000126; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
Matt Arsenault640c44b2016-11-29 19:39:53 +0000127; TOSMEM: s_mov_b32 m0, vcc_hi
128
129; TOSMEM: s_mov_b64 exec,
130; TOSMEM: s_cbranch_execz
131; TOSMEM: s_branch
132
133; TOSMEM: BB{{[0-9]+_[0-9]+}}:
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +0000134; TOSMEM: s_add_u32 m0, s3, 0x100
Matt Arsenaultc47701c2016-12-02 00:54:45 +0000135; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload
Matt Arsenault640c44b2016-11-29 19:39:53 +0000136
137; GCN-NOT: v_readlane_b32 m0
138; GCN-NOT: s_buffer_store_dword m0
139; GCN-NOT: s_buffer_load_dword m0
Matt Arsenault3dbeefa2017-03-21 21:39:51 +0000140define amdgpu_kernel void @m0_unavailable_spill(i32 %m0.arg) #0 {
Matt Arsenault640c44b2016-11-29 19:39:53 +0000141main_body:
142 %m0 = call i32 asm sideeffect "; def $0, 1", "={M0}"() #0
Matt Arsenaultd2c8a332017-02-16 02:01:13 +0000143 %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0.arg)
Matt Arsenault640c44b2016-11-29 19:39:53 +0000144 call void asm sideeffect "; clobber $0", "~{M0}"() #0
145 %cmp = fcmp ueq float 0.000000e+00, %tmp
146 br i1 %cmp, label %if, label %else
147
148if: ; preds = %main_body
149 store volatile i32 8, i32 addrspace(1)* undef
150 br label %endif
151
152else: ; preds = %main_body
153 store volatile i32 11, i32 addrspace(1)* undef
Matt Arsenault2510a312016-09-03 06:57:55 +0000154 br label %endif
155
156endif:
Matt Arsenault2510a312016-09-03 06:57:55 +0000157 ret void
158}
159
Marek Olsak79c05872016-11-25 17:37:09 +0000160; GCN-LABEL: {{^}}restore_m0_lds:
Matt Arsenaultc47701c2016-12-02 00:54:45 +0000161; TOSMEM: s_load_dwordx2 [[REG:s\[[0-9]+:[0-9]+\]]]
Marek Olsak79c05872016-11-25 17:37:09 +0000162; TOSMEM: s_cmp_eq_u32
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +0000163; FIXME: RegScavenger::isRegUsed() always returns true if m0 is reserved, so we have to save and restore it
164; FIXME-TOSMEM-NOT: m0
Matt Arsenault707780b2017-02-22 21:05:25 +0000165; TOSMEM: s_add_u32 m0, s3, 0x100
Marek Olsak693e9be2016-12-09 19:49:48 +0000166; TOSMEM: s_buffer_store_dwordx2 [[REG]], s[88:91], m0 ; 8-byte Folded Spill
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +0000167; FIXME-TOSMEM-NOT: m0
Matt Arsenault707780b2017-02-22 21:05:25 +0000168; TOSMEM: s_add_u32 m0, s3, 0x300
Marek Olsak693e9be2016-12-09 19:49:48 +0000169; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s[88:91], m0 ; 4-byte Folded Spill
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +0000170; FIXME-TOSMEM-NOT: m0
Marek Olsak79c05872016-11-25 17:37:09 +0000171; TOSMEM: s_cbranch_scc1
172
173; TOSMEM: s_mov_b32 m0, -1
174
175; TOSMEM: s_mov_b32 vcc_hi, m0
Matt Arsenault707780b2017-02-22 21:05:25 +0000176; TOSMEM: s_add_u32 m0, s3, 0x100
Marek Olsak693e9be2016-12-09 19:49:48 +0000177; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
Marek Olsak79c05872016-11-25 17:37:09 +0000178; TOSMEM: s_mov_b32 m0, vcc_hi
179; TOSMEM: s_waitcnt lgkmcnt(0)
180
181; TOSMEM: ds_write_b64
182
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +0000183; FIXME-TOSMEM-NOT: m0
Matt Arsenault707780b2017-02-22 21:05:25 +0000184; TOSMEM: s_add_u32 m0, s3, 0x300
Marek Olsak693e9be2016-12-09 19:49:48 +0000185; TOSMEM: s_buffer_load_dword s0, s[88:91], m0 ; 4-byte Folded Reload
Stanislav Mekhanoshinbd5394b2017-04-24 19:37:54 +0000186; FIXME-TOSMEM-NOT: m0
Marek Olsak79c05872016-11-25 17:37:09 +0000187; TOSMEM: s_waitcnt lgkmcnt(0)
Matt Arsenault640c44b2016-11-29 19:39:53 +0000188; TOSMEM-NOT: m0
Marek Olsak79c05872016-11-25 17:37:09 +0000189; TOSMEM: s_mov_b32 m0, s0
190; TOSMEM: ; use m0
191
192; TOSMEM: s_dcache_wb
193; TOSMEM: s_endpgm
Matt Arsenault3dbeefa2017-03-21 21:39:51 +0000194define amdgpu_kernel void @restore_m0_lds(i32 %arg) {
Marek Olsak79c05872016-11-25 17:37:09 +0000195 %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
196 %sval = load volatile i64, i64 addrspace(2)* undef
197 %cmp = icmp eq i32 %arg, 0
198 br i1 %cmp, label %ret, label %bb
199
200bb:
201 store volatile i64 %sval, i64 addrspace(3)* undef
202 call void asm sideeffect "; use $0", "{M0}"(i32 %m0) #0
203 br label %ret
204
205ret:
206 ret void
207}
208
Matt Arsenault3ea06332017-02-22 00:02:21 +0000209declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
210declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
Matt Arsenault1f17c662017-02-22 00:27:34 +0000211declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
212declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
Matt Arsenault2510a312016-09-03 06:57:55 +0000213
214attributes #0 = { nounwind }
Matt Arsenaultd2c8a332017-02-16 02:01:13 +0000215attributes #1 = { nounwind readnone }