; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=1 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOSMEM -check-prefix=GCN %s
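; The RUN lines above select how SGPRs (here m0) are spilled: TOVGPR spills
; via v_writelane_b32 into a VGPR lane, TOVMEM copies the value to a VGPR and
; uses buffer_store_dword to scratch, and TOSMEM uses s_buffer_store_dword,
; with m0 itself serving as the scratch offset register.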

; XXX - Why does it like to use vcc?

; GCN-LABEL: {{^}}spill_m0:
; TOSMEM: s_mov_b32 s84, SCRATCH_RSRC_DWORD0

; GCN-DAG: s_cmp_lg_u32

; TOVGPR-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], 0

; TOVMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVMEM-DAG: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]]
; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Spill
; TOVMEM: s_waitcnt vmcnt(0)

; TOSMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOSMEM: s_mov_b32 m0, s3{{$}}
; TOSMEM-NOT: [[M0_COPY]]
; TOSMEM: s_buffer_store_dword [[M0_COPY]], s[84:87], m0 ; 4-byte Folded Spill
; TOSMEM: s_waitcnt lgkmcnt(0)

; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: [[ENDIF]]:
; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], 0
; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]

; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Reload
; TOVMEM: s_waitcnt vmcnt(0)
; TOVMEM: v_readfirstlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]]
; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]

; TOSMEM: s_mov_b32 m0, s3{{$}}
; TOSMEM: s_buffer_load_dword [[M0_RESTORE:s[0-9]+]], s[84:87], m0 ; 4-byte Folded Reload
; TOSMEM-NOT: [[M0_RESTORE]]
; TOSMEM: s_mov_b32 m0, [[M0_RESTORE]]

; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
define void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
  %cmp0 = icmp eq i32 %cond, 0
  br i1 %cmp0, label %if, label %endif

if:
  call void asm sideeffect "v_nop", ""() #0
  br label %endif

endif:
  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{M0}"(i32 %m0) #0
  store i32 %foo, i32 addrspace(1)* %out
  ret void
}

@lds = internal addrspace(3) global [64 x float] undef

; m0 is killed, so it does not need to be preserved by the entry block spill.
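; The TOSMEM spills below are therefore expected to go straight to
; s_buffer_store_dword at m0 offsets s7, s7+0x100 and s7+0x200 without first
; copying m0 aside.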
; GCN-LABEL: {{^}}spill_kill_m0_lds:
; GCN: s_mov_b32 m0, s6
; GCN: v_interp_mov_f32

; TOSMEM-NOT: s_m0
; TOSMEM: s_mov_b32 m0, s7
; TOSMEM-NEXT: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
; TOSMEM-NOT: m0

; TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s7, 0x100
; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
; TOSMEM: s_add_u32 m0, s7, 0x200
; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
; TOSMEM-NOT: m0

; TOSMEM: s_mov_b64 exec,
; TOSMEM: s_cbranch_execz
; TOSMEM: s_branch

; TOSMEM: BB{{[0-9]+_[0-9]+}}:
; TOSMEM-NEXT: s_add_u32 m0, s7, 0x100
; TOSMEM-NEXT: s_buffer_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Reload

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3) #0 {
main_body:
  %tmp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %arg3)
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
  %lds_data = load float, float addrspace(3)* %lds_ptr
  br label %endif

else:                                             ; preds = %main_body
  %interp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %arg3)
  br label %endif

endif:                                            ; preds = %else, %if
  %export = phi float [ %lds_data, %if ], [ %interp, %else ]
  %tmp4 = call i32 @llvm.SI.packf16(float %export, float %export)
  %tmp5 = bitcast i32 %tmp4 to float
  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp5, float %tmp5, float %tmp5, float %tmp5)
  ret void
}

; Force save and restore of m0 during SMEM spill
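; The TOSMEM path is expected to save m0 to vcc_hi, use m0 as the spill
; offset for the s_buffer_store/s_buffer_load pair, and then restore it from
; vcc_hi, as the checks below verify.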
; GCN-LABEL: {{^}}m0_unavailable_spill:

; GCN: ; def m0, 1

; GCN: s_mov_b32 m0, s2
; GCN: v_interp_mov_f32

; GCN: ; clobber m0

; TOSMEM: s_mov_b32 vcc_hi, m0
; TOSMEM: s_mov_b32 m0, s3
; TOSMEM-NEXT: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
; TOSMEM: s_mov_b32 m0, vcc_hi

; TOSMEM: s_mov_b64 exec,
; TOSMEM: s_cbranch_execz
; TOSMEM: s_branch

; TOSMEM: BB{{[0-9]+_[0-9]+}}:
; TOSMEM-NEXT: s_mov_b32 m0, s3
; TOSMEM-NEXT: s_buffer_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Reload
; TOSMEM-NEXT: s_add_u32 m0, s3, 0x100

; FIXME: Could delay this wait
; TOSMEM-NEXT: s_waitcnt lgkmcnt(0)
; TOSMEM-NEXT: s_buffer_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Reload

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define void @m0_unavailable_spill(i32 %arg3) #0 {
main_body:
  %m0 = call i32 asm sideeffect "; def $0, 1", "={M0}"() #0
  %tmp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %arg3)
  call void asm sideeffect "; clobber $0", "~{M0}"() #0
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  store volatile i32 8, i32 addrspace(1)* undef
  br label %endif

else:                                             ; preds = %main_body
  store volatile i32 11, i32 addrspace(1)* undef
  br label %endif

endif:
  ret void
}

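; The SMEM reloads ahead of the LDS write are expected to save the live m0
; value to vcc_hi, use m0 as the offset register, and then restore it; the
; final reload feeds s_mov_b32 m0, s0 for the inline-asm use of m0.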
; GCN-LABEL: {{^}}restore_m0_lds:
; TOSMEM: s_cmp_eq_u32
; TOSMEM-NOT: m0
; TOSMEM: s_mov_b32 m0, s3
; TOSMEM: s_buffer_store_dword s4, s[84:87], m0 ; 4-byte Folded Spill
; TOSMEM-NOT: m0
; TOSMEM: s_cbranch_scc1

; TOSMEM: s_mov_b32 m0, -1

; TOSMEM: s_mov_b32 vcc_hi, m0
; TOSMEM: s_mov_b32 m0, s3
; TOSMEM: s_buffer_load_dword s4, s[84:87], m0 ; 4-byte Folded Reload
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_waitcnt lgkmcnt(0)
; TOSMEM: s_buffer_load_dword s5, s[84:87], m0 ; 4-byte Folded Reload
; TOSMEM: s_mov_b32 m0, vcc_hi
; TOSMEM: s_waitcnt lgkmcnt(0)

; TOSMEM: ds_write_b64

; TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, 0x200
; TOSMEM: s_buffer_load_dword s0, s[84:87], m0 ; 4-byte Folded Reload
; TOSMEM-NOT: m0
; TOSMEM: s_waitcnt lgkmcnt(0)
; TOSMEM-NOT: m0
; TOSMEM: s_mov_b32 m0, s0
; TOSMEM: ; use m0

; TOSMEM: s_dcache_wb
; TOSMEM: s_endpgm
define void @restore_m0_lds(i32 %arg) {
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
  %sval = load volatile i64, i64 addrspace(2)* undef
  %cmp = icmp eq i32 %arg, 0
  br i1 %cmp, label %ret, label %bb

bb:
  store volatile i64 %sval, i64 addrspace(3)* undef
  call void asm sideeffect "; use $0", "{M0}"(i32 %m0) #0
  br label %ret

ret:
  ret void
}

declare float @llvm.SI.fs.constant(i32, i32, i32) readnone

declare i32 @llvm.SI.packf16(float, float) readnone

declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)

attributes #0 = { nounwind }