; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn -amdgpu-spill-sgpr-to-vgpr=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=VMEM -check-prefix=GCN %s
; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn -amdgpu-spill-sgpr-to-vgpr=1 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=VGPR -check-prefix=GCN %s

; Verify the registers used for tracking exec mask changes when all
; registers are spilled at the end of the block. The placement of the
; SGPR spills relative to the exec mask modifications is important.

; FIXME: This tests with SGPR-to-VGPR spilling disabled, but this may
; not work correctly in cases where no workitems take a branch.

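; The VGPR run (-amdgpu-spill-sgpr-to-vgpr=1) checks that the saved exec
; mask is spilled into VGPR lanes with v_writelane_b32 and restored with
; v_readlane_b32; the VMEM run (-amdgpu-spill-sgpr-to-vgpr=0) checks that
; it is copied into a VGPR, spilled to scratch with buffer_store_dword,
; and restored with buffer_load_dword plus v_readfirstlane_b32.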

; GCN-LABEL: {{^}}divergent_if_endif:
; VGPR: workitem_private_segment_byte_size = 12{{$}}


; GCN: {{^}}; %bb.0:
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]

; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, v0
; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, [[CMP0]]

; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]


; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:4 ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:8 ; 4-byte Folded Spill

; Spill load
; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 offset:[[LOAD0_OFFSET:[0-9]+]] ; 4-byte Folded Spill

; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}

; GCN: mask branch [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: {{^}}BB{{[0-9]+}}_1: ; %if
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD1:v[0-9]+]]
; GCN: s_waitcnt lgkmcnt(0)
; GCN: buffer_load_dword [[RELOAD_LOAD0:v[0-9]+]], off, s[0:3], s7 offset:[[LOAD0_OFFSET]] ; 4-byte Folded Reload

; Spill val register
; GCN: v_add_i32_e32 [[VAL:v[0-9]+]], vcc, [[LOAD1]], [[RELOAD_LOAD0]]
; GCN: buffer_store_dword [[VAL]], off, s[0:3], s7 offset:[[VAL_OFFSET:[0-9]+]] ; 4-byte Folded Spill

; VMEM: [[ENDIF]]:
; Reload and restore exec mask
; VGPR: s_waitcnt lgkmcnt(0)
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]


; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:8 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}

; Restore val
; GCN: buffer_load_dword [[RELOAD_VAL:v[0-9]+]], off, s[0:3], s7 offset:[[VAL_OFFSET]] ; 4-byte Folded Reload

; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RELOAD_VAL]]
define amdgpu_kernel void @divergent_if_endif(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %if, label %endif

if:
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %val = add i32 %load0, %load1
  br label %endif

endif:
  %tmp4 = phi i32 [ %val, %if ], [ 0, %entry ]
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}

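; In the loop case the loop-carried value is reloaded from scratch at the
; top of the loop body and the subtraction result is spilled again before
; the backedge; the saved exec mask is only reloaded and restored in the
; join block after the loop.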
; GCN-LABEL: {{^}}divergent_loop:
; VGPR: workitem_private_segment_byte_size = 12{{$}}

; GCN: {{^}}; %bb.0:

; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]

; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, v0

; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, [[CMP0]]

; Spill load
; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 offset:4 ; 4-byte Folded Spill

; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]


; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:20 ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:24 ; 4-byte Folded Spill

; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}

; GCN-NEXT: ; mask branch [[END:BB[0-9]+_[0-9]+]]
; GCN-NEXT: s_cbranch_execz [[END]]


; GCN: [[LOOP:BB[0-9]+_[0-9]+]]:
; GCN: buffer_load_dword v[[VAL_LOOP_RELOAD:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
; GCN: v_subrev_i32_e32 [[VAL_LOOP:v[0-9]+]], vcc, v{{[0-9]+}}, v[[VAL_LOOP_RELOAD]]
; GCN: s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
; GCN: buffer_store_dword [[VAL_LOOP]], off, s[0:3], s7 offset:[[VAL_SUB_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN-NEXT: s_cbranch_scc1 [[LOOP]]


; GCN: [[END]]:
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:20 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:24 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}
; GCN: buffer_load_dword v[[VAL_END:[0-9]+]], off, s[0:3], s7 offset:[[VAL_SUB_OFFSET]] ; 4-byte Folded Reload

; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[VAL_END]]
define amdgpu_kernel void @divergent_loop(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %loop, label %end

loop:
  %i = phi i32 [ %i.inc, %loop ], [ 0, %entry ]
  %val = phi i32 [ %val.sub, %loop ], [ %load0, %entry ]
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %i.inc = add i32 %i, 1
  %val.sub = sub i32 %val, %load1
  %cmp1 = icmp ne i32 %i, 256
  br i1 %cmp1, label %loop, label %end

end:
  %tmp4 = phi i32 [ %val.sub, %loop ], [ 0, %entry ]
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}

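; In the if/else case the saved exec mask is additionally rewritten with
; s_xor_b64 before the branch. The %Flow block reloads the mask, merges it
; with s_or_saveexec_b64, and spills it again for the final join at endif.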
; GCN-LABEL: {{^}}divergent_if_else_endif:
; GCN: {{^}}; %bb.0:

; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]

; GCN: v_cmp_ne_u32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], v0,

; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, [[CMP0]]
; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}

; Spill load
; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 offset:4 ; 4-byte Folded Spill

; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]

; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:[[SAVEEXEC_LO_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:[[SAVEEXEC_HI_OFFSET:[0-9]+]] ; 4-byte Folded Spill

; GCN: s_mov_b64 exec, [[CMP0]]

; FIXME: It makes no sense to put this skip here
; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]
; GCN: s_cbranch_execz [[FLOW]]
; GCN-NEXT: s_branch [[ELSE:BB[0-9]+_[0-9]+]]

; GCN: [[FLOW]]: ; %Flow
; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]


; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:[[SAVEEXEC_LO_OFFSET]] ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[FLOW_V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:[[SAVEEXEC_HI_OFFSET]] ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[FLOW_V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_saveexec_b64 s{{\[}}[[FLOW_S_RELOAD_SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}, s{{\[}}[[FLOW_S_RELOAD_SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}

; Regular spill value restored after exec modification
; GCN: buffer_load_dword [[FLOW_VAL:v[0-9]+]], off, s[0:3], s7 offset:[[FLOW_VAL_OFFSET:[0-9]+]] ; 4-byte Folded Reload


; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_LO]], [[FLOW_SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_HI]], [[FLOW_SAVEEXEC_HI_LANE:[0-9]+]]


; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_LO:[0-9]+]], s[[FLOW_S_RELOAD_SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_LO]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_LO_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_HI:[0-9]+]], s[[FLOW_S_RELOAD_SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_HI]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_HI_OFFSET:[0-9]+]] ; 4-byte Folded Spill

; GCN: buffer_store_dword [[FLOW_VAL]], off, s[0:3], s7 offset:[[RESULT_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_xor_b64 exec, exec, s{{\[}}[[FLOW_S_RELOAD_SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}
; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9]+_[0-9]+]]
; GCN-NEXT: s_cbranch_execz [[ENDIF]]


; GCN: BB{{[0-9]+}}_2: ; %if
; GCN: ds_read_b32
; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]
; GCN: buffer_store_dword [[ADD]], off, s[0:3], s7 offset:[[RESULT_OFFSET]] ; 4-byte Folded Spill
; GCN-NEXT: s_branch [[ENDIF]]

; GCN: [[ELSE]]: ; %else
; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
; GCN: v_subrev_i32_e32 [[SUB:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]
; GCN: buffer_store_dword [[SUB]], off, s[0:3], s7 offset:[[FLOW_RESULT_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN-NEXT: s_branch [[FLOW]]

; GCN: [[ENDIF]]:
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[FLOW_SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[FLOW_SAVEEXEC_HI_LANE]]


; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_LO_OFFSET]] ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_HI_OFFSET]] ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}

; GCN: buffer_load_dword v[[RESULT:[0-9]+]], off, s[0:3], s7 offset:[[RESULT_OFFSET]] ; 4-byte Folded Reload
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RESULT]]
define amdgpu_kernel void @divergent_if_else_endif(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %if, label %else

if:
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %val0 = add i32 %load0, %load1
  br label %endif

else:
  %load2 = load volatile i32, i32 addrspace(3)* undef
  %val1 = sub i32 %load0, %load2
  br label %endif

endif:
  %result = phi i32 [ %val0, %if ], [ %val1, %else ]
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }