; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn -amdgpu-spill-sgpr-to-vgpr=0 -verify-machineinstrs < %s | FileCheck -check-prefix=VMEM -check-prefix=GCN %s
; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn -amdgpu-spill-sgpr-to-vgpr=1 -verify-machineinstrs < %s | FileCheck -check-prefix=VGPR -check-prefix=GCN %s

; Verify registers used for tracking exec mask changes when all
; registers are spilled at the end of the block. The SGPR spill
; placement relative to the exec modifications is important.
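; The saved exec mask lives in an SGPR pair. With -amdgpu-spill-sgpr-to-vgpr=1
; (VGPR checks) it is expected to be spilled to VGPR lanes with v_writelane_b32
; and restored with v_readlane_b32; with -amdgpu-spill-sgpr-to-vgpr=0 (VMEM
; checks) it is expected to go through a VGPR to scratch with buffer_store_dword
; and come back via buffer_load_dword and v_readfirstlane_b32.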

; FIXME: This checks with SGPR to VGPR spilling disabled, but this may
; not work correctly in cases where no workitems take a branch.


; GCN-LABEL: {{^}}divergent_if_endif:
; VGPR: workitem_private_segment_byte_size = 12{{$}}


; GCN: {{^}}; BB#0:
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]

; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], s{{[0-9]+}}, v0
; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, [[CMP0]]
; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}

; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]


; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:4 ; 4-byte Folded Spill

; Spill load
; VMEM: buffer_store_dword [[LOAD0]], off, s[0:3], s7 offset:[[LOAD0_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; VGPR: buffer_store_dword [[LOAD0]], off, s[0:3], s7 ; 4-byte Folded Spill

; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}

; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN: mask branch [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: {{^}}BB{{[0-9]+}}_1: ; %if
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD1:v[0-9]+]]
; VMEM: buffer_load_dword [[RELOAD_LOAD0:v[0-9]+]], off, s[0:3], s7 offset:[[LOAD0_OFFSET]] ; 4-byte Folded Reload
; VGPR: buffer_load_dword [[RELOAD_LOAD0:v[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
; GCN: s_waitcnt vmcnt(0)

; Spill val register
; GCN: v_add_i32_e32 [[VAL:v[0-9]+]], vcc, [[LOAD1]], [[RELOAD_LOAD0]]
; GCN: buffer_store_dword [[VAL]], off, s[0:3], s7 offset:[[VAL_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0)

; VMEM: [[ENDIF]]:
; Reload and restore exec mask
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]


; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}

; Restore val
; GCN: buffer_load_dword [[RELOAD_VAL:v[0-9]+]], off, s[0:3], s7 offset:[[VAL_OFFSET]] ; 4-byte Folded Reload

; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RELOAD_VAL]]
define void @divergent_if_endif(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %if, label %endif

if:
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %val = add i32 %load0, %load1
  br label %endif

endif:
  %tmp4 = phi i32 [ %val, %if ], [ 0, %entry ]
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}divergent_loop:
; VGPR: workitem_private_segment_byte_size = 16{{$}}

; GCN: {{^}}; BB#0:

; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]

; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], s{{[0-9]+}}, v0

; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, [[CMP0]]
; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}

; Spill load
; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 ; 4-byte Folded Spill

; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]


; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:16 ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:20 ; 4-byte Folded Spill

; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}

; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: ; mask branch [[END:BB[0-9]+_[0-9]+]]
; GCN-NEXT: s_cbranch_execz [[END]]


; GCN: [[LOOP:BB[0-9]+_[0-9]+]]:
; GCN: buffer_load_dword v[[VAL_LOOP_RELOAD:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
; GCN: v_subrev_i32_e32 [[VAL_LOOP:v[0-9]+]], vcc, v{{[0-9]+}}, v[[VAL_LOOP_RELOAD]]
; GCN: v_cmp_ne_u32_e32 vcc,
; GCN: s_and_b64 vcc, exec, vcc
; GCN: buffer_store_dword [[VAL_LOOP]], off, s[0:3], s7 offset:[[VAL_SUB_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: s_cbranch_vccnz [[LOOP]]


; GCN: [[END]]:
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:16 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:20 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}
; GCN: buffer_load_dword v[[VAL_END:[0-9]+]], off, s[0:3], s7 offset:[[VAL_SUB_OFFSET]] ; 4-byte Folded Reload

; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[VAL_END]]
define void @divergent_loop(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %loop, label %end

loop:
  %i = phi i32 [ %i.inc, %loop ], [ 0, %entry ]
  %val = phi i32 [ %val.sub, %loop ], [ %load0, %entry ]
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %i.inc = add i32 %i, 1
  %val.sub = sub i32 %val, %load1
  %cmp1 = icmp ne i32 %i, 256
  br i1 %cmp1, label %loop, label %end

end:
  %tmp4 = phi i32 [ %val.sub, %loop ], [ 0, %entry ]
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}divergent_if_else_endif:
; GCN: {{^}}; BB#0:

; GCN: s_mov_b32 m0, -1
; VMEM: ds_read_b32 [[LOAD0:v[0-9]+]]

; GCN: v_cmp_ne_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0,

; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, [[CMP0]]
; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}

; Spill load
; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 ; 4-byte Folded Spill

; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]

; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:[[SAVEEXEC_LO_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:[[SAVEEXEC_HI_OFFSET:[0-9]+]] ; 4-byte Folded Spill

; GCN: s_mov_b64 exec, [[CMP0]]
; GCN: s_waitcnt vmcnt(0) expcnt(0)

; FIXME: It makes no sense to put this skip here
; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]
; GCN: s_cbranch_execz [[FLOW]]
; GCN-NEXT: s_branch [[ELSE:BB[0-9]+_[0-9]+]]

; GCN: [[FLOW]]: ; %Flow
; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]


; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:[[SAVEEXEC_LO_OFFSET]]
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[FLOW_V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:[[SAVEEXEC_HI_OFFSET]] ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[FLOW_V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_saveexec_b64 s{{\[}}[[FLOW_S_RELOAD_SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}, s{{\[}}[[FLOW_S_RELOAD_SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}

; Regular spill value restored after exec modification
; GCN: buffer_load_dword [[FLOW_VAL:v[0-9]+]], off, s[0:3], s7 offset:[[FLOW_VAL_OFFSET:[0-9]+]] ; 4-byte Folded Reload


; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_LO]], [[FLOW_SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_HI]], [[FLOW_SAVEEXEC_HI_LANE:[0-9]+]]


; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_LO:[0-9]+]], s[[FLOW_S_RELOAD_SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_LO]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_LO_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_HI:[0-9]+]], s[[FLOW_S_RELOAD_SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_HI]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_HI_OFFSET:[0-9]+]] ; 4-byte Folded Spill

; GCN: buffer_store_dword [[FLOW_VAL]], off, s[0:3], s7 offset:[[RESULT_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_xor_b64 exec, exec, s{{\[}}[[FLOW_S_RELOAD_SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9]+_[0-9]+]]
; GCN-NEXT: s_cbranch_execz [[ENDIF]]


; GCN: BB{{[0-9]+}}_2: ; %if
; GCN: ds_read_b32
; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]
; GCN: buffer_store_dword [[ADD]], off, s[0:3], s7 offset:[[RESULT_OFFSET]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: s_branch [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: [[ELSE]]: ; %else
; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
; GCN: v_subrev_i32_e32 [[SUB:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]
; GCN: buffer_store_dword [[SUB]], off, s[0:3], s7 offset:[[FLOW_RESULT_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: s_branch [[FLOW]]

; GCN: [[ENDIF]]:
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[FLOW_SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[FLOW_SAVEEXEC_HI_LANE]]


; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_LO_OFFSET]] ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_HI_OFFSET]] ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}

; GCN: buffer_load_dword v[[RESULT:[0-9]+]], off, s[0:3], s7 offset:[[RESULT_OFFSET]] ; 4-byte Folded Reload
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RESULT]]
define void @divergent_if_else_endif(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %if, label %else

if:
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %val0 = add i32 %load0, %load1
  br label %endif

else:
  %load2 = load volatile i32, i32 addrspace(3)* undef
  %val1 = sub i32 %load0, %load2
  br label %endif

endif:
  %result = phi i32 [ %val0, %if ], [ %val1, %else ]
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }