; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn -amdgpu-spill-sgpr-to-vgpr=0 -verify-machineinstrs < %s | FileCheck -check-prefix=VMEM -check-prefix=GCN %s
; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn -amdgpu-spill-sgpr-to-vgpr=1 -verify-machineinstrs < %s | FileCheck -check-prefix=VGPR -check-prefix=GCN %s
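; The first RUN line (-amdgpu-spill-sgpr-to-vgpr=0, VMEM prefix) forces the
; saved SGPRs to be spilled to scratch memory; the second
; (-amdgpu-spill-sgpr-to-vgpr=1, VGPR prefix) lets them be spilled to VGPR
; lanes instead.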

; Verify registers used for tracking exec mask changes when all
; registers are spilled at the end of the block. The SGPR spill
; placement relative to the exec modifications is important.

; FIXME: This checks with SGPR to VGPR spilling disabled, but this may
; not work correctly in cases where no workitems take a branch.
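
; In outline, the checks below verify the exec mask handling around the
; divergent regions: the live mask is copied with s_mov_b64, the taken-lane
; mask is formed with s_and_b64 and s_xor_b64, the saved mask is spilled
; across the region (v_writelane_b32/v_readlane_b32 to a VGPR, or
; buffer_store_dword/buffer_load_dword plus v_readfirstlane_b32 to scratch),
; and exec is restored with s_or_b64 at the join point.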


; GCN-LABEL: {{^}}divergent_if_endif:

; GCN: {{^}}; BB#0:
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]

; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], s{{[0-9]+}}, v0
; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, [[CMP0]]
; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}

; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]


; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 ; 8-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:4 ; 8-byte Folded Spill

; Spill load
; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 offset:[[LOAD0_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}

; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN: mask branch [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: {{^}}BB{{[0-9]+}}_1: ; %if
; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD1:v[0-9]+]]
; GCN: buffer_load_dword [[RELOAD_LOAD0:v[0-9]+]], off, s[0:3], s7 offset:[[LOAD0_OFFSET]] ; 4-byte Folded Reload
; GCN: s_waitcnt vmcnt(0)

; Spill val register
; GCN: v_add_i32_e32 [[VAL:v[0-9]+]], vcc, [[LOAD1]], [[RELOAD_LOAD0]]
; GCN: buffer_store_dword [[VAL]], off, s[0:3], s7 offset:[[VAL_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0)

; VMEM: [[ENDIF]]:
; Reload and restore exec mask
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]


; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 ; 8-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:4 ; 8-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}

; Restore val
; GCN: buffer_load_dword [[RELOAD_VAL:v[0-9]+]], off, s[0:3], s7 offset:[[VAL_OFFSET]] ; 4-byte Folded Reload

; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RELOAD_VAL]]
define void @divergent_if_endif(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %if, label %endif

if:
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %val = add i32 %load0, %load1
  br label %endif

endif:
  %tmp4 = phi i32 [ %val, %if ], [ 0, %entry ]
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}divergent_loop:
; GCN: {{^}}; BB#0:

; GCN: s_mov_b32 m0, -1
; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]

; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], s{{[0-9]+}}, v0

; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, [[CMP0]]
; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}

; Spill load
; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 ; 4-byte Folded Spill

; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]


; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:12 ; 8-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:16 ; 8-byte Folded Spill

; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}

; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: ; mask branch [[END:BB[0-9]+_[0-9]+]]
; GCN-NEXT: s_cbranch_execz [[END]]


; GCN: [[LOOP:BB[0-9]+_[0-9]+]]:
; GCN: buffer_load_dword v[[VAL_LOOP_RELOAD:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
; GCN: v_subrev_i32_e32 [[VAL_LOOP:v[0-9]+]], vcc, v{{[0-9]+}}, v[[VAL_LOOP_RELOAD]]
; GCN: v_cmp_ne_u32_e32 vcc,
; GCN: s_and_b64 vcc, exec, vcc
; GCN: buffer_store_dword [[VAL_LOOP]], off, s[0:3], s7 offset:[[VAL_SUB_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: s_cbranch_vccnz [[LOOP]]


; GCN: [[END]]:
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:12 ; 8-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:16 ; 8-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}
; GCN: buffer_load_dword v[[VAL_END:[0-9]+]], off, s[0:3], s7 offset:[[VAL_SUB_OFFSET]] ; 4-byte Folded Reload

; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[VAL_END]]
define void @divergent_loop(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %loop, label %end

loop:
  %i = phi i32 [ %i.inc, %loop ], [ 0, %entry ]
  %val = phi i32 [ %val.sub, %loop ], [ %load0, %entry ]
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %i.inc = add i32 %i, 1
  %val.sub = sub i32 %val, %load1
  %cmp1 = icmp ne i32 %i, 256
  br i1 %cmp1, label %loop, label %end

end:
  %tmp4 = phi i32 [ %val.sub, %loop ], [ 0, %entry ]
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}divergent_if_else_endif:
; GCN: {{^}}; BB#0:

; GCN: s_mov_b32 m0, -1
; VMEM: ds_read_b32 [[LOAD0:v[0-9]+]]

; GCN: v_cmp_ne_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0,

; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, [[CMP0]]
; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}

; Spill load
; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 ; 4-byte Folded Spill

; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]

; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:[[SAVEEXEC_LO_OFFSET:[0-9]+]] ; 8-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:[[SAVEEXEC_HI_OFFSET:[0-9]+]] ; 8-byte Folded Spill

; GCN: s_mov_b64 exec, [[CMP0]]
; GCN: s_waitcnt vmcnt(0) expcnt(0)

; FIXME: It makes no sense to put this skip here
; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]
; GCN: s_cbranch_execz [[FLOW]]
; GCN-NEXT: s_branch [[ELSE:BB[0-9]+_[0-9]+]]

; GCN: [[FLOW]]: ; %Flow
; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]


; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:[[SAVEEXEC_LO_OFFSET]]
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[FLOW_V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:[[SAVEEXEC_HI_OFFSET]] ; 8-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[FLOW_V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_saveexec_b64 s{{\[}}[[FLOW_S_RELOAD_SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}, s{{\[}}[[FLOW_S_RELOAD_SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}

; Regular spill value restored after exec modification
; GCN: buffer_load_dword [[FLOW_VAL:v[0-9]+]], off, s[0:3], s7 offset:[[FLOW_VAL_OFFSET:[0-9]+]] ; 4-byte Folded Reload


; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_LO]], [[FLOW_SAVEEXEC_LO_LANE:[0-9]+]]
; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_HI]], [[FLOW_SAVEEXEC_HI_LANE:[0-9]+]]


; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_LO:[0-9]+]], s[[FLOW_S_RELOAD_SAVEEXEC_LO]]
; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_LO]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_LO_OFFSET:[0-9]+]] ; 8-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_HI:[0-9]+]], s[[FLOW_S_RELOAD_SAVEEXEC_HI]]
; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_HI]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_HI_OFFSET:[0-9]+]] ; 8-byte Folded Spill

; GCN: buffer_store_dword [[FLOW_VAL]], off, s[0:3], s7 offset:[[RESULT_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_xor_b64 exec, exec, s{{\[}}[[FLOW_S_RELOAD_SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9]+_[0-9]+]]
; GCN-NEXT: s_cbranch_execz [[ENDIF]]


; GCN: BB{{[0-9]+}}_2: ; %if
; GCN: ds_read_b32
; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]
; GCN: buffer_store_dword [[ADD]], off, s[0:3], s7 offset:[[RESULT_OFFSET]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: s_branch [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: [[ELSE]]: ; %else
; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
; GCN: v_subrev_i32_e32 [[SUB:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]
; GCN: buffer_store_dword [[SUB]], off, s[0:3], s7 offset:[[FLOW_RESULT_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: s_branch [[FLOW]]

; GCN: [[ENDIF]]:
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[FLOW_SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[FLOW_SAVEEXEC_HI_LANE]]


; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_LO_OFFSET]] ; 8-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]

; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:[[FLOW_SAVEEXEC_HI_OFFSET]] ; 8-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]

; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}

; GCN: buffer_load_dword v[[RESULT:[0-9]+]], off, s[0:3], s7 offset:[[RESULT_OFFSET]] ; 4-byte Folded Reload
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RESULT]]
define void @divergent_if_else_endif(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %load0 = load volatile i32, i32 addrspace(3)* undef
  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %if, label %else

if:
  %load1 = load volatile i32, i32 addrspace(3)* undef
  %val0 = add i32 %load0, %load1
  br label %endif

else:
  %load2 = load volatile i32, i32 addrspace(3)* undef
  %val1 = sub i32 %load0, %load2
  br label %endif

endif:
  %result = phi i32 [ %val0, %if ], [ %val1, %else ]
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }