; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,IDXMODE,GFX9 %s

; indexing of vectors.

; Subtest below moved from file test/CodeGen/AMDGPU/indirect-addressing-si.ll
; to avoid gfx9 scheduling induced issues.


; GCN-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
; GCN-DAG: s_load_dwordx4 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT3:[0-9]+]]{{\]}}
; GCN-DAG: {{buffer|flat|global}}_load_dword [[IDX0:v[0-9]+]]
; GCN-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62

; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT3:[0-9]+]], s[[S_ELT3]]
; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT2:[0-9]+]], s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT1:[0-9]+]], s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT0:3]], s[[S_ELT0]]

; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; GCN: s_and_saveexec_b64 vcc, vcc

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL-NEXT: v_movreld_b32_e32 v[[VEC_ELT0]], [[INS0]]

; IDXMODE: s_set_gpr_idx_on [[READLANE]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v[[VEC_ELT0]], [[INS0]]
; IDXMODE: s_set_gpr_idx_off

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; GCN: s_mov_b64 exec, [[MASK:s\[[0-9]+:[0-9]+\]]]

; GCN: s_mov_b64 [[MASK]], exec

; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; GCN: s_and_saveexec_b64 vcc, vcc

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL-NEXT: v_movreld_b32_e32 v[[VEC_ELT1]], 63

; IDXMODE: s_set_gpr_idx_on [[READLANE]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v[[VEC_ELT1]], 63
; IDXMODE: s_set_gpr_idx_off

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP1]]

; GCN: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:

; GCN: buffer_store_dword [[INS0]]
; Kernel performing two back-to-back variable-index insertelements into the
; same <4 x i32> vector (indices %idx0 and %idx0+1, both divergent since they
; come from a per-lane load), then storing the result. Exercises the
; waterfall-loop lowering checked by the GCN/IDXMODE patterns above.
define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  ; Volatile load keeps the divergent index from being optimized away.
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  ; Inline asm pins the inserted value in a VGPR (matches [[INS0]] above).
  %live.out.val = call i32 asm sideeffect "v_mov_b32 $0, 62", "=v"()
  %vec1 = insertelement <4 x i32> %vec0, i32 %live.out.val, i32 %idx0
  %vec2 = insertelement <4 x i32> %vec1, i32 63, i32 %idx1
  store volatile <4 x i32> %vec2, <4 x i32> addrspace(1)* %out0
  ; Keep %live.out.val live across the branch in one lane only.
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.val, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; Intrinsic declarations and attribute groups referenced by the kernel above.
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare void @llvm.amdgcn.s.barrier() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind convergent }