; Test that LDS (local memory, addrspace(3)) objects are lowered correctly on AMDGPU.
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN %s

@local_memory.local_mem = internal unnamed_addr addrspace(3) global [128 x i32] undef, align 4

; Check that the LDS size emitted correctly
; SI: .long 47180
; SI-NEXT: .long 65668
; CI: .long 47180
; CI-NEXT: .long 32900

; GCN-LABEL: {{^}}local_memory:

; GCN-NOT: s_wqm_b64
; GCN: ds_write_b32

; GCN: s_barrier

; GCN: ds_read_b32 {{v[0-9]+}},
; Each lane writes its workitem id into LDS, all lanes synchronize at the
; barrier, then each lane reads its neighbor's slot (wrapping 15 -> 0) and
; stores the value to global memory.
define void @local_memory(i32 addrspace(1)* %out) #0 {
entry:
  %y.i = call i32 @llvm.amdgcn.workitem.id.x() #1
  %arrayidx = getelementptr inbounds [128 x i32], [128 x i32] addrspace(3)* @local_memory.local_mem, i32 0, i32 %y.i
  store i32 %y.i, i32 addrspace(3)* %arrayidx, align 4
  %add = add nsw i32 %y.i, 1
  %cmp = icmp eq i32 %add, 16
  %.add = select i1 %cmp, i32 0, i32 %add
  call void @llvm.amdgcn.s.barrier()
  %arrayidx1 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(3)* @local_memory.local_mem, i32 0, i32 %.add
  %tmp = load i32, i32 addrspace(3)* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %y.i
  store i32 %tmp, i32 addrspace(1)* %arrayidx2, align 4
  ret void
}

@local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
@local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4

; Check that the LDS size emitted correctly
; EG: .long 166120
; EG-NEXT: .long 8
; GCN: .long 47180
; GCN-NEXT: .long 32900

; GCN-LABEL: {{^}}local_memory_two_objects:
; GCN: v_lshlrev_b32_e32 [[ADDRW:v[0-9]+]], 2, v0
; CI-DAG: ds_write2_b32 [[ADDRW]], {{v[0-9]+}}, {{v[0-9]+}} offset1:4

; SI: v_add_i32_e32 [[ADDRW_OFF:v[0-9]+]], vcc, 16, [[ADDRW]]

; SI-DAG: ds_write_b32 [[ADDRW]],
; SI-DAG: ds_write_b32 [[ADDRW_OFF]],

; GCN: s_barrier

; SI-DAG: v_sub_i32_e32 [[SUB0:v[0-9]+]], vcc, 28, [[ADDRW]]
; SI-DAG: v_sub_i32_e32 [[SUB1:v[0-9]+]], vcc, 12, [[ADDRW]]

; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB0]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB1]]

; CI: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, 0, [[ADDRW]]
; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, [[SUB]] offset0:3 offset1:7
; Writes to two distinct LDS arrays, synchronizes, then reads both back in
; reversed lane order (index 3 - tid) and stores the results to adjacent
; regions of the output buffer. Exercises allocation of multiple LDS objects
; and (on CI) combining of adjacent LDS accesses into ds_read2/ds_write2.
define void @local_memory_two_objects(i32 addrspace(1)* %out) #0 {
entry:
  %x.i = call i32 @llvm.amdgcn.workitem.id.x()
  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %x.i
  store i32 %x.i, i32 addrspace(3)* %arrayidx, align 4
  %mul = shl nsw i32 %x.i, 1
  %arrayidx1 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem1, i32 0, i32 %x.i
  store i32 %mul, i32 addrspace(3)* %arrayidx1, align 4
  %sub = sub nsw i32 3, %x.i
  call void @llvm.amdgcn.s.barrier()
  %arrayidx2 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %sub
  %tmp = load i32, i32 addrspace(3)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %x.i
  store i32 %tmp, i32 addrspace(1)* %arrayidx3, align 4
  %arrayidx4 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem1, i32 0, i32 %sub
  %tmp1 = load i32, i32 addrspace(3)* %arrayidx4, align 4
  %add = add nsw i32 %x.i, 4
  %arrayidx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %add
  store i32 %tmp1, i32 addrspace(1)* %arrayidx5, align 4
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare void @llvm.amdgcn.s.barrier() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { convergent nounwind }