; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK -check-prefix=ALL -check-prefix=TOSGPR %s
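; The second run below is a sketch, not part of the original test: it assumes
; the experimental llc option -amdgpu-spill-sgpr-to-smem is available to force
; SGPR spills through SMEM, which is what the TOSMEM prefix checks. Without a
; run that enables the ALL/TOSGPR/TOSMEM prefixes, those directives are dead.
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -amdgpu-spill-sgpr-to-smem=1 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=TOSMEM %s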

; CHECK-LABEL: {{^}}max_14_sgprs:

; FIXME: Should be able to skip this copying of the private segment
; buffer because all the SGPR spills are to VGPRs.

; CHECK: s_mov_b64 s[6:7], s[2:3]
; CHECK: s_mov_b64 s[4:5], s[0:1]

; CHECK: SGPRBlocks: 1
; CHECK: NumSGPRsForWavesPerEU: 14
define void @max_14_sgprs(i32 addrspace(1)* %out1,
                          i32 addrspace(1)* %out2,
                          i32 addrspace(1)* %out3,
                          i32 addrspace(1)* %out4,
                          i32 %one, i32 %two, i32 %three, i32 %four) #0 {
  store i32 %one, i32 addrspace(1)* %out1
  store i32 %two, i32 addrspace(1)* %out2
  store i32 %three, i32 addrspace(1)* %out3
  store i32 %four, i32 addrspace(1)* %out4
  ret void
}
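; Sanity check on the SGPRBlocks values in this file (an assumed encoding, not
; stated by this test): the kernel descriptor stores a granulated count,
; SGPRBlocks = align(NumSGPRsForWavesPerEU, 8) / 8 - 1, which gives
; 14 -> 1, 16 -> 1, and 18 -> 2, matching every pair checked in this file.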

; private resource: 4
; scratch wave offset: 1
; workgroup ids: 3
; dispatch id: 2
; queue ptr: 2
; flat scratch init: 2
; ---------------------
; total: 14

; + reserved vcc, flat_scratch = 18

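; Worked sum of the accounting above: 4 + 1 + 3 + 2 + 2 + 2 = 14 input SGPRs,
; and 14 + 2 (vcc) + 2 (flat_scratch) = 18, the NumSGPRsForWavesPerEU value
; checked below for max_12_sgprs_14_input_sgprs.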
; Because we can't handle re-using the last few input registers as the
; special vcc etc. registers (and can't decide to drop the unused features
; once the number of registers is frozen), this ends up using more SGPRs
; than expected.

; ALL-LABEL: {{^}}max_12_sgprs_14_input_sgprs:
; TOSGPR: SGPRBlocks: 2
; TOSGPR: NumSGPRsForWavesPerEU: 18

; TOSMEM: s_mov_b64 s[6:7], s[2:3]
; TOSMEM: s_mov_b32 s9, s13
; TOSMEM: s_mov_b64 s[4:5], s[0:1]

; TOSMEM: SGPRBlocks: 2
; TOSMEM: NumSGPRsForWavesPerEU: 18
define void @max_12_sgprs_14_input_sgprs(i32 addrspace(1)* %out1,
                                         i32 addrspace(1)* %out2,
                                         i32 addrspace(1)* %out3,
                                         i32 addrspace(1)* %out4,
                                         i32 %one, i32 %two, i32 %three, i32 %four) #2 {
  store volatile i32 0, i32* undef
  %x.0 = call i32 @llvm.amdgcn.workgroup.id.x()
  store volatile i32 %x.0, i32 addrspace(1)* undef
  %x.1 = call i32 @llvm.amdgcn.workgroup.id.y()
  store volatile i32 %x.1, i32 addrspace(1)* undef
  %x.2 = call i32 @llvm.amdgcn.workgroup.id.z()
  store volatile i32 %x.2, i32 addrspace(1)* undef
  %x.3 = call i64 @llvm.amdgcn.dispatch.id()
  store volatile i64 %x.3, i64 addrspace(1)* undef
  %x.4 = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
  store volatile i8 addrspace(2)* %x.4, i8 addrspace(2)* addrspace(1)* undef
  %x.5 = call i8 addrspace(2)* @llvm.amdgcn.queue.ptr()
  store volatile i8 addrspace(2)* %x.5, i8 addrspace(2)* addrspace(1)* undef

  store i32 %one, i32 addrspace(1)* %out1
  store i32 %two, i32 addrspace(1)* %out2
  store i32 %three, i32 addrspace(1)* %out3
  store i32 %four, i32 addrspace(1)* %out4
  ret void
}

; ALL-LABEL: max_12_sgprs_12_input_sgprs{{$}}
; Make sure the copies for the input buffer are not clobbered. This
; requires swapping the order in which the registers are copied from
; what normally happens.

; TOSMEM: s_mov_b64 s[6:7], s[2:3]
; TOSMEM: s_mov_b64 s[4:5], s[0:1]
; TOSMEM: s_mov_b32 s3, s11

; ALL: SGPRBlocks: 1
; ALL: NumSGPRsForWavesPerEU: 16
define void @max_12_sgprs_12_input_sgprs(i32 addrspace(1)* %out1,
                                         i32 addrspace(1)* %out2,
                                         i32 addrspace(1)* %out3,
                                         i32 addrspace(1)* %out4,
                                         i32 %one, i32 %two, i32 %three, i32 %four) #2 {
  store volatile i32 0, i32* undef
  %x.0 = call i32 @llvm.amdgcn.workgroup.id.x()
  store volatile i32 %x.0, i32 addrspace(1)* undef
  %x.1 = call i32 @llvm.amdgcn.workgroup.id.y()
  store volatile i32 %x.1, i32 addrspace(1)* undef
  %x.2 = call i32 @llvm.amdgcn.workgroup.id.z()
  store volatile i32 %x.2, i32 addrspace(1)* undef
  %x.3 = call i64 @llvm.amdgcn.dispatch.id()
  store volatile i64 %x.3, i64 addrspace(1)* undef
  %x.4 = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
  store volatile i8 addrspace(2)* %x.4, i8 addrspace(2)* addrspace(1)* undef

  store i32 %one, i32 addrspace(1)* %out1
  store i32 %two, i32 addrspace(1)* %out2
  store i32 %three, i32 addrspace(1)* %out3
  store i32 %four, i32 addrspace(1)* %out4
  ret void
}

declare i32 @llvm.amdgcn.workgroup.id.x() #1
declare i32 @llvm.amdgcn.workgroup.id.y() #1
declare i32 @llvm.amdgcn.workgroup.id.z() #1
declare i64 @llvm.amdgcn.dispatch.id() #1
declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #1
declare i8 addrspace(2)* @llvm.amdgcn.queue.ptr() #1

attributes #0 = { nounwind "amdgpu-num-sgpr"="14" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind "amdgpu-num-sgpr"="12" }
attributes #3 = { nounwind "amdgpu-num-sgpr"="11" }
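; #0 caps the first kernel at 14 SGPRs; #2 caps the other two at 12, a budget
; that cannot be honored with this many inputs, which is why the checks above
; expect 16 and 18 instead. #3 is currently unreferenced.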