blob: ca1b27e7cbc31994e1e6ec6d8ec9f46ce6e3e648 [file] [log] [blame]
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -amdgpu-spill-sgpr-to-smem=0 -verify-machineinstrs < %s | FileCheck -check-prefix=TOSGPR -check-prefix=ALL %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -amdgpu-spill-sgpr-to-smem=1 -verify-machineinstrs < %s | FileCheck -check-prefix=TOSMEM -check-prefix=ALL %s

; If spilling to smem, additional registers are used for the resource
; descriptor.

; ALL-LABEL: {{^}}max_9_sgprs:

; ALL: SGPRBlocks: 1
; ALL: NumSGPRsForWavesPerEU: 9
; Stores each of the four SGPR-passed scalar arguments so that nine SGPRs
; (four pointer pairs would exceed the cap; see attribute #0) are demanded.
define void @max_9_sgprs(i32 addrspace(1)* %out1,
                         i32 addrspace(1)* %out2,
                         i32 addrspace(1)* %out3,
                         i32 addrspace(1)* %out4,
                         i32 %one, i32 %two, i32 %three, i32 %four) #0 {
  store i32 %one, i32 addrspace(1)* %out1
  store i32 %two, i32 addrspace(1)* %out2
  store i32 %three, i32 addrspace(1)* %out3
  store i32 %four, i32 addrspace(1)* %out4
  ret void
}

; private resource: 4
; scratch wave offset: 1
; workgroup ids: 3
; dispatch id: 2
; queue ptr: 2
; flat scratch init: 2
; ---------------------
; total: 14

; + reserved vcc = 16

; Because we can't handle re-using the last few input registers as the
; special vcc etc. registers (as well as decide to not use the unused
; features when the number of registers is frozen), this ends up using
; more than expected.

; ALL-LABEL: {{^}}max_12_sgprs_14_input_sgprs:
; TOSGPR: SGPRBlocks: 1
; TOSGPR: NumSGPRsForWavesPerEU: 16

; TOSMEM: s_mov_b64 s[10:11], s[2:3]
; TOSMEM: s_mov_b64 s[8:9], s[0:1]
; TOSMEM: s_mov_b32 s7, s13

; TOSMEM: SGPRBlocks: 1
; TOSMEM: NumSGPRsForWavesPerEU: 16
; Uses every system-SGPR-consuming intrinsic (workgroup ids, dispatch id,
; dispatch ptr, queue ptr) via volatile stores so the corresponding input
; registers cannot be elided, driving total SGPR input demand to 14.
define void @max_12_sgprs_14_input_sgprs(i32 addrspace(1)* %out1,
                                         i32 addrspace(1)* %out2,
                                         i32 addrspace(1)* %out3,
                                         i32 addrspace(1)* %out4,
                                         i32 %one, i32 %two, i32 %three, i32 %four) #2 {
  store volatile i32 0, i32* undef
  %x.0 = call i32 @llvm.amdgcn.workgroup.id.x()
  store volatile i32 %x.0, i32 addrspace(1)* undef
  %x.1 = call i32 @llvm.amdgcn.workgroup.id.y()
  store volatile i32 %x.0, i32 addrspace(1)* undef
  %x.2 = call i32 @llvm.amdgcn.workgroup.id.z()
  store volatile i32 %x.0, i32 addrspace(1)* undef
  %x.3 = call i64 @llvm.amdgcn.dispatch.id()
  store volatile i64 %x.3, i64 addrspace(1)* undef
  %x.4 = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
  store volatile i8 addrspace(2)* %x.4, i8 addrspace(2)* addrspace(1)* undef
  %x.5 = call i8 addrspace(2)* @llvm.amdgcn.queue.ptr()
  store volatile i8 addrspace(2)* %x.5, i8 addrspace(2)* addrspace(1)* undef

  store i32 %one, i32 addrspace(1)* %out1
  store i32 %two, i32 addrspace(1)* %out2
  store i32 %three, i32 addrspace(1)* %out3
  store i32 %four, i32 addrspace(1)* %out4
  ret void
}

; The following test is commented out for now; http://llvm.org/PR31230
; XALL-LABEL: max_12_sgprs_12_input_sgprs{{$}}
; ; Make sure copies for input buffer are not clobbered. This requires
; ; swapping the order the registers are copied from what normally
; ; happens.

; XTOSMEM: s_mov_b32 s5, s11
; XTOSMEM: s_add_u32 m0, s5,
; XTOSMEM: s_buffer_store_dword vcc_lo, s[0:3], m0

; XALL: SGPRBlocks: 2
; XALL: NumSGPRsForWavesPerEU: 18
;define void @max_12_sgprs_12_input_sgprs(i32 addrspace(1)* %out1,
;                                         i32 addrspace(1)* %out2,
;                                         i32 addrspace(1)* %out3,
;                                         i32 addrspace(1)* %out4,
;                                         i32 %one, i32 %two, i32 %three, i32 %four) #2 {
;  store volatile i32 0, i32* undef
;  %x.0 = call i32 @llvm.amdgcn.workgroup.id.x()
;  store volatile i32 %x.0, i32 addrspace(1)* undef
;  %x.1 = call i32 @llvm.amdgcn.workgroup.id.y()
;  store volatile i32 %x.0, i32 addrspace(1)* undef
;  %x.2 = call i32 @llvm.amdgcn.workgroup.id.z()
;  store volatile i32 %x.0, i32 addrspace(1)* undef
;  %x.3 = call i64 @llvm.amdgcn.dispatch.id()
;  store volatile i64 %x.3, i64 addrspace(1)* undef
;  %x.4 = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
;  store volatile i8 addrspace(2)* %x.4, i8 addrspace(2)* addrspace(1)* undef
;
;  store i32 %one, i32 addrspace(1)* %out1
;  store i32 %two, i32 addrspace(1)* %out2
;  store i32 %three, i32 addrspace(1)* %out3
;  store i32 %four, i32 addrspace(1)* %out4
;  ret void
;}

; Intrinsic declarations (readnone) and the SGPR-cap attribute groups
; referenced by the test functions above.
declare i32 @llvm.amdgcn.workgroup.id.x() #1
declare i32 @llvm.amdgcn.workgroup.id.y() #1
declare i32 @llvm.amdgcn.workgroup.id.z() #1
declare i64 @llvm.amdgcn.dispatch.id() #1
declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #1
declare i8 addrspace(2)* @llvm.amdgcn.queue.ptr() #1

attributes #0 = { nounwind "amdgpu-num-sgpr"="14" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind "amdgpu-num-sgpr"="12" }
attributes #3 = { nounwind "amdgpu-num-sgpr"="11" }