; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s

; Disable optimizations in case there are optimizations added that
; specialize away generic pointer accesses.
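;
; Note: in this file, addrspace(4) is the AMDGPU flat (generic) address
; space; addrspacecast from global (1), local (3), or private (0) pointers
; produces flat pointers that are selected to flat_* instructions.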


; CHECK-LABEL: {{^}}branch_use_flat_i32:
; CHECK: flat_store_dword {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, [M0, FLAT_SCRATCH]
; CHECK: s_endpgm
define void @branch_use_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %gptr, i32 addrspace(3)* %lptr, i32 %x, i32 %c) #0 {
entry:
  %cmp = icmp ne i32 %c, 0
  br i1 %cmp, label %local, label %global

local:
  %flat_local = addrspacecast i32 addrspace(3)* %lptr to i32 addrspace(4)*
  br label %end

global:
  %flat_global = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  br label %end

end:
  %fptr = phi i32 addrspace(4)* [ %flat_local, %local ], [ %flat_global, %global ]
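  ; The phi merges flat pointers derived from local and global memory, so
  ; the store below must be selected to a flat store.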
  store i32 %x, i32 addrspace(4)* %fptr, align 4
; %val = load i32 addrspace(4)* %fptr, align 4
; store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}



; These testcases may become useless once optimizations that remove
; generic pointers are added.

; CHECK-LABEL: {{^}}store_flat_i32:
; CHECK: v_mov_b32_e32 v[[DATA:[0-9]+]], {{s[0-9]+}}
; CHECK: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: flat_store_dword v[[DATA]], v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
define void @store_flat_i32(i32 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  store i32 %x, i32 addrspace(4)* %fptr, align 4
  ret void
}

; CHECK-LABEL: {{^}}store_flat_i64:
; CHECK: flat_store_dwordx2
define void @store_flat_i64(i64 addrspace(1)* %gptr, i64 %x) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  store i64 %x, i64 addrspace(4)* %fptr, align 8
  ret void
}

; CHECK-LABEL: {{^}}store_flat_v4i32:
; CHECK: flat_store_dwordx4
define void @store_flat_v4i32(<4 x i32> addrspace(1)* %gptr, <4 x i32> %x) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  store <4 x i32> %x, <4 x i32> addrspace(4)* %fptr, align 16
  ret void
}

; CHECK-LABEL: {{^}}store_flat_trunc_i16:
; CHECK: flat_store_short
define void @store_flat_trunc_i16(i16 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %y = trunc i32 %x to i16
  store i16 %y, i16 addrspace(4)* %fptr, align 2
  ret void
}

; CHECK-LABEL: {{^}}store_flat_trunc_i8:
; CHECK: flat_store_byte
define void @store_flat_trunc_i8(i8 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %y = trunc i32 %x to i8
  store i8 %y, i8 addrspace(4)* %fptr, align 2
  ret void
}


; CHECK-LABEL: {{^}}load_flat_i32:
; CHECK: flat_load_dword
define void @load_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  %fload = load i32 addrspace(4)* %fptr, align 4
  store i32 %fload, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: {{^}}load_flat_i64:
; CHECK: flat_load_dwordx2
define void @load_flat_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  %fload = load i64 addrspace(4)* %fptr, align 4
  store i64 %fload, i64 addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: {{^}}load_flat_v4i32:
; CHECK: flat_load_dwordx4
define void @load_flat_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  %fload = load <4 x i32> addrspace(4)* %fptr, align 4
  store <4 x i32> %fload, <4 x i32> addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: {{^}}sextload_flat_i8:
; CHECK: flat_load_sbyte
define void @sextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8 addrspace(4)* %fptr, align 4
  %ext = sext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: {{^}}zextload_flat_i8:
; CHECK: flat_load_ubyte
define void @zextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8 addrspace(4)* %fptr, align 4
  %ext = zext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: {{^}}sextload_flat_i16:
; CHECK: flat_load_sshort
define void @sextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16 addrspace(4)* %fptr, align 4
  %ext = sext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: {{^}}zextload_flat_i16:
; CHECK: flat_load_ushort
define void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16 addrspace(4)* %fptr, align 4
  %ext = zext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}



; TODO: This should not be zero when registers are used for small
; scratch allocations again.

; Check for prologue initializing special SGPRs pointing to scratch.
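; With promote-alloca disabled, the 9 x i32 alloca below lives in scratch,
; so flat_scratch_hi is 0x28 (40 bytes, presumably the 36-byte allocation
; rounded up); with promote-alloca enabled the alloca is promoted to LDS
; and no scratch is allocated.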
; CHECK-LABEL: {{^}}store_flat_scratch:
; CHECK: s_movk_i32 flat_scratch_lo, 0
; CHECK-NO-PROMOTE: s_movk_i32 flat_scratch_hi, 0x28{{$}}
; CHECK-PROMOTE: s_movk_i32 flat_scratch_hi, 0x0{{$}}
; CHECK: flat_store_dword
; CHECK: s_barrier
; CHECK: flat_load_dword
define void @store_flat_scratch(i32 addrspace(1)* noalias %out, i32) #0 {
  %alloca = alloca i32, i32 9, align 4
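  ; 9 x i32 = 36 bytes of private (scratch) memory per work-item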
  %x = call i32 @llvm.r600.read.tidig.x() #3
  %pptr = getelementptr i32* %alloca, i32 %x
  %fptr = addrspacecast i32* %pptr to i32 addrspace(4)*
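  ; Accessing the private alloca through a flat pointer relies on the
  ; flat_scratch registers initialized in the prologue.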
  store i32 %x, i32 addrspace(4)* %fptr
  ; Dummy call
  call void @llvm.AMDGPU.barrier.local() #1
  %reload = load i32 addrspace(4)* %fptr, align 4
  store i32 %reload, i32 addrspace(1)* %out, align 4
  ret void
}

declare void @llvm.AMDGPU.barrier.local() #1
declare i32 @llvm.r600.read.tidig.x() #3

attributes #0 = { nounwind }
attributes #1 = { nounwind noduplicate }
attributes #3 = { nounwind readnone }