; RUN: opt -mtriple=amdgcn-unknown-amdhsa -S -amdgpu-annotate-kernel-features < %s | FileCheck -check-prefix=HSA %s
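; Summary of what the HSA checks below verify: the annotate-kernel-features
; pass must detect addrspacecasts that are buried inside constant
; expressions (GEPs, ptrtoint, nested casts), not just casts appearing as
; instructions.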

declare void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* nocapture, i32 addrspace(4)* nocapture, i32, i1) #0

@lds.i32 = unnamed_addr addrspace(3) global i32 undef, align 4
@lds.arr = unnamed_addr addrspace(3) global [256 x i32] undef, align 4

@global.i32 = unnamed_addr addrspace(1) global i32 undef, align 4
@global.arr = unnamed_addr addrspace(1) global [256 x i32] undef, align 4

; HSA: @store_cast_0_flat_to_group_addrspacecast() #1
define amdgpu_kernel void @store_cast_0_flat_to_group_addrspacecast() #1 {
  store i32 7, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
  ret void
}

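; Casting a group (LDS) pointer to flat requires the group-segment aperture,
; which on HSA is read from the queue pointer, so the functions below that
; cast group pointers to flat are expected to get the "amdgpu-queue-ptr"
; attribute (attributes #2 at the end of the file).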
; HSA: @store_cast_0_group_to_flat_addrspacecast() #2
define amdgpu_kernel void @store_cast_0_group_to_flat_addrspacecast() #1 {
  store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*)
  ret void
}

; HSA: define amdgpu_kernel void @store_constant_cast_group_gv_to_flat() #2
define amdgpu_kernel void @store_constant_cast_group_gv_to_flat() #1 {
  store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds.i32 to i32 addrspace(4)*)
  ret void
}

; HSA: @store_constant_cast_group_gv_gep_to_flat() #2
define amdgpu_kernel void @store_constant_cast_group_gv_gep_to_flat() #1 {
  store i32 7, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
  ret void
}

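; Casts involving only global pointers need no aperture, so the two kernels
; below are expected to keep the plain attributes #1.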
; HSA: @store_constant_cast_global_gv_to_flat() #1
define amdgpu_kernel void @store_constant_cast_global_gv_to_flat() #1 {
  store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global.i32 to i32 addrspace(4)*)
  ret void
}

; HSA: @store_constant_cast_global_gv_gep_to_flat() #1
define amdgpu_kernel void @store_constant_cast_global_gv_gep_to_flat() #1 {
  store i32 7, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(1)* @global.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
  ret void
}

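; The cast must also be found when it is reached through the pointer operand
; of loads and atomics.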
; HSA: @load_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
define amdgpu_kernel void @load_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
  %val = load i32, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
  store i32 %val, i32 addrspace(1)* %out
  ret void
}

; HSA: @atomicrmw_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
define amdgpu_kernel void @atomicrmw_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
  %val = atomicrmw add i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 1 seq_cst
  store i32 %val, i32 addrspace(1)* %out
  ret void
}

; HSA: @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
define amdgpu_kernel void @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
  %val = cmpxchg i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 0, i32 1 seq_cst seq_cst
  %val0 = extractvalue { i32, i1 } %val, 0
  store i32 %val0, i32 addrspace(1)* %out
  ret void
}

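; The cast must also be found when it appears as a call argument rather than
; as a memory instruction's pointer operand.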
; HSA: @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
define amdgpu_kernel void @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
  call void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* align 4 %out, i32 addrspace(4)* align 4 getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 32, i1 false)
  ret void
}

; The cast appears here as the stored value, not as the pointer operand, so
; the pass cannot just inspect pointer operands.
; HSA: @store_value_constant_cast_lds_gv_gep_to_flat(i32 addrspace(4)* addrspace(1)* %out) #2
define amdgpu_kernel void @store_value_constant_cast_lds_gv_gep_to_flat(i32 addrspace(4)* addrspace(1)* %out) #1 {
  store i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 addrspace(4)* addrspace(1)* %out
  ret void
}

; The cast is hidden behind a ptrtoint, so the stored value is not even
; pointer-typed; the pass cannot just search for pointer-typed values.
; HSA: @store_ptrtoint_value_constant_cast_lds_gv_gep_to_flat(i64 addrspace(1)* %out) #2
define amdgpu_kernel void @store_ptrtoint_value_constant_cast_lds_gv_gep_to_flat(i64 addrspace(1)* %out) #1 {
  store i64 ptrtoint (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i64), i64 addrspace(1)* %out
  ret void
}

; Cast group to flat, do GEP, cast back to group
; HSA: @store_constant_cast_group_gv_gep_to_flat_to_group() #2
define amdgpu_kernel void @store_constant_cast_group_gv_gep_to_flat_to_group() #1 {
  store i32 7, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i32 addrspace(3)*)
  ret void
}

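; Non-kernel functions are annotated too; here the cast is only reachable
; through the returned constant expression.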
; HSA: @ret_constant_cast_group_gv_gep_to_flat_to_group() #2
define i32 addrspace(3)* @ret_constant_cast_group_gv_gep_to_flat_to_group() #1 {
  ret i32 addrspace(3)* addrspacecast (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i32 addrspace(3)*)
}

; HSA: attributes #0 = { argmemonly nounwind }
; HSA: attributes #1 = { nounwind }
; HSA: attributes #2 = { nounwind "amdgpu-queue-ptr" }

attributes #0 = { argmemonly nounwind }
attributes #1 = { nounwind }