; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s
; RUN: llc -O0 -march=amdgcn -mcpu=tonga -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
; RUN: llc -O0 -march=amdgcn -mcpu=tonga -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s

; Optimizations are disabled (-O0) in case optimizations are added later
; that would specialize away the generic pointer accesses.
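;
; As an illustration (not checked by this file), such a pass could observe
; that a flat pointer like
;   %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
;   store i32 %x, i32 addrspace(4)* %fptr, align 4
; always points into the global address space and rewrite the store to use
; %gptr directly, so no flat instruction would be selected.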


; These testcases may become useless once optimizations that remove
; generic pointers are added.

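; Throughout this file addrspace(1) is the global address space and
; addrspace(4) is the generic (flat) address space; accesses through the
; addrspacecast'd pointers are expected to select flat_load_*/flat_store_*
; instructions on both bonaire (CI) and tonga (VI).
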
; CHECK-LABEL: {{^}}store_flat_i32:
; CHECK: v_mov_b32_e32 v[[DATA:[0-9]+]], {{s[0-9]+}}
; CHECK: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: flat_store_dword v[[DATA]], v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
define void @store_flat_i32(i32 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  store i32 %x, i32 addrspace(4)* %fptr, align 4
  ret void
}

; CHECK-LABEL: {{^}}store_flat_i64:
; CHECK: flat_store_dwordx2
define void @store_flat_i64(i64 addrspace(1)* %gptr, i64 %x) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  store i64 %x, i64 addrspace(4)* %fptr, align 8
  ret void
}

; CHECK-LABEL: {{^}}store_flat_v4i32:
; CHECK: flat_store_dwordx4
define void @store_flat_v4i32(<4 x i32> addrspace(1)* %gptr, <4 x i32> %x) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  store <4 x i32> %x, <4 x i32> addrspace(4)* %fptr, align 16
  ret void
}

; CHECK-LABEL: {{^}}store_flat_trunc_i16:
; CHECK: flat_store_short
define void @store_flat_trunc_i16(i16 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %y = trunc i32 %x to i16
  store i16 %y, i16 addrspace(4)* %fptr, align 2
  ret void
}

; CHECK-LABEL: {{^}}store_flat_trunc_i8:
; CHECK: flat_store_byte
define void @store_flat_trunc_i8(i8 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %y = trunc i32 %x to i8
  store i8 %y, i8 addrspace(4)* %fptr, align 2
  ret void
}


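; The load tests mirror the stores: plain flat loads of i32, i64, and
; <4 x i32>, followed by the sign- and zero-extending i8/i16 variants.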
; CHECK-LABEL: load_flat_i32:
; CHECK: flat_load_dword
define void @load_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  %fload = load i32, i32 addrspace(4)* %fptr, align 4
  store i32 %fload, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: load_flat_i64:
; CHECK: flat_load_dwordx2
define void @load_flat_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  %fload = load i64, i64 addrspace(4)* %fptr, align 4
  store i64 %fload, i64 addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: load_flat_v4i32:
; CHECK: flat_load_dwordx4
define void @load_flat_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  %fload = load <4 x i32>, <4 x i32> addrspace(4)* %fptr, align 4
  store <4 x i32> %fload, <4 x i32> addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: sextload_flat_i8:
; CHECK: flat_load_sbyte
define void @sextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8, i8 addrspace(4)* %fptr, align 4
  %ext = sext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: zextload_flat_i8:
; CHECK: flat_load_ubyte
define void @zextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8, i8 addrspace(4)* %fptr, align 4
  %ext = zext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: sextload_flat_i16:
; CHECK: flat_load_sshort
define void @sextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16, i16 addrspace(4)* %fptr, align 4
  %ext = sext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: zextload_flat_i16:
; CHECK: flat_load_ushort
define void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16, i16 addrspace(4)* %fptr, align 4
  %ext = zext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

declare void @llvm.AMDGPU.barrier.local() #1
declare i32 @llvm.r600.read.tidig.x() #3

attributes #0 = { nounwind }
attributes #1 = { nounwind noduplicate }
attributes #3 = { nounwind readnone }