; RUN: llc -march=amdgcn -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -check-prefix=SI --check-prefix=CHECK %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -check-prefix=CI --check-prefix=CHECK %s
; RUN: llc -march=amdgcn -verify-machineinstrs -mattr=+load-store-opt,+unsafe-ds-offset-folding < %s | FileCheck -check-prefix=CI --check-prefix=CHECK %s

declare i32 @llvm.amdgcn.workitem.id.x() #0
declare void @llvm.amdgcn.s.barrier() #1

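; Check how constant offsets from a common, loop-varying base address are
; selected for DS loads. The SI run (safe folding only) expects a separate
; v_add_i32 per non-zero offset feeding a plain ds_read_b32. The CI run, and
; the SI run with -mattr=+unsafe-ds-offset-folding, expect the same accesses
; to be folded into the instruction offset fields: two ds_read2_b32
; (offset1:2 and offset0:32 offset1:34, in dword units) plus a ds_read_b32
; with offset:256 (bytes).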
; Function Attrs: nounwind
; CHECK-LABEL: {{^}}signed_ds_offset_addressing_loop:
; CHECK: BB0_1:
; CHECK: v_add_i32_e32 [[VADDR:v[0-9]+]],
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR]]
; SI-DAG: v_add_i32_e32 [[VADDR8:v[0-9]+]], vcc, 8, [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR8]]
; SI-DAG: v_add_i32_e32 [[VADDR0x80:v[0-9]+]], vcc, 0x80, [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR0x80]]
; SI-DAG: v_add_i32_e32 [[VADDR0x88:v[0-9]+]], vcc, 0x88, [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR0x88]]
; SI-DAG: v_add_i32_e32 [[VADDR0x100:v[0-9]+]], vcc, 0x100, [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR0x100]]

; CI-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[VADDR]] offset1:2
; CI-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[VADDR]] offset0:32 offset1:34
; CI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR]] offset:256
; CHECK: s_endpgm
define amdgpu_kernel void @signed_ds_offset_addressing_loop(float addrspace(1)* noalias nocapture %out, float addrspace(3)* noalias nocapture readonly %lptr, i32 %n) #2 {
entry:
  %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #0
  %mul = shl nsw i32 %x.i, 1
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %sum.03 = phi float [ 0.000000e+00, %entry ], [ %add13, %for.body ]
  %offset.02 = phi i32 [ %mul, %entry ], [ %add14, %for.body ]
  %k.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  tail call void @llvm.amdgcn.s.barrier() #1
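  ; Five loads per iteration at element offsets 0, 2, 32, 34 and 64 from the
  ; varying base, i.e. byte offsets 0, 8, 0x80, 0x88 and 0x100 relative to
  ; [[VADDR]] in the checks above.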
  %arrayidx = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %offset.02
  %tmp = load float, float addrspace(3)* %arrayidx, align 4
  %add1 = add nsw i32 %offset.02, 2
  %arrayidx2 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add1
  %tmp1 = load float, float addrspace(3)* %arrayidx2, align 4
  %add3 = add nsw i32 %offset.02, 32
  %arrayidx4 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add3
  %tmp2 = load float, float addrspace(3)* %arrayidx4, align 4
  %add5 = add nsw i32 %offset.02, 34
  %arrayidx6 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add5
  %tmp3 = load float, float addrspace(3)* %arrayidx6, align 4
  %add7 = add nsw i32 %offset.02, 64
  %arrayidx8 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add7
  %tmp4 = load float, float addrspace(3)* %arrayidx8, align 4
  %add9 = fadd float %tmp, %tmp1
  %add10 = fadd float %add9, %tmp2
  %add11 = fadd float %add10, %tmp3
  %add12 = fadd float %add11, %tmp4
  %add13 = fadd float %sum.03, %add12
  %inc = add nsw i32 %k.01, 1
  %add14 = add nsw i32 %offset.02, 97
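  ; The base index advances by 97 elements per iteration; the loop runs 8 times.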
  %exitcond = icmp eq i32 %inc, 8
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  %tmp5 = sext i32 %x.i to i64
  %arrayidx15 = getelementptr inbounds float, float addrspace(1)* %out, i64 %tmp5
  store float %add13, float addrspace(1)* %arrayidx15, align 4
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { convergent nounwind }
attributes #2 = { nounwind }