; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI --check-prefix=CHECK %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=CI --check-prefix=CHECK %s

declare i32 @llvm.r600.read.tidig.x() #0
declare void @llvm.AMDGPU.barrier.local() #1

; Function Attrs: nounwind
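; Check that the load/store optimizer's use of DS instruction offsets differs
; between SI and CI when the base address comes from the thread ID and so
; cannot be proven non-negative. SI is expected to compute each address with
; a separate v_add_i32 and leave the ds_read offset field unused, while CI
; folds the constant offsets and pairs neighboring loads into ds_read2_b32
; (presumably because CI handles potentially negative DS base addresses
; safely where SI does not).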
; CHECK-LABEL: {{^}}signed_ds_offset_addressing_loop:
; CHECK: BB0_1:
; CHECK: v_add_i32_e32 [[VADDR:v[0-9]+]],
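; On SI, all five loads use a plain computed address. The 4, 0x80, 0x84 and
; 0x100 byte offsets are each materialized with their own v_add_i32 instead
; of being folded into the ds_read_b32 instruction.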
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR]]
; SI-DAG: v_add_i32_e32 [[VADDR4:v[0-9]+]], 4, [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR4]]
; SI-DAG: v_add_i32_e32 [[VADDR0x80:v[0-9]+]], 0x80, [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR0x80]]
; SI-DAG: v_add_i32_e32 [[VADDR0x84:v[0-9]+]], 0x84, [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR0x84]]
; SI-DAG: v_add_i32_e32 [[VADDR0x100:v[0-9]+]], 0x100, [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR0x100]]

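; On CI, the constant offsets are folded into the DS instructions. Elements
; 0/1 and 32/33 are paired into ds_read2_b32 (offset0/offset1 are counted in
; 4-byte elements), and the load at element 64 uses a single ds_read_b32 with
; byte offset 256.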
; CI-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[VADDR]] offset0:0 offset1:1
; CI-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[VADDR]] offset0:32 offset1:33
; CI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR]] offset:256
; CHECK: s_endpgm
define void @signed_ds_offset_addressing_loop(float addrspace(1)* noalias nocapture %out, float addrspace(3)* noalias nocapture readonly %lptr, i32 %n) #2 {
entry:
  %x.i = tail call i32 @llvm.r600.read.tidig.x() #0
  %mul = shl nsw i32 %x.i, 1
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %sum.03 = phi float [ 0.000000e+00, %entry ], [ %add13, %for.body ]
  %offset.02 = phi i32 [ %mul, %entry ], [ %add14, %for.body ]
  %k.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  tail call void @llvm.AMDGPU.barrier.local() #1
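  ; Load five values at element offsets 0, 1, 32, 33 and 64 from a base that
  ; advances by 97 elements per iteration; in bytes these are the 0, 4, 0x80,
  ; 0x84 and 0x100 offsets the checks above look for.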
  %arrayidx = getelementptr inbounds float addrspace(3)* %lptr, i32 %offset.02
  %tmp = load float addrspace(3)* %arrayidx, align 4
  %add1 = add nsw i32 %offset.02, 1
  %arrayidx2 = getelementptr inbounds float addrspace(3)* %lptr, i32 %add1
  %tmp1 = load float addrspace(3)* %arrayidx2, align 4
  %add3 = add nsw i32 %offset.02, 32
  %arrayidx4 = getelementptr inbounds float addrspace(3)* %lptr, i32 %add3
  %tmp2 = load float addrspace(3)* %arrayidx4, align 4
  %add5 = add nsw i32 %offset.02, 33
  %arrayidx6 = getelementptr inbounds float addrspace(3)* %lptr, i32 %add5
  %tmp3 = load float addrspace(3)* %arrayidx6, align 4
  %add7 = add nsw i32 %offset.02, 64
  %arrayidx8 = getelementptr inbounds float addrspace(3)* %lptr, i32 %add7
  %tmp4 = load float addrspace(3)* %arrayidx8, align 4
  %add9 = fadd float %tmp, %tmp1
  %add10 = fadd float %add9, %tmp2
  %add11 = fadd float %add10, %tmp3
  %add12 = fadd float %add11, %tmp4
  %add13 = fadd float %sum.03, %add12
  %inc = add nsw i32 %k.01, 1
  %add14 = add nsw i32 %offset.02, 97
  %exitcond = icmp eq i32 %inc, 8
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  %tmp5 = sext i32 %x.i to i64
  %arrayidx15 = getelementptr inbounds float addrspace(1)* %out, i64 %tmp5
  store float %add13, float addrspace(1)* %arrayidx15, align 4
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { noduplicate nounwind }
attributes #2 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }