; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; We may have subregister live ranges that are undefined on some paths. The
; verifier should not complain about this.

; CHECK-LABEL: {{^}}func:
; Diamond + second diamond CFG where %v0 is only partially defined on the
; %B0 path (one lane is undef), so the subregister live range for the phi
; input is undefined along that path. This must survive -verify-machineinstrs.
define amdgpu_kernel void @func() #0 {
B0:
  br i1 undef, label %B1, label %B2

B1:
  br label %B2

B2:
  %v0 = phi <4 x float> [ zeroinitializer, %B1 ], [ <float 0.0, float 0.0, float 0.0, float undef>, %B0 ]
  br i1 undef, label %B30.1, label %B30.2

B30.1:
  %sub = fsub <4 x float> %v0, undef
  br label %B30.2

B30.2:
  %v3 = phi <4 x float> [ %sub, %B30.1 ], [ %v0, %B2 ]
  %ve0 = extractelement <4 x float> %v3, i32 0
  store float %ve0, float addrspace(3)* undef, align 4
  ret void
}

; FIXME: Extra undef subregister copy should be removed before
; overwritten with defined copy
; CHECK-LABEL: {{^}}valley_partially_undef_copy:
; %tmp2 has lane 1 undef when passed through the loop in %bb9 and stored in
; %bb11; the copy of the undef subregister is later overwritten by a defined
; copy (see FIXME above).
define amdgpu_ps float @valley_partially_undef_copy() #0 {
bb:
  %tmp = load volatile i32, i32 addrspace(1)* undef, align 4
  %tmp1 = load volatile i32, i32 addrspace(1)* undef, align 4
  %tmp2 = insertelement <4 x i32> undef, i32 %tmp1, i32 0
  %tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
  %tmp3.cast = bitcast <4 x i32> %tmp3 to <4 x float>
  %tmp4 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tmp3.cast, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
  %tmp5 = extractelement <4 x float> %tmp4, i32 0
  %tmp6 = fmul float %tmp5, undef
  %tmp7 = fadd float %tmp6, %tmp6
  %tmp8 = insertelement <4 x i32> %tmp2, i32 %tmp, i32 1
  store <4 x i32> %tmp8, <4 x i32> addrspace(1)* undef, align 16
  store float %tmp7, float addrspace(1)* undef, align 4
  br label %bb9

bb9:                                              ; preds = %bb9, %bb
  %tmp10 = icmp eq i32 %tmp, 0
  br i1 %tmp10, label %bb9, label %bb11

bb11:                                             ; preds = %bb9
  store <4 x i32> %tmp2, <4 x i32> addrspace(1)* undef, align 16
  ret float undef
}

; FIXME: Should be able to remove the undef copies

; CHECK-LABEL: {{^}}partially_undef_copy:
; CHECK: v_mov_b32_e32 v5, 5
; CHECK: v_mov_b32_e32 v6, 6

; CHECK: v_mov_b32_e32 v[[OUTPUT_LO:[0-9]+]], v5

; Undef copy
; CHECK: v_mov_b32_e32 v1, v6

; undef copy
; CHECK: v_mov_b32_e32 v2, v7

; CHECK: v_mov_b32_e32 v[[OUTPUT_HI:[0-9]+]], v8
; CHECK: v_mov_b32_e32 v[[OUTPUT_LO]], v6

; CHECK: buffer_store_dwordx4 v{{\[}}[[OUTPUT_LO]]:[[OUTPUT_HI]]{{\]}}
; Only element 0 of %partially.undef.0 is defined (both insertelements write
; index 0), but the inline asm constraint forces the whole v[5:8] tuple live,
; producing copies of the undef elements (see FIXME above).
define amdgpu_kernel void @partially_undef_copy() #0 {
  %tmp0 = call i32 asm sideeffect "v_mov_b32_e32 v5, 5", "={VGPR5}"()
  %tmp1 = call i32 asm sideeffect "v_mov_b32_e32 v6, 6", "={VGPR6}"()

  %partially.undef.0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %partially.undef.1 = insertelement <4 x i32> %partially.undef.0, i32 %tmp1, i32 0

  store volatile <4 x i32> %partially.undef.1, <4 x i32> addrspace(1)* undef, align 16
  tail call void asm sideeffect "v_nop", "v={VGPR5_VGPR6_VGPR7_VGPR8}"(<4 x i32> %partially.undef.0)
  ret void
}

declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }