; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9,GCN %s
; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=FIJI,GCN %s
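
; Check SDWA conversion of 64-bit add/sub: when one operand is a byte
; zero-extended to i64, the low half should become an SDWA add/sub with
; src1_sel:BYTE_0 and the high half a separate carry/borrow instruction
; (carry-out opcodes are named *_co_* on GFX9 and plain *_u32 on VI/Fiji).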
; GCN-LABEL: {{^}}test_add_co_sdwa:
; GFX9: v_add_co_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9: v_addc_co_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; FIJI: v_add_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; FIJI: v_addc_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
define amdgpu_kernel void @test_add_co_sdwa(i64 addrspace(1)* %arg, i32 addrspace(1)* %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp
  %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4
  %tmp5 = and i32 %tmp4, 255
  %tmp6 = zext i32 %tmp5 to i64
  %tmp7 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp8 = load i64, i64 addrspace(1)* %tmp7, align 8
  %tmp9 = add nsw i64 %tmp8, %tmp6
  store i64 %tmp9, i64 addrspace(1)* %tmp7, align 8
  ret void
}
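
; On the sub side, the high half uses v_subbrev (reversed subtract) so the
; constant 0 can sit in src0: dst = src1 - 0 - borrow, i.e. the high 32 bits
; of %tmp8 minus the borrow produced by the SDWA low-half subtract.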
; GCN-LABEL: {{^}}test_sub_co_sdwa:
; GFX9: v_sub_co_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9: v_subbrev_co_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; FIJI: v_sub_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; FIJI: v_subbrev_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
define amdgpu_kernel void @test_sub_co_sdwa(i64 addrspace(1)* %arg, i32 addrspace(1)* %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp
  %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4
  %tmp5 = and i32 %tmp4, 255
  %tmp6 = zext i32 %tmp5 to i64
  %tmp7 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp8 = load i64, i64 addrspace(1)* %tmp7, align 8
  %tmp9 = sub nsw i64 %tmp8, %tmp6
  store i64 %tmp9, i64 addrspace(1)* %tmp7, align 8
  ret void
}
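
; Two independent 64-bit adds in one kernel: both are expected to be
; converted to SDWA add + addc pairs, i.e. the conversion must handle more
; than one vcc-defining SDWA candidate per function.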
; GCN-LABEL: {{^}}test1_add_co_sdwa:
; GFX9: v_add_co_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9: v_addc_co_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; GFX9: v_add_co_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9: v_addc_co_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; FIJI: v_add_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; FIJI: v_addc_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; FIJI: v_add_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; FIJI: v_addc_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
define amdgpu_kernel void @test1_add_co_sdwa(i64 addrspace(1)* %arg, i32 addrspace(1)* %arg1, i64 addrspace(1)* %arg2) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp
  %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4
  %tmp5 = and i32 %tmp4, 255
  %tmp6 = zext i32 %tmp5 to i64
  %tmp7 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp8 = load i64, i64 addrspace(1)* %tmp7, align 8
  %tmp9 = add nsw i64 %tmp8, %tmp6
  store i64 %tmp9, i64 addrspace(1)* %tmp7, align 8
  %tmp13 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp
  %tmp14 = load i32, i32 addrspace(1)* %tmp13, align 4
  %tmp15 = and i32 %tmp14, 255
  %tmp16 = zext i32 %tmp15 to i64
  %tmp17 = getelementptr inbounds i64, i64 addrspace(1)* %arg2, i32 %tmp
  %tmp18 = load i64, i64 addrspace(1)* %tmp17, align 8
  %tmp19 = add nsw i64 %tmp18, %tmp16
  store i64 %tmp19, i64 addrspace(1)* %tmp17, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()
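
; Attribute group #0 is referenced above but not defined in this excerpt; a
; minimal placeholder definition (assumed here, not from the original source)
; keeps the file self-contained.
attributes #0 = { nounwind }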