; RUN: llc -march=amdgcn -mcpu=gfx600 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX600 %s
; RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX700 %s
; RUN: llc -march=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX801 %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX900 %s
; RUN: llc -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN-DL --check-prefix=GFX906 %s
; xor(a, b) followed by a single-use not(-1 xor) on SGPRs should fold into
; one s_xnor_b32 on all tested subtargets.
; GCN-LABEL: {{^}}scalar_xnor_i32_one_use
; GCN: s_xnor_b32
define amdgpu_kernel void @scalar_xnor_i32_one_use(
    i32 addrspace(1)* %r0, i32 %a, i32 %b) {
entry:
  %xor = xor i32 %a, %b
  %r0.val = xor i32 %xor, -1
  store i32 %r0.val, i32 addrspace(1)* %r0
  ret void
}

; When the inner xor has a second use, the xnor fold must NOT fire:
; expect separate s_xor_b32 + s_not_b32, plus the s_add_i32 consuming
; the shared xor result.
; GCN-LABEL: {{^}}scalar_xnor_i32_mul_use
; GCN-NOT: s_xnor_b32
; GCN: s_xor_b32
; GCN: s_not_b32
; GCN: s_add_i32
define amdgpu_kernel void @scalar_xnor_i32_mul_use(
    i32 addrspace(1)* %r0, i32 addrspace(1)* %r1, i32 %a, i32 %b) {
entry:
  %xor = xor i32 %a, %b
  %r0.val = xor i32 %xor, -1
  %r1.val = add i32 %xor, %a
  store i32 %r0.val, i32 addrspace(1)* %r0
  store i32 %r1.val, i32 addrspace(1)* %r1
  ret void
}

; 64-bit variant: single-use xor + not folds into one s_xnor_b64.
; GCN-LABEL: {{^}}scalar_xnor_i64_one_use
; GCN: s_xnor_b64
define amdgpu_kernel void @scalar_xnor_i64_one_use(
    i64 addrspace(1)* %r0, i64 %a, i64 %b) {
entry:
  %xor = xor i64 %a, %b
  %r0.val = xor i64 %xor, -1
  store i64 %r0.val, i64 addrspace(1)* %r0
  ret void
}

; 64-bit multi-use variant: the shared xor blocks the xnor fold, so expect
; s_xor_b64 + s_not_b64, and the 64-bit add lowered as s_add_u32/s_addc_u32.
; GCN-LABEL: {{^}}scalar_xnor_i64_mul_use
; GCN-NOT: s_xnor_b64
; GCN: s_xor_b64
; GCN: s_not_b64
; GCN: s_add_u32
; GCN: s_addc_u32
define amdgpu_kernel void @scalar_xnor_i64_mul_use(
    i64 addrspace(1)* %r0, i64 addrspace(1)* %r1, i64 %a, i64 %b) {
entry:
  %xor = xor i64 %a, %b
  %r0.val = xor i64 %xor, -1
  %r1.val = add i64 %xor, %a
  store i64 %r0.val, i64 addrspace(1)* %r0
  store i64 %r1.val, i64 addrspace(1)* %r1
  ret void
}

; VALU path (non-kernel function, args in VGPRs): no scalar xnor; targets
; without the DL instructions emit v_xor_b32 + v_not_b32, while gfx906
; (GCN-DL) selects v_xnor_b32.
; GCN-LABEL: {{^}}vector_xnor_i32_one_use
; GCN-NOT: s_xnor_b32
; GCN: v_xor_b32
; GCN: v_not_b32
; GCN-DL: v_xnor_b32
define i32 @vector_xnor_i32_one_use(i32 %a, i32 %b) {
entry:
  %xor = xor i32 %a, %b
  %r = xor i32 %xor, -1
  ret i32 %r
}

; 64-bit VALU path is split into two 32-bit halves: two v_xor_b32 +
; two v_not_b32 normally, or two v_xnor_b32 on gfx906 (GCN-DL).
; GCN-LABEL: {{^}}vector_xnor_i64_one_use
; GCN-NOT: s_xnor_b64
; GCN: v_xor_b32
; GCN: v_xor_b32
; GCN: v_not_b32
; GCN: v_not_b32
; GCN-DL: v_xnor_b32
; GCN-DL: v_xnor_b32
define i64 @vector_xnor_i64_one_use(i64 %a, i64 %b) {
entry:
  %xor = xor i64 %a, %b
  %r = xor i64 %xor, -1
  ret i64 %r
}