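; Tests selection of bitwise AND: AND_INT on Evergreen, and scalar
; (s_and_b32/s_and_b64) vs. vector (v_and_b32) forms on SI, across i1, i32,
; i64, and vector operands with register, literal, and inline immediate sources.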
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s

; FUNC-LABEL: {{^}}test2:
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32> addrspace(1) * %in
  %b = load <2 x i32> addrspace(1) * %b_ptr
  %result = and <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test4:
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32> addrspace(1) * %in
  %b = load <4 x i32> addrspace(1) * %b_ptr
  %result = and <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_i32:
; SI: s_and_b32
define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i32:
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32:
; SI: v_and_b32
define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
  %a = load i32 addrspace(1)* %aptr, align 4
  %b = load i32 addrspace(1)* %bptr, align 4
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i32:
; SI: v_and_b32
define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_i64:
; SI: s_and_b64
define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Should use SGPRs
; FUNC-LABEL: {{^}}s_and_i1:
; SI: v_and_b32
define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
  %and = and i1 %a, %b
  store i1 %and, i1 addrspace(1)* %out
  ret void
}

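; 281474976710655 == 0xffffffffffff (the low 48 bits set).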
; FUNC-LABEL: {{^}}s_and_constant_i64:
; SI: s_and_b64
define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %and = and i64 %a, 281474976710655
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64:
; SI: v_and_b32
; SI: v_and_b32
define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %a = load i64 addrspace(1)* %aptr, align 8
  %b = load i64 addrspace(1)* %bptr, align 8
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64_br:
; SI: v_and_b32
; SI: v_and_b32
define void @v_and_i64_br(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i32 %cond) {
entry:
  %tmp0 = icmp eq i32 %cond, 0
  br i1 %tmp0, label %if, label %endif

if:
  %a = load i64 addrspace(1)* %aptr, align 8
  %b = load i64 addrspace(1)* %bptr, align 8
  %and = and i64 %a, %b
  br label %endif

endif:
  %tmp1 = phi i64 [%and, %if], [0, %entry]
  store i64 %tmp1, i64 addrspace(1)* %out, align 8
  ret void
}

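; 1234567 is not an inline immediate, so the checks below expect it to be
; supplied from an SGPR rather than encoded in the instruction.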
; FUNC-LABEL: {{^}}v_and_constant_i64:
; SI: v_and_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_and_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 1234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Replace and 0 with mov 0
; FUNC-LABEL: {{^}}v_and_inline_imm_i64:
; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI: v_and_b32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}}
define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 64
define void @s_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}
159}