; RUN: llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=GCN %s
; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=GCN %s

; GCN-LABEL: {{^}}inline_reg_constraints:
; GCN: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; GCN: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
; GCN: flat_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dwordx8 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
; Each call exercises one register-constraint/width pairing: "v" (VGPR) for the
; flat loads at 32/64/128 bits, "s" (SGPR) for the scalar loads at 32-256 bits.
define amdgpu_kernel void @inline_reg_constraints(i32 addrspace(1)* %ptr) {
entry:
  %v32 = tail call i32 asm sideeffect "flat_load_dword $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
  %v64 = tail call <2 x i32> asm sideeffect "flat_load_dwordx2 $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
  %v128 = tail call <4 x i32> asm sideeffect "flat_load_dwordx4 $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
  %s32 = tail call i32 asm sideeffect "s_load_dword $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  %s64 = tail call <2 x i32> asm sideeffect "s_load_dwordx2 $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  %s128 = tail call <4 x i32> asm sideeffect "s_load_dwordx4 $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  %s256 = tail call <8 x i32> asm sideeffect "s_load_dwordx8 $0, $1", "=s,s"(i32 addrspace(1)* %ptr)
  ret void
}

; FIXME: Should be able to avoid copy
; GCN-LABEL: {{^}}inline_sreg_constraint_m0:
; GCN: s_mov_b32 m0, -1
; GCN: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
; GCN: ; use [[COPY_M0]]
; Explicit physical-register constraint {M0}: the asm result is tied to m0 and
; then consumed through a generic "s" input.
define amdgpu_kernel void @inline_sreg_constraint_m0() {
  %m0 = tail call i32 asm sideeffect "s_mov_b32 m0, -1", "={M0}"()
  tail call void asm sideeffect "; use $0", "s"(i32 %m0)
  ret void
}

; GCN-LABEL: {{^}}inline_sreg_constraint_imm_i32:
; GCN: s_mov_b32 [[REG:s[0-9]+]], 32
; GCN: ; use [[REG]]
; Immediate i32 operand materialized into an SGPR to satisfy the "s" constraint.
define amdgpu_kernel void @inline_sreg_constraint_imm_i32() {
  tail call void asm sideeffect "; use $0", "s"(i32 32)
  ret void
}

; GCN-LABEL: {{^}}inline_sreg_constraint_imm_f32:
; GCN: s_mov_b32 [[REG:s[0-9]+]], 1.0
; GCN: ; use [[REG]]
; Immediate f32 operand materialized into an SGPR to satisfy the "s" constraint.
define amdgpu_kernel void @inline_sreg_constraint_imm_f32() {
  tail call void asm sideeffect "; use $0", "s"(float 1.0)
  ret void
}

; FIXME: Should be able to use s_mov_b64
; GCN-LABEL: {{^}}inline_sreg_constraint_imm_i64:
; GCN-DAG: s_mov_b32 s[[REG_LO:[0-9]+]], -4{{$}}
; GCN-DAG: s_mov_b32 s[[REG_HI:[0-9]+]], -1{{$}}
; GCN: ; use s{{\[}}[[REG_LO]]:[[REG_HI]]{{\]}}
; Immediate i64 operand: currently split into two 32-bit SGPR moves (see FIXME
; in the CHECK lines above this function).
define amdgpu_kernel void @inline_sreg_constraint_imm_i64() {
  tail call void asm sideeffect "; use $0", "s"(i64 -4)
  ret void
}

; GCN-LABEL: {{^}}inline_sreg_constraint_imm_f64:
; GCN-DAG: s_mov_b32 s[[REG_LO:[0-9]+]], 0{{$}}
; GCN-DAG: s_mov_b32 s[[REG_HI:[0-9]+]], 0x3ff00000{{$}}
; GCN: ; use s{{\[}}[[REG_LO]]:[[REG_HI]]{{\]}}
; Immediate f64 operand (1.0 = 0x3FF0000000000000) materialized as a 32-bit
; SGPR pair to satisfy the "s" constraint.
define amdgpu_kernel void @inline_sreg_constraint_imm_f64() {
  tail call void asm sideeffect "; use $0", "s"(double 1.0)
  ret void
}