AMDGPU: Start defining a calling convention
Partially implement callee-side for arguments and return values.
byval doesn't work properly, and most likely sret or other on-stack
return values are broken as well.
llvm-svn: 303308
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
index 5b78009..cdfb667 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
@@ -6,7 +6,8 @@
; Tests for add.
; CHECK: name: addi32
; CHECK: {{%[0-9]+}}(s32) = G_ADD
-define i32 @addi32(i32 %arg1, i32 %arg2) {
+define amdgpu_kernel void @addi32(i32 %arg1, i32 %arg2) {
%res = add i32 %arg1, %arg2
- ret i32 %res
+ store i32 %res, i32 addrspace(1)* undef
+ ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
new file mode 100644
index 0000000..d67988b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -0,0 +1,124 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; Test that non-entry function frame indices are expanded properly to
+; give an index relative to the scratch wave offset register
+
+; Materialize into a mov. Make sure there isn't an unnecessary copy.
+; GCN-LABEL: {{^}}func_mov_fi_i32:
+; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN: s_sub_u32 vcc_hi, s5, s4
+; GCN-NEXT: s_lshr_b32 vcc_hi, vcc_hi, 6
+; GCN-NEXT: v_add_i32_e64 v0, vcc, vcc_hi, 4
+; GCN-NOT: v_mov
+; GCN: ds_write_b32 v0, v0
+define void @func_mov_fi_i32() #0 {
+ %alloca = alloca i32
+ store volatile i32* %alloca, i32* addrspace(3)* undef
+ ret void
+}
+
+; Materialize into an add of a constant offset from the FI.
+; FIXME: Should be able to merge adds
+
+; GCN-LABEL: {{^}}func_add_constant_to_fi_i32:
+; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN: s_sub_u32 s6, s5, s4
+; GCN-NEXT: s_lshr_b32 s6, s6, 6
+; GCN-NEXT: v_add_i32_e64 v0, s{{\[[0-9]+:[0-9]+\]}}, s6, 4
+; GCN-NEXT: v_add_i32_e32 v0, vcc, 4, v0
+; GCN-NOT: v_mov
+; GCN: ds_write_b32 v0, v0
+define void @func_add_constant_to_fi_i32() #0 {
+ %alloca = alloca [2 x i32], align 4
+ %gep0 = getelementptr inbounds [2 x i32], [2 x i32]* %alloca, i32 0, i32 1
+ store volatile i32* %gep0, i32* addrspace(3)* undef
+ ret void
+}
+
+; A user that the materialized frame index can't be meaningfully
+; folded into.
+
+; GCN-LABEL: {{^}}func_other_fi_user_i32:
+; GCN: s_sub_u32 vcc_hi, s5, s4
+; GCN-NEXT: s_lshr_b32 vcc_hi, vcc_hi, 6
+; GCN-NEXT: v_add_i32_e64 v0, vcc, vcc_hi, 4
+; GCN-NEXT: v_mul_lo_i32 v0, v0, 9
+; GCN-NOT: v_mov
+; GCN: ds_write_b32 v0, v0
+define void @func_other_fi_user_i32() #0 {
+ %alloca = alloca [2 x i32], align 4
+ %ptrtoint = ptrtoint [2 x i32]* %alloca to i32
+ %mul = mul i32 %ptrtoint, 9
+ store volatile i32 %mul, i32 addrspace(3)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}func_store_private_arg_i32_ptr:
+; GCN: v_mov_b32_e32 v1, 15{{$}}
+; GCN: buffer_store_dword v1, v0, s[0:3], s4 offen{{$}}
+define void @func_store_private_arg_i32_ptr(i32* %ptr) #0 {
+ store volatile i32 15, i32* %ptr
+ ret void
+}
+
+; GCN-LABEL: {{^}}func_load_private_arg_i32_ptr:
+; GCN: s_waitcnt
+; GCN-NEXT: buffer_load_dword v0, v0, s[0:3], s4 offen{{$}}
+define void @func_load_private_arg_i32_ptr(i32* %ptr) #0 {
+ %val = load volatile i32, i32* %ptr
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr:
+; GCN: s_waitcnt
+; GCN-NEXT: s_sub_u32 s6, s5, s4
+; GCN-NEXT: v_lshr_b32_e64 v0, s6, 6
+; GCN-NEXT: v_add_i32_e32 v0, vcc, 4, v0
+; GCN-NOT: v_mov
+; GCN: ds_write_b32 v0, v0
+define void @void_func_byval_struct_i8_i32_ptr({ i8, i32 }* byval %arg0) #0 {
+ %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 0
+ %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 1
+ %load1 = load i32, i32* %gep1
+ store volatile i32* %gep1, i32* addrspace(3)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr_value:
+; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: buffer_load_ubyte v0, off, s[0:3], s5
+; GCN-NEXT: buffer_load_dword v1, off, s[0:3], s5 offset:4
+define void @void_func_byval_struct_i8_i32_ptr_value({ i8, i32 }* byval %arg0) #0 {
+ %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 0
+ %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 1
+ %load0 = load i8, i8* %gep0
+ %load1 = load i32, i32* %gep1
+ store volatile i8 %load0, i8 addrspace(3)* undef
+ store volatile i32 %load1, i32 addrspace(3)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr_nonentry_block:
+; GCN: s_sub_u32 s8, s5, s4
+; GCN: v_lshr_b32_e64 v1, s8, 6
+; GCN: s_and_saveexec_b64
+
+; GCN: v_add_i32_e32 v0, vcc, 4, v1
+; GCN: buffer_load_dword v1, v1, s[0:3], s4 offen offset:4
+; GCN: ds_write_b32
+define void @void_func_byval_struct_i8_i32_ptr_nonentry_block({ i8, i32 }* byval %arg0, i32 %arg2) #0 {
+ %cmp = icmp eq i32 %arg2, 0
+ br i1 %cmp, label %bb, label %ret
+
+bb:
+ %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 0
+ %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 1
+ %load1 = load volatile i32, i32* %gep1
+ store volatile i32* %gep1, i32* addrspace(3)* undef
+ br label %ret
+
+ret:
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/function-args.ll b/llvm/test/CodeGen/AMDGPU/function-args.ll
new file mode 100644
index 0000000..9b13684
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/function-args.ll
@@ -0,0 +1,734 @@
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX9 %s
+
+; GCN-LABEL: {{^}}void_func_i1:
+; GCN: v_and_b32_e32 v0, 1, v0
+; GCN: buffer_store_byte v0, off
+define void @void_func_i1(i1 %arg0) #0 {
+ store i1 %arg0, i1 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i1_zeroext:
+; GCN: s_waitcnt
+; GCN-NEXT: v_or_b32_e32 v0, 12, v0
+; GCN-NOT: v0
+; GCN: buffer_store_dword v0, off
+define void @void_func_i1_zeroext(i1 zeroext %arg0) #0 {
+ %ext = zext i1 %arg0 to i32
+ %add = add i32 %ext, 12
+ store i32 %add, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i1_signext:
+; GCN: s_waitcnt
+; GCN-NEXT: v_add_i32_e32 v0, vcc, 12, v0
+; GCN-NOT: v0
+; GCN: buffer_store_dword v0, off
+define void @void_func_i1_signext(i1 signext %arg0) #0 {
+ %ext = sext i1 %arg0 to i32
+ %add = add i32 %ext, 12
+ store i32 %add, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i8:
+; GCN-NOT: v0
+; GCN: buffer_store_byte v0, off
+define void @void_func_i8(i8 %arg0) #0 {
+ store i8 %arg0, i8 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i8_zeroext:
+; GCN-NOT: and_b32
+; GCN: v_add_i32_e32 v0, vcc, 12, v0
+define void @void_func_i8_zeroext(i8 zeroext %arg0) #0 {
+ %ext = zext i8 %arg0 to i32
+ %add = add i32 %ext, 12
+ store i32 %add, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i8_signext:
+; GCN-NOT: v_bfe_i32
+; GCN: v_add_i32_e32 v0, vcc, 12, v0
+define void @void_func_i8_signext(i8 signext %arg0) #0 {
+ %ext = sext i8 %arg0 to i32
+ %add = add i32 %ext, 12
+ store i32 %add, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i16:
+; GCN: buffer_store_short v0, off
+define void @void_func_i16(i16 %arg0) #0 {
+ store i16 %arg0, i16 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i16_zeroext:
+; GCN-NOT: v0
+; GCN: v_add_i32_e32 v0, vcc, 12, v0
+define void @void_func_i16_zeroext(i16 zeroext %arg0) #0 {
+ %ext = zext i16 %arg0 to i32
+ %add = add i32 %ext, 12
+ store i32 %add, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i16_signext:
+; GCN-NOT: v0
+; GCN: v_add_i32_e32 v0, vcc, 12, v0
+define void @void_func_i16_signext(i16 signext %arg0) #0 {
+ %ext = sext i16 %arg0 to i32
+ %add = add i32 %ext, 12
+ store i32 %add, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i32:
+; GCN-NOT: v0
+; GCN: buffer_store_dword v0, off
+define void @void_func_i32(i32 %arg0) #0 {
+ store i32 %arg0, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_i64:
+; GCN-NOT: v[0:1]
+; GCN-NOT: v0
+; GCN-NOT: v1
+; GCN: buffer_store_dwordx2 v[0:1], off
+define void @void_func_i64(i64 %arg0) #0 {
+ store i64 %arg0, i64 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_f16:
+; VI-NOT: v0
+; CI: v_cvt_f16_f32_e32 v0, v0
+; GCN: buffer_store_short v0, off
+define void @void_func_f16(half %arg0) #0 {
+ store half %arg0, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_f32
+; GCN-NOT: v0
+; GCN: buffer_store_dword v0, off
+define void @void_func_f32(float %arg0) #0 {
+ store float %arg0, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_f64:
+; GCN-NOT: v[0:1]
+; GCN-NOT: v0
+; GCN-NOT: v1
+; GCN: buffer_store_dwordx2 v[0:1], off
+define void @void_func_f64(double %arg0) #0 {
+ store double %arg0, double addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v2i32:
+; GCN-NOT: v[0:1]
+; GCN-NOT: v0
+; GCN-NOT: v1
+; GCN: buffer_store_dwordx2 v[0:1], off
+define void @void_func_v2i32(<2 x i32> %arg0) #0 {
+ store <2 x i32> %arg0, <2 x i32> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v3i32:
+; GCN-DAG: buffer_store_dword v2, off
+; GCN-DAG: buffer_store_dwordx2 v[0:1], off
+define void @void_func_v3i32(<3 x i32> %arg0) #0 {
+ store <3 x i32> %arg0, <3 x i32> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v4i32:
+; GCN: buffer_store_dwordx4 v[0:3], off
+define void @void_func_v4i32(<4 x i32> %arg0) #0 {
+ store <4 x i32> %arg0, <4 x i32> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v5i32:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dword v4, off
+define void @void_func_v5i32(<5 x i32> %arg0) #0 {
+ store <5 x i32> %arg0, <5 x i32> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v8i32:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+define void @void_func_v8i32(<8 x i32> %arg0) #0 {
+ store <8 x i32> %arg0, <8 x i32> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v16i32:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx4 v[8:11], off
+; GCN-DAG: buffer_store_dwordx4 v[12:15], off
+define void @void_func_v16i32(<16 x i32> %arg0) #0 {
+ store <16 x i32> %arg0, <16 x i32> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v32i32:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx4 v[8:11], off
+; GCN-DAG: buffer_store_dwordx4 v[12:15], off
+; GCN-DAG: buffer_store_dwordx4 v[16:19], off
+; GCN-DAG: buffer_store_dwordx4 v[20:23], off
+; GCN-DAG: buffer_store_dwordx4 v[24:27], off
+; GCN-DAG: buffer_store_dwordx4 v[28:31], off
+define void @void_func_v32i32(<32 x i32> %arg0) #0 {
+ store <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ ret void
+}
+
+; 1 over register limit
+; GCN-LABEL: {{^}}void_func_v33i32:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx4 v[8:11], off
+; GCN-DAG: buffer_store_dwordx4 v[12:15], off
+; GCN-DAG: buffer_load_dword [[STACKLOAD:v[0-9]+]], off, s[0:3], s5
+; GCN-DAG: buffer_store_dwordx4 v[16:19], off
+; GCN-DAG: buffer_store_dwordx4 v[20:23], off
+; GCN-DAG: buffer_store_dwordx4 v[24:27], off
+; GCN-DAG: buffer_store_dwordx4 v[28:31], off
+; GCN: buffer_store_dword [[STACKLOAD]], off
+define void @void_func_v33i32(<33 x i32> %arg0) #0 {
+ store <33 x i32> %arg0, <33 x i32> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v2i64:
+; GCN: buffer_store_dwordx4 v[0:3], off
+define void @void_func_v2i64(<2 x i64> %arg0) #0 {
+ store <2 x i64> %arg0, <2 x i64> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v3i64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx2 v[4:5], off
+define void @void_func_v3i64(<3 x i64> %arg0) #0 {
+ store <3 x i64> %arg0, <3 x i64> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v4i64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+define void @void_func_v4i64(<4 x i64> %arg0) #0 {
+ store <4 x i64> %arg0, <4 x i64> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v5i64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx2 v[8:9], off
+define void @void_func_v5i64(<5 x i64> %arg0) #0 {
+ store <5 x i64> %arg0, <5 x i64> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v8i64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx4 v[8:11], off
+; GCN-DAG: buffer_store_dwordx4 v[12:15], off
+define void @void_func_v8i64(<8 x i64> %arg0) #0 {
+ store <8 x i64> %arg0, <8 x i64> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v16i64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx4 v[8:11], off
+; GCN-DAG: buffer_store_dwordx4 v[12:15], off
+; GCN-DAG: buffer_store_dwordx4 v[16:19], off
+; GCN-DAG: buffer_store_dwordx4 v[20:23], off
+; GCN-DAG: buffer_store_dwordx4 v[24:27], off
+; GCN-DAG: buffer_store_dwordx4 v[28:31], off
+define void @void_func_v16i64(<16 x i64> %arg0) #0 {
+ store <16 x i64> %arg0, <16 x i64> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v2i16:
+; GFX9-NOT: v0
+; GFX9: buffer_store_dword v0, off
+define void @void_func_v2i16(<2 x i16> %arg0) #0 {
+ store <2 x i16> %arg0, <2 x i16> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v3i16:
+; GCN-DAG: buffer_store_dword v0, off
+; GCN-DAG: buffer_store_short v2, off
+define void @void_func_v3i16(<3 x i16> %arg0) #0 {
+ store <3 x i16> %arg0, <3 x i16> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v4i16:
+; GFX9-NOT: v0
+; GFX9-NOT: v1
+; GFX9: buffer_store_dwordx2 v[0:1], off
+define void @void_func_v4i16(<4 x i16> %arg0) #0 {
+ store <4 x i16> %arg0, <4 x i16> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v5i16:
+; GCN-DAG: buffer_store_short v4, off,
+; GCN-DAG: buffer_store_dwordx2 v[1:2], off
+define void @void_func_v5i16(<5 x i16> %arg0) #0 {
+ store <5 x i16> %arg0, <5 x i16> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v8i16:
+; GFX9-DAG: buffer_store_dwordx4 v[0:3], off
+define void @void_func_v8i16(<8 x i16> %arg0) #0 {
+ store <8 x i16> %arg0, <8 x i16> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v16i16:
+; GFX9-DAG: buffer_store_dwordx4 v[0:3], off
+; GFX9-DAG: buffer_store_dwordx4 v[4:7], off
+define void @void_func_v16i16(<16 x i16> %arg0) #0 {
+ store <16 x i16> %arg0, <16 x i16> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v2f32:
+; GCN-NOT: v[0:1]
+; GCN-NOT: v0
+; GCN-NOT: v1
+; GCN: buffer_store_dwordx2 v[0:1], off
+define void @void_func_v2f32(<2 x float> %arg0) #0 {
+ store <2 x float> %arg0, <2 x float> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v3f32:
+; GCN-DAG: buffer_store_dword v2, off
+; GCN-DAG: buffer_store_dwordx2 v[0:1], off
+define void @void_func_v3f32(<3 x float> %arg0) #0 {
+ store <3 x float> %arg0, <3 x float> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v4f32:
+; GCN: buffer_store_dwordx4 v[0:3], off
+define void @void_func_v4f32(<4 x float> %arg0) #0 {
+ store <4 x float> %arg0, <4 x float> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v8f32:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+define void @void_func_v8f32(<8 x float> %arg0) #0 {
+ store <8 x float> %arg0, <8 x float> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v16f32:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx4 v[8:11], off
+; GCN-DAG: buffer_store_dwordx4 v[12:15], off
+define void @void_func_v16f32(<16 x float> %arg0) #0 {
+ store <16 x float> %arg0, <16 x float> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v2f64:
+; GCN: buffer_store_dwordx4 v[0:3], off
+define void @void_func_v2f64(<2 x double> %arg0) #0 {
+ store <2 x double> %arg0, <2 x double> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v3f64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx2 v[4:5], off
+define void @void_func_v3f64(<3 x double> %arg0) #0 {
+ store <3 x double> %arg0, <3 x double> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v4f64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+define void @void_func_v4f64(<4 x double> %arg0) #0 {
+ store <4 x double> %arg0, <4 x double> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v8f64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx4 v[8:11], off
+; GCN-DAG: buffer_store_dwordx4 v[12:15], off
+define void @void_func_v8f64(<8 x double> %arg0) #0 {
+ store <8 x double> %arg0, <8 x double> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v16f64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx4 v[8:11], off
+; GCN-DAG: buffer_store_dwordx4 v[12:15], off
+; GCN-DAG: buffer_store_dwordx4 v[16:19], off
+; GCN-DAG: buffer_store_dwordx4 v[20:23], off
+; GCN-DAG: buffer_store_dwordx4 v[24:27], off
+; GCN-DAG: buffer_store_dwordx4 v[28:31], off
+define void @void_func_v16f64(<16 x double> %arg0) #0 {
+ store <16 x double> %arg0, <16 x double> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v2f16:
+; GFX9-NOT: v0
+; GFX9: buffer_store_dword v0, off
+define void @void_func_v2f16(<2 x half> %arg0) #0 {
+ store <2 x half> %arg0, <2 x half> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v3f16:
+; GFX9-NOT: v0
+; GCN-DAG: buffer_store_dword v0, off
+; GCN-DAG: buffer_store_short v2, off
+define void @void_func_v3f16(<3 x half> %arg0) #0 {
+ store <3 x half> %arg0, <3 x half> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v4f16:
+; GFX9-NOT: v0
+; GFX9-NOT: v1
+; GFX9-NOT: v[0:1]
+; GFX9: buffer_store_dwordx2 v[0:1], off
+define void @void_func_v4f16(<4 x half> %arg0) #0 {
+ store <4 x half> %arg0, <4 x half> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v8f16:
+; GFX9-NOT: v0
+; GFX9-NOT: v1
+; GFX9: buffer_store_dwordx4 v[0:3], off
+define void @void_func_v8f16(<8 x half> %arg0) #0 {
+ store <8 x half> %arg0, <8 x half> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v16f16:
+; GFX9-NOT: v0
+; GFX9-NOT: v1
+; GFX9-DAG: buffer_store_dwordx4 v[0:3], off
+; GFX9-DAG: buffer_store_dwordx4 v[4:7], off
+define void @void_func_v16f16(<16 x half> %arg0) #0 {
+ store <16 x half> %arg0, <16 x half> addrspace(1)* undef
+ ret void
+}
+
+; Make sure there is no alignment requirement for passed vgprs.
+; GCN-LABEL: {{^}}void_func_i32_i64_i32:
+; GCN-NOT: v0
+; GCN: buffer_store_dword v0, off
+; GCN: buffer_store_dwordx2 v[1:2]
+; GCN: buffer_store_dword v3
+define void @void_func_i32_i64_i32(i32 %arg0, i64 %arg1, i32 %arg2) #0 {
+ store volatile i32 %arg0, i32 addrspace(1)* undef
+ store volatile i64 %arg1, i64 addrspace(1)* undef
+ store volatile i32 %arg2, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_struct_i32:
+; GCN-NOT: v0
+; GCN: buffer_store_dword v0, off
+define void @void_func_struct_i32({ i32 } %arg0) #0 {
+ store { i32 } %arg0, { i32 } addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_struct_i8_i32:
+; GCN-DAG: buffer_store_byte v0, off
+; GCN-DAG: buffer_store_dword v1, off
+define void @void_func_struct_i8_i32({ i8, i32 } %arg0) #0 {
+ store { i8, i32 } %arg0, { i8, i32 } addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32:
+; GCN-DAG: buffer_load_ubyte v[[ELT0:[0-9]+]], off, s[0:3], s5{{$}}
+; GCN-DAG: buffer_load_dword v[[ELT1:[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; GCN-DAG: buffer_store_dword v[[ELT1]]
+; GCN-DAG: buffer_store_byte v[[ELT0]]
+define void @void_func_byval_struct_i8_i32({ i8, i32 }* byval %arg0) #0 {
+ %arg0.load = load { i8, i32 }, { i8, i32 }* %arg0
+ store { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_x2:
+; GCN: buffer_load_ubyte v[[ELT0_0:[0-9]+]], off, s[0:3], s5{{$}}
+; GCN: buffer_load_dword v[[ELT1_0:[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; GCN: buffer_load_ubyte v[[ELT0_1:[0-9]+]], off, s[0:3], s5 offset:8{{$}}
+; GCN: buffer_load_dword v[[ELT1_1:[0-9]+]], off, s[0:3], s5 offset:12{{$}}
+
+; GCN: ds_write_b32 v0, v0
+; GCN: s_setpc_b64
+define void @void_func_byval_struct_i8_i32_x2({ i8, i32 }* byval %arg0, { i8, i32 }* byval %arg1, i32 %arg2) #0 {
+ %arg0.load = load volatile { i8, i32 }, { i8, i32 }* %arg0
+ %arg1.load = load volatile { i8, i32 }, { i8, i32 }* %arg1
+ store volatile { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef
+ store volatile { i8, i32 } %arg1.load, { i8, i32 } addrspace(1)* undef
+ store volatile i32 %arg2, i32 addrspace(3)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_byval_i32_byval_i64:
+; GCN-DAG: buffer_load_dword v[[ARG0_LOAD:[0-9]+]], off, s[0:3], s5{{$}}
+; GCN-DAG: buffer_load_dword v[[ARG1_LOAD0:[0-9]+]], off, s[0:3], s5 offset:8{{$}}
+; GCN-DAG: buffer_load_dword v[[ARG1_LOAD1:[0-9]+]], off, s[0:3], s5 offset:12{{$}}
+; GCN-DAG: buffer_store_dword v[[ARG0_LOAD]], off
+; GCN-DAG: buffer_store_dwordx2 v{{\[}}[[ARG1_LOAD0]]:[[ARG1_LOAD1]]{{\]}}, off
+define void @void_func_byval_i32_byval_i64(i32* byval %arg0, i64* byval %arg1) #0 {
+ %arg0.load = load i32, i32* %arg0
+ %arg1.load = load i64, i64* %arg1
+ store i32 %arg0.load, i32 addrspace(1)* undef
+ store i64 %arg1.load, i64 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v32i32_i32_i64:
+; GCN-DAG: buffer_store_dwordx4 v[0:3], off
+; GCN-DAG: buffer_store_dwordx4 v[4:7], off
+; GCN-DAG: buffer_store_dwordx4 v[8:11], off
+; GCN-DAG: buffer_store_dwordx4 v[12:15], off
+; GCN-DAG: buffer_store_dwordx4 v[16:19], off
+; GCN-DAG: buffer_store_dwordx4 v[20:23], off
+; GCN-DAG: buffer_store_dwordx4 v[24:27], off
+; GCN-DAG: buffer_store_dwordx4 v[28:31], off
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1:[0-9]+]], off, s[0:3], s5{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:4
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:8
+
+; GCN: buffer_store_dword v[[LOAD_ARG1]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_1]]{{\]}}, off
+define void @void_func_v32i32_i32_i64(<32 x i32> %arg0, i32 %arg1, i64 %arg2) #0 {
+ store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ store volatile i32 %arg1, i32 addrspace(1)* undef
+ store volatile i64 %arg2, i64 addrspace(1)* undef
+ ret void
+}
+
+; FIXME: Different ext load types on CI vs. VI
+; GCN-LABEL: {{^}}void_func_v32i32_i1_i8_i16:
+; GCN-DAG: buffer_load_ubyte [[LOAD_ARG1:v[0-9]+]], off, s[0:3], s5{{$}}
+; VI-DAG: buffer_load_ushort [[LOAD_ARG2:v[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; VI-DAG: buffer_load_ushort [[LOAD_ARG3:v[0-9]+]], off, s[0:3], s5 offset:8{{$}}
+; VI-DAG: buffer_load_ushort [[LOAD_ARG4:v[0-9]+]], off, s[0:3], s5 offset:12{{$}}
+
+; CI-DAG: buffer_load_dword [[LOAD_ARG2:v[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; CI-DAG: buffer_load_dword [[LOAD_ARG3:v[0-9]+]], off, s[0:3], s5 offset:8{{$}}
+; CI-DAG: buffer_load_dword [[LOAD_ARG4:v[0-9]+]], off, s[0:3], s5 offset:12{{$}}
+
+; GCN-DAG: v_and_b32_e32 [[TRUNC_ARG1_I1:v[0-9]+]], 1, [[LOAD_ARG1]]
+; CI-DAG: v_cvt_f16_f32_e32 [[CVT_ARG4:v[0-9]+]], [[LOAD_ARG4]]
+
+; GCN: buffer_store_byte [[TRUNC_ARG1_I1]], off
+; GCN: buffer_store_byte [[LOAD_ARG2]], off
+; GCN: buffer_store_short [[LOAD_ARG3]], off
+; VI: buffer_store_short [[LOAD_ARG4]], off
+
+; CI: buffer_store_short [[CVT_ARG4]], off
+define void @void_func_v32i32_i1_i8_i16(<32 x i32> %arg0, i1 %arg1, i8 %arg2, i16 %arg3, half %arg4) #0 {
+ store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ store volatile i1 %arg1, i1 addrspace(1)* undef
+ store volatile i8 %arg2, i8 addrspace(1)* undef
+ store volatile i16 %arg3, i16 addrspace(1)* undef
+ store volatile half %arg4, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v32i32_v2i32_v2f32:
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:8{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:12{{$}}
+
+; GCN: buffer_store_dwordx2 v{{\[}}[[LOAD_ARG1_0]]:[[LOAD_ARG1_1]]{{\]}}, off
+; GCN: buffer_store_dwordx2 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_1]]{{\]}}, off
+define void @void_func_v32i32_v2i32_v2f32(<32 x i32> %arg0, <2 x i32> %arg1, <2 x float> %arg2) #0 {
+ store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ store volatile <2 x i32> %arg1, <2 x i32> addrspace(1)* undef
+ store volatile <2 x float> %arg2, <2 x float> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v32i32_v2i16_v2f16:
+; GFX9-DAG: buffer_load_dword [[LOAD_ARG1:v[0-9]+]], off, s[0:3], s5{{$}}
+; GFX9-DAG: buffer_load_dword [[LOAD_ARG2:v[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; GFX9: buffer_store_dword [[LOAD_ARG1]], off
+; GFX9: buffer_store_short [[LOAD_ARG2]], off
+define void @void_func_v32i32_v2i16_v2f16(<32 x i32> %arg0, <2 x i16> %arg1, <2 x half> %arg2) #0 {
+ store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ store volatile <2 x i16> %arg1, <2 x i16> addrspace(1)* undef
+ store volatile <2 x half> %arg2, <2 x half> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v32i32_v2i64_v2f64:
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_2:[0-9]+]], off, s[0:3], s5 offset:8{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_3:[0-9]+]], off, s[0:3], s5 offset:12{{$}}
+
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:16{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:20{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_2:[0-9]+]], off, s[0:3], s5 offset:24{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_3:[0-9]+]], off, s[0:3], s5 offset:28{{$}}
+
+; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG1_0]]:[[LOAD_ARG1_3]]{{\]}}, off
+; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_3]]{{\]}}, off
+define void @void_func_v32i32_v2i64_v2f64(<32 x i32> %arg0, <2 x i64> %arg1, <2 x double> %arg2) #0 {
+ store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ store volatile <2 x i64> %arg1, <2 x i64> addrspace(1)* undef
+ store volatile <2 x double> %arg2, <2 x double> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v32i32_v4i32_v4f32:
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_2:[0-9]+]], off, s[0:3], s5 offset:8{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_3:[0-9]+]], off, s[0:3], s5 offset:12{{$}}
+
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:16{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:20{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_2:[0-9]+]], off, s[0:3], s5 offset:24{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_3:[0-9]+]], off, s[0:3], s5 offset:28{{$}}
+
+; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG1_0]]:[[LOAD_ARG1_3]]{{\]}}, off
+; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_3]]{{\]}}, off
+define void @void_func_v32i32_v4i32_v4f32(<32 x i32> %arg0, <4 x i32> %arg1, <4 x float> %arg2) #0 {
+ store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ store volatile <4 x i32> %arg1, <4 x i32> addrspace(1)* undef
+ store volatile <4 x float> %arg2, <4 x float> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v32i32_v8i32_v8f32:
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_2:[0-9]+]], off, s[0:3], s5 offset:8{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_3:[0-9]+]], off, s[0:3], s5 offset:12{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_4:[0-9]+]], off, s[0:3], s5 offset:16{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_5:[0-9]+]], off, s[0:3], s5 offset:20{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_6:[0-9]+]], off, s[0:3], s5 offset:24{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_7:[0-9]+]], off, s[0:3], s5 offset:28{{$}}
+
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:32{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:36{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_2:[0-9]+]], off, s[0:3], s5 offset:40{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_3:[0-9]+]], off, s[0:3], s5 offset:44{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_4:[0-9]+]], off, s[0:3], s5 offset:48{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_5:[0-9]+]], off, s[0:3], s5 offset:52{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_6:[0-9]+]], off, s[0:3], s5 offset:56{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_7:[0-9]+]], off, s[0:3], s5 offset:60{{$}}
+
+; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG1_4]]:[[LOAD_ARG1_7]]{{\]}}, off
+; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG1_0]]:[[LOAD_ARG1_3]]{{\]}}, off
+; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG2_4]]:[[LOAD_ARG2_7]]{{\]}}, off
+; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_3]]{{\]}}, off
+define void @void_func_v32i32_v8i32_v8f32(<32 x i32> %arg0, <8 x i32> %arg1, <8 x float> %arg2) #0 {
+ store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ store volatile <8 x i32> %arg1, <8 x i32> addrspace(1)* undef
+ store volatile <8 x float> %arg2, <8 x float> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}void_func_v32i32_v16i32_v16f32:
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_2:[0-9]+]], off, s[0:3], s5 offset:8{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_3:[0-9]+]], off, s[0:3], s5 offset:12{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_4:[0-9]+]], off, s[0:3], s5 offset:16{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_5:[0-9]+]], off, s[0:3], s5 offset:20{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_6:[0-9]+]], off, s[0:3], s5 offset:24{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_7:[0-9]+]], off, s[0:3], s5 offset:28{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_8:[0-9]+]], off, s[0:3], s5 offset:32{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_9:[0-9]+]], off, s[0:3], s5 offset:36{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_10:[0-9]+]], off, s[0:3], s5 offset:40{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_11:[0-9]+]], off, s[0:3], s5 offset:44{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_12:[0-9]+]], off, s[0:3], s5 offset:48{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_13:[0-9]+]], off, s[0:3], s5 offset:52{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_14:[0-9]+]], off, s[0:3], s5 offset:56{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_15:[0-9]+]], off, s[0:3], s5 offset:60{{$}}
+
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:64{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:68{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_2:[0-9]+]], off, s[0:3], s5 offset:72{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_3:[0-9]+]], off, s[0:3], s5 offset:76{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_4:[0-9]+]], off, s[0:3], s5 offset:80{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_5:[0-9]+]], off, s[0:3], s5 offset:84{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_6:[0-9]+]], off, s[0:3], s5 offset:88{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_7:[0-9]+]], off, s[0:3], s5 offset:92{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_8:[0-9]+]], off, s[0:3], s5 offset:96{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_9:[0-9]+]], off, s[0:3], s5 offset:100{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_10:[0-9]+]], off, s[0:3], s5 offset:104{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_11:[0-9]+]], off, s[0:3], s5 offset:108{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_12:[0-9]+]], off, s[0:3], s5 offset:112{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_13:[0-9]+]], off, s[0:3], s5 offset:116{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_14:[0-9]+]], off, s[0:3], s5 offset:120{{$}}
+; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_15:[0-9]+]], off, s[0:3], s5 offset:124{{$}}
+define void @void_func_v32i32_v16i32_v16f32(<32 x i32> %arg0, <16 x i32> %arg1, <16 x float> %arg2) #0 {
+ store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ store volatile <16 x i32> %arg1, <16 x i32> addrspace(1)* undef
+ store volatile <16 x float> %arg2, <16 x float> addrspace(1)* undef
+ ret void
+}
+
+; Check there is no crash.
+; GCN-LABEL: {{^}}void_func_v16i8:
+define void @void_func_v16i8(<16 x i8> %arg0) #0 {
+ store volatile <16 x i8> %arg0, <16 x i8> addrspace(1)* undef
+ ret void
+}
+
+; Check there is no crash.
+; GCN-LABEL: {{^}}void_func_v32i32_v16i8:
+define void @void_func_v32i32_v16i8(<32 x i32> %arg0, <16 x i8> %arg1) #0 {
+ store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
+ store volatile <16 x i8> %arg1, <16 x i8> addrspace(1)* undef
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/function-returns.ll b/llvm/test/CodeGen/AMDGPU/function-returns.ll
new file mode 100644
index 0000000..f704d43
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/function-returns.ll
@@ -0,0 +1,514 @@
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX9 %s
+
+; GCN-LABEL: {{^}}i1_func_void:
+; GCN: buffer_load_ubyte v0, off
+; GCN-NEXT: s_waitcnt
+; GCN-NEXT: s_setpc_b64
+define i1 @i1_func_void() #0 {
+ %val = load i1, i1 addrspace(1)* undef
+ ret i1 %val
+}
+
+; FIXME: Missing and?
+; GCN-LABEL: {{^}}i1_zeroext_func_void:
+; GCN: buffer_load_ubyte v0, off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define zeroext i1 @i1_zeroext_func_void() #0 {
+ %val = load i1, i1 addrspace(1)* undef
+ ret i1 %val
+}
+
+; GCN-LABEL: {{^}}i1_signext_func_void:
+; GCN: buffer_load_ubyte v0, off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_bfe_i32 v0, v0, 0, 1{{$}}
+; GCN-NEXT: s_setpc_b64
+define signext i1 @i1_signext_func_void() #0 {
+ %val = load i1, i1 addrspace(1)* undef
+ ret i1 %val
+}
+
+; GCN-LABEL: {{^}}i8_func_void:
+; GCN: buffer_load_ubyte v0, off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define i8 @i8_func_void() #0 {
+ %val = load i8, i8 addrspace(1)* undef
+ ret i8 %val
+}
+
+; GCN-LABEL: {{^}}i8_zeroext_func_void:
+; GCN: buffer_load_ubyte v0, off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define zeroext i8 @i8_zeroext_func_void() #0 {
+ %val = load i8, i8 addrspace(1)* undef
+ ret i8 %val
+}
+
+; GCN-LABEL: {{^}}i8_signext_func_void:
+; GCN: buffer_load_sbyte v0, off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define signext i8 @i8_signext_func_void() #0 {
+ %val = load i8, i8 addrspace(1)* undef
+ ret i8 %val
+}
+
+; GCN-LABEL: {{^}}i16_func_void:
+; GCN: buffer_load_ushort v0, off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define i16 @i16_func_void() #0 {
+ %val = load i16, i16 addrspace(1)* undef
+ ret i16 %val
+}
+
+; GCN-LABEL: {{^}}i16_zeroext_func_void:
+; GCN: buffer_load_ushort v0, off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define zeroext i16 @i16_zeroext_func_void() #0 {
+ %val = load i16, i16 addrspace(1)* undef
+ ret i16 %val
+}
+
+; GCN-LABEL: {{^}}i16_signext_func_void:
+; GCN: buffer_load_sshort v0, off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define signext i16 @i16_signext_func_void() #0 {
+ %val = load i16, i16 addrspace(1)* undef
+ ret i16 %val
+}
+
+; GCN-LABEL: {{^}}i32_func_void:
+; GCN: buffer_load_dword v0, off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define i32 @i32_func_void() #0 {
+ %val = load i32, i32 addrspace(1)* undef
+ ret i32 %val
+}
+
+; GCN-LABEL: {{^}}i64_func_void:
+; GCN: buffer_load_dwordx2 v[0:1], off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define i64 @i64_func_void() #0 {
+ %val = load i64, i64 addrspace(1)* undef
+ ret i64 %val
+}
+
+; GCN-LABEL: {{^}}f32_func_void:
+; GCN: buffer_load_dword v0, off, s[8:11], 0
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define float @f32_func_void() #0 {
+ %val = load float, float addrspace(1)* undef
+ ret float %val
+}
+
+; GCN-LABEL: {{^}}f64_func_void:
+; GCN: buffer_load_dwordx2 v[0:1], off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define double @f64_func_void() #0 {
+ %val = load double, double addrspace(1)* undef
+ ret double %val
+}
+
+; GCN-LABEL: {{^}}v2i32_func_void:
+; GCN: buffer_load_dwordx2 v[0:1], off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <2 x i32> @v2i32_func_void() #0 {
+ %val = load <2 x i32>, <2 x i32> addrspace(1)* undef
+ ret <2 x i32> %val
+}
+
+; GCN-LABEL: {{^}}v3i32_func_void:
+; GCN: buffer_load_dwordx4 v[0:3], off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <3 x i32> @v3i32_func_void() #0 {
+ %val = load <3 x i32>, <3 x i32> addrspace(1)* undef
+ ret <3 x i32> %val
+}
+
+; GCN-LABEL: {{^}}v4i32_func_void:
+; GCN: buffer_load_dwordx4 v[0:3], off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <4 x i32> @v4i32_func_void() #0 {
+ %val = load <4 x i32>, <4 x i32> addrspace(1)* undef
+ ret <4 x i32> %val
+}
+
+; GCN-LABEL: {{^}}v5i32_func_void:
+; GCN-DAG: buffer_load_dword v4, off
+; GCN-DAG: buffer_load_dwordx4 v[0:3], off
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <5 x i32> @v5i32_func_void() #0 {
+ %val = load volatile <5 x i32>, <5 x i32> addrspace(1)* undef
+ ret <5 x i32> %val
+}
+
+; GCN-LABEL: {{^}}v8i32_func_void:
+; GCN-DAG: buffer_load_dwordx4 v[0:3], off
+; GCN-DAG: buffer_load_dwordx4 v[4:7], off
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <8 x i32> @v8i32_func_void() #0 {
+ %ptr = load volatile <8 x i32> addrspace(1)*, <8 x i32> addrspace(1)* addrspace(2)* undef
+ %val = load <8 x i32>, <8 x i32> addrspace(1)* %ptr
+ ret <8 x i32> %val
+}
+
+; GCN-LABEL: {{^}}v16i32_func_void:
+; GCN-DAG: buffer_load_dwordx4 v[0:3], off
+; GCN-DAG: buffer_load_dwordx4 v[4:7], off
+; GCN-DAG: buffer_load_dwordx4 v[8:11], off
+; GCN-DAG: buffer_load_dwordx4 v[12:15], off
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <16 x i32> @v16i32_func_void() #0 {
+ %ptr = load volatile <16 x i32> addrspace(1)*, <16 x i32> addrspace(1)* addrspace(2)* undef
+ %val = load <16 x i32>, <16 x i32> addrspace(1)* %ptr
+ ret <16 x i32> %val
+}
+
+; GCN-LABEL: {{^}}v32i32_func_void:
+; GCN-DAG: buffer_load_dwordx4 v[0:3], off
+; GCN-DAG: buffer_load_dwordx4 v[4:7], off
+; GCN-DAG: buffer_load_dwordx4 v[8:11], off
+; GCN-DAG: buffer_load_dwordx4 v[12:15], off
+; GCN-DAG: buffer_load_dwordx4 v[16:19], off
+; GCN-DAG: buffer_load_dwordx4 v[20:23], off
+; GCN-DAG: buffer_load_dwordx4 v[24:27], off
+; GCN-DAG: buffer_load_dwordx4 v[28:31], off
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <32 x i32> @v32i32_func_void() #0 {
+ %ptr = load volatile <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(2)* undef
+ %val = load <32 x i32>, <32 x i32> addrspace(1)* %ptr
+ ret <32 x i32> %val
+}
+
+; GCN-LABEL: {{^}}v2i64_func_void:
+; GCN: buffer_load_dwordx4 v[0:3], off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <2 x i64> @v2i64_func_void() #0 {
+ %val = load <2 x i64>, <2 x i64> addrspace(1)* undef
+ ret <2 x i64> %val
+}
+
+; GCN-LABEL: {{^}}v3i64_func_void:
+; GCN-DAG: buffer_load_dwordx4 v[0:3], off
+; GCN-DAG: buffer_load_dwordx4 v[4:7], off
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <3 x i64> @v3i64_func_void() #0 {
+ %ptr = load volatile <3 x i64> addrspace(1)*, <3 x i64> addrspace(1)* addrspace(2)* undef
+ %val = load <3 x i64>, <3 x i64> addrspace(1)* %ptr
+ ret <3 x i64> %val
+}
+
+; GCN-LABEL: {{^}}v4i64_func_void:
+; GCN: buffer_load_dwordx4 v[0:3], off
+; GCN: buffer_load_dwordx4 v[4:7], off
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <4 x i64> @v4i64_func_void() #0 {
+ %ptr = load volatile <4 x i64> addrspace(1)*, <4 x i64> addrspace(1)* addrspace(2)* undef
+ %val = load <4 x i64>, <4 x i64> addrspace(1)* %ptr
+ ret <4 x i64> %val
+}
+
+; GCN-LABEL: {{^}}v5i64_func_void:
+; GCN-DAG: buffer_load_dwordx4 v[0:3], off
+; GCN-DAG: buffer_load_dwordx4 v[4:7], off
+; GCN-DAG: buffer_load_dwordx4 v[8:11], off
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <5 x i64> @v5i64_func_void() #0 {
+ %ptr = load volatile <5 x i64> addrspace(1)*, <5 x i64> addrspace(1)* addrspace(2)* undef
+ %val = load <5 x i64>, <5 x i64> addrspace(1)* %ptr
+ ret <5 x i64> %val
+}
+
+; GCN-LABEL: {{^}}v8i64_func_void:
+; GCN-DAG: buffer_load_dwordx4 v[0:3], off
+; GCN-DAG: buffer_load_dwordx4 v[4:7], off
+; GCN-DAG: buffer_load_dwordx4 v[8:11], off
+; GCN-DAG: buffer_load_dwordx4 v[12:15], off
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <8 x i64> @v8i64_func_void() #0 {
+ %ptr = load volatile <8 x i64> addrspace(1)*, <8 x i64> addrspace(1)* addrspace(2)* undef
+ %val = load <8 x i64>, <8 x i64> addrspace(1)* %ptr
+ ret <8 x i64> %val
+}
+
+; GCN-LABEL: {{^}}v16i64_func_void:
+; GCN-DAG: buffer_load_dwordx4 v[0:3], off
+; GCN-DAG: buffer_load_dwordx4 v[4:7], off
+; GCN-DAG: buffer_load_dwordx4 v[8:11], off
+; GCN-DAG: buffer_load_dwordx4 v[12:15], off
+; GCN-DAG: buffer_load_dwordx4 v[16:19], off
+; GCN-DAG: buffer_load_dwordx4 v[20:23], off
+; GCN-DAG: buffer_load_dwordx4 v[24:27], off
+; GCN-DAG: buffer_load_dwordx4 v[28:31], off
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <16 x i64> @v16i64_func_void() #0 {
+ %ptr = load volatile <16 x i64> addrspace(1)*, <16 x i64> addrspace(1)* addrspace(2)* undef
+ %val = load <16 x i64>, <16 x i64> addrspace(1)* %ptr
+ ret <16 x i64> %val
+}
+
+; GCN-LABEL: {{^}}v2i16_func_void:
+; GFX9: buffer_load_dword v0, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64
+define <2 x i16> @v2i16_func_void() #0 {
+ %val = load <2 x i16>, <2 x i16> addrspace(1)* undef
+ ret <2 x i16> %val
+}
+
+; GCN-LABEL: {{^}}v3i16_func_void:
+; GFX9: buffer_load_dwordx2 v[0:1], off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64
+define <3 x i16> @v3i16_func_void() #0 {
+ %val = load <3 x i16>, <3 x i16> addrspace(1)* undef
+ ret <3 x i16> %val
+}
+
+; GCN-LABEL: {{^}}v4i16_func_void:
+; GFX9: buffer_load_dwordx2 v[0:1], off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64
+define <4 x i16> @v4i16_func_void() #0 {
+ %val = load <4 x i16>, <4 x i16> addrspace(1)* undef
+ ret <4 x i16> %val
+}
+
+; FIXME: Should not scalarize
+; GCN-LABEL: {{^}}v5i16_func_void:
+; GFX9: buffer_load_dwordx2 v[0:1]
+; GFX9: buffer_load_ushort v4
+; GFX9: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9: v_mov_b32_e32 v2, v1
+; GFX9: v_lshrrev_b32_e32 v3, 16, v0
+; GCN: s_setpc_b64
+define <5 x i16> @v5i16_func_void() #0 {
+ %ptr = load volatile <5 x i16> addrspace(1)*, <5 x i16> addrspace(1)* addrspace(2)* undef
+ %val = load <5 x i16>, <5 x i16> addrspace(1)* %ptr
+ ret <5 x i16> %val
+}
+
+; GCN-LABEL: {{^}}v8i16_func_void:
+; GFX9-DAG: buffer_load_dwordx4 v[0:3], off
+; GFX9: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64
+define <8 x i16> @v8i16_func_void() #0 {
+ %ptr = load volatile <8 x i16> addrspace(1)*, <8 x i16> addrspace(1)* addrspace(2)* undef
+ %val = load <8 x i16>, <8 x i16> addrspace(1)* %ptr
+ ret <8 x i16> %val
+}
+
+; GCN-LABEL: {{^}}v16i16_func_void:
+; GFX9: buffer_load_dwordx4 v[0:3], off
+; GFX9: buffer_load_dwordx4 v[4:7], off
+; GFX9: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64
+define <16 x i16> @v16i16_func_void() #0 {
+ %ptr = load volatile <16 x i16> addrspace(1)*, <16 x i16> addrspace(1)* addrspace(2)* undef
+ %val = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
+ ret <16 x i16> %val
+}
+
+; FIXME: Should pack
+; GCN-LABEL: {{^}}v16i8_func_void:
+; GCN-DAG: v12
+; GCN-DAG: v13
+; GCN-DAG: v14
+; GCN-DAG: v15
+define <16 x i8> @v16i8_func_void() #0 {
+ %ptr = load volatile <16 x i8> addrspace(1)*, <16 x i8> addrspace(1)* addrspace(2)* undef
+ %val = load <16 x i8>, <16 x i8> addrspace(1)* %ptr
+ ret <16 x i8> %val
+}
+
+; FIXME: Should pack
+; GCN-LABEL: {{^}}v4i8_func_void:
+; GCN: buffer_load_dword v0
+; GCN-DAG: v_lshrrev_b32_e32 v2, 16, v0
+; GCN-DAG: v_lshrrev_b32_e32 v3, 24, v0
+; CI-DAG: v_bfe_u32 v1, v0, 8, 8
+; VI-DAG: v_lshrrev_b16_e32 v1, 8, v0
+; GCN: s_setpc_b64
+define <4 x i8> @v4i8_func_void() #0 {
+ %ptr = load volatile <4 x i8> addrspace(1)*, <4 x i8> addrspace(1)* addrspace(2)* undef
+ %val = load <4 x i8>, <4 x i8> addrspace(1)* %ptr
+ ret <4 x i8> %val
+}
+
+; GCN-LABEL: {{^}}struct_i8_i32_func_void:
+; GCN-DAG: buffer_load_dword v1
+; GCN-DAG: buffer_load_ubyte v0
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define {i8, i32} @struct_i8_i32_func_void() #0 {
+ %val = load { i8, i32 }, { i8, i32 } addrspace(1)* undef
+ ret { i8, i32 } %val
+}
+
+; GCN-LABEL: {{^}}void_func_sret_struct_i8_i32:
+; GCN: buffer_load_ubyte [[VAL0:v[0-9]+]]
+; GCN: buffer_load_dword [[VAL1:v[0-9]+]]
+; GCN: buffer_store_byte [[VAL0]], v0, s[0:3], s4 offen{{$}}
+; GCN: buffer_store_dword [[VAL1]], v0, s[0:3], s4 offen offset:4{{$}}
+define void @void_func_sret_struct_i8_i32({ i8, i32 }* sret %arg0) #0 {
+ %val0 = load volatile i8, i8 addrspace(1)* undef
+ %val1 = load volatile i32, i32 addrspace(1)* undef
+ %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 0
+ %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 1
+ store i8 %val0, i8* %gep0
+ store i32 %val1, i32* %gep1
+ ret void
+}
+
+; GCN-LABEL: {{^}}v33i32_func_void:
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:4{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:8{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:12{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:16{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:20{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:24{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:28{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:32{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:36{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:40{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:44{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:48{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:52{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:56{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:60{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:64{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:68{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:72{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:76{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:80{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:84{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:88{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:92{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:96{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:100{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:104{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:108{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:112{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:116{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:120{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:124{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:128{{$}}
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define <33 x i32> @v33i32_func_void() #0 {
+ %ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(2)* undef
+ %val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr
+ ret <33 x i32> %val
+}
+
+; GCN-LABEL: {{^}}struct_v32i32_i32_func_void:
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:4{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:8{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:12{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:16{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:20{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:24{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:28{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:32{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:36{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:40{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:44{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:48{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:52{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:56{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:60{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:64{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:68{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:72{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:76{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:80{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:84{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:88{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:92{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:96{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:100{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:104{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:108{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:112{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:116{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:120{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:124{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:128{{$}}
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
+ %ptr = load volatile { <32 x i32>, i32 } addrspace(1)*, { <32 x i32>, i32 } addrspace(1)* addrspace(2)* undef
+ %val = load { <32 x i32>, i32 }, { <32 x i32>, i32 } addrspace(1)* %ptr
+ ret { <32 x i32>, i32 } %val
+}
+
+; GCN-LABEL: {{^}}struct_i32_v32i32_func_void:
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:128{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:132{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:136{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:140{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:144{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:148{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:152{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:156{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:160{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:164{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:168{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:172{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:176{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:180{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:184{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:188{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:192{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:196{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:200{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:204{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:208{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:212{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:216{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:220{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:224{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:228{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:232{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:236{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:240{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:244{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:248{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:252{{$}}
+; GCN: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64
+define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
+ %ptr = load volatile { i32, <32 x i32> } addrspace(1)*, { i32, <32 x i32> } addrspace(1)* addrspace(2)* undef
+ %val = load { i32, <32 x i32> }, { i32, <32 x i32> } addrspace(1)* %ptr
+ ret { i32, <32 x i32> } %val
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-func.ll b/llvm/test/CodeGen/AMDGPU/hsa-func.ll
index d96b796..35aeeea 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-func.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-func.ll
@@ -27,7 +27,7 @@
; ELF: Symbol {
; ELF: Name: simple
-; ELF: Size: 44
+; ELF: Size: 48
; ELF: Type: Function (0x2)
; ELF: }
@@ -41,14 +41,12 @@
; HSA: .p2align 2
; HSA: {{^}}simple:
; HSA-NOT: amd_kernel_code_t
-
-; FIXME: Check this isn't a kernarg load when calling convention implemented.
-; XHSA-NOT: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0
+; HSA-NOT: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0
; Make sure we are setting the ATC bit:
-; HSA-CI: s_mov_b32 s[[HI:[0-9]]], 0x100f000
+; HSA-CI: s_mov_b32 s[[HI:[0-9]+]], 0x100f000
; On VI+ we also need to set MTYPE = 2
-; HSA-VI: s_mov_b32 s[[HI:[0-9]]], 0x1100f000
+; HSA-VI: s_mov_b32 s[[HI:[0-9]+]], 0x1100f000
; Make sure we generate flat store for HSA
; HSA: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}
@@ -56,8 +54,9 @@
; HSA: .size simple, .Lfunc_end0-simple
; HSA: ; Function info:
; HSA-NOT: COMPUTE_PGM_RSRC2
-define void @simple(i32 addrspace(1)* %out) {
+define void @simple(i32 addrspace(1)* addrspace(2)* %ptr.out) {
entry:
+ %out = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(2)* %ptr.out
store i32 0, i32 addrspace(1)* %out
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm.ll b/llvm/test/CodeGen/AMDGPU/inline-asm.ll
index 636b45d..36441cf 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-asm.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-asm.ll
@@ -191,7 +191,7 @@
; CHECK: v_mov_b32_e32 v0, s0
; CHECK: v_mov_b32_e32 v1, s1
; CHECK: use v[0:1]
-define void @i64_imm_input_phys_vgpr() {
+define amdgpu_kernel void @i64_imm_input_phys_vgpr() {
entry:
call void asm sideeffect "; use $0 ", "{VGPR0_VGPR1}"(i64 123456)
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/subreg_interference.mir b/llvm/test/CodeGen/AMDGPU/subreg_interference.mir
index 24d06a5..6fc22c8 100644
--- a/llvm/test/CodeGen/AMDGPU/subreg_interference.mir
+++ b/llvm/test/CodeGen/AMDGPU/subreg_interference.mir
@@ -1,4 +1,12 @@
# RUN: llc -o - %s -mtriple=amdgcn--amdhsa -verify-machineinstrs -run-pass=greedy,virtregrewriter | FileCheck %s
+--- |
+
+ define amdgpu_kernel void @func0() {
+ ret void
+ }
+
+...
+
---
# We should not detect any interference between v0/v1 here and only allocate
# sgpr0-sgpr3.