Revert "AMDGPU: Add VI i16 support"
This reverts commits r285939 and r285948. These broke some conformance tests.
llvm-svn: 285995
diff --git a/llvm/test/CodeGen/AMDGPU/add.i16.ll b/llvm/test/CodeGen/AMDGPU/add.i16.ll
deleted file mode 100644
index 3c7a2c1..0000000
--- a/llvm/test/CodeGen/AMDGPU/add.i16.ll
+++ /dev/null
@@ -1,149 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_add_i16:
-; VI: flat_load_ushort [[A:v[0-9]+]]
-; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
- %gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
- %gep.in1 = getelementptr inbounds i16, i16 addrspace(1)* %in1, i32 %tid
- %a = load volatile i16, i16 addrspace(1)* %gep.in0
- %b = load volatile i16, i16 addrspace(1)* %gep.in1
- %add = add i16 %a, %b
- store i16 %add, i16 addrspace(1)* %out
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_add_i16_constant:
-; VI: flat_load_ushort [[A:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 0x7b, [[A]]
-; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_add_i16_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
- %gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
- %a = load volatile i16, i16 addrspace(1)* %gep.in0
- %add = add i16 %a, 123
- store i16 %add, i16 addrspace(1)* %out
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_add_i16_neg_constant:
-; VI: flat_load_ushort [[A:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 0xfffffcb3, [[A]]
-; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_add_i16_neg_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
- %gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
- %a = load volatile i16, i16 addrspace(1)* %gep.in0
- %add = add i16 %a, -845
- store i16 %add, i16 addrspace(1)* %out
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_add_i16_inline_neg1:
-; VI: flat_load_ushort [[A:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], -1, [[A]]
-; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_add_i16_inline_neg1(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
- %gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
- %a = load volatile i16, i16 addrspace(1)* %gep.in0
- %add = add i16 %a, -1
- store i16 %add, i16 addrspace(1)* %out
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_add_i16_zext_to_i32:
-; VI: flat_load_ushort [[A:v[0-9]+]]
-; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; VI-NEXT: buffer_store_dword [[ADD]]
-define void @v_test_add_i16_zext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
- %gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
- %gep.in1 = getelementptr inbounds i16, i16 addrspace(1)* %in1, i32 %tid
- %a = load volatile i16, i16 addrspace(1)* %gep.in0
- %b = load volatile i16, i16 addrspace(1)* %gep.in1
- %add = add i16 %a, %b
- %ext = zext i16 %add to i32
- store i32 %ext, i32 addrspace(1)* %out
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_add_i16_zext_to_i64:
-; VI: flat_load_ushort [[A:v[0-9]+]]
-; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI-DAG: v_add_u16_e32 v[[ADD:[0-9]+]], [[A]], [[B]]
-; VI-DAG: v_mov_b32_e32 v[[VZERO:[0-9]+]], 0
-; VI: buffer_store_dwordx2 v{{\[}}[[ADD]]:[[VZERO]]{{\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @v_test_add_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %gep.out = getelementptr inbounds i64, i64 addrspace(1)* %out, i32 %tid
- %gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
- %gep.in1 = getelementptr inbounds i16, i16 addrspace(1)* %in1, i32 %tid
- %a = load volatile i16, i16 addrspace(1)* %gep.in0
- %b = load volatile i16, i16 addrspace(1)* %gep.in1
- %add = add i16 %a, %b
- %ext = zext i16 %add to i64
- store i64 %ext, i64 addrspace(1)* %out
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_add_i16_sext_to_i32:
-; VI: flat_load_ushort [[A:v[0-9]+]]
-; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; VI-NEXT: v_bfe_i32 [[SEXT:v[0-9]+]], [[ADD]], 0, 16
-; VI-NEXT: buffer_store_dword [[SEXT]]
-define void @v_test_add_i16_sext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
- %gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
- %gep.in1 = getelementptr inbounds i16, i16 addrspace(1)* %in1, i32 %tid
- %a = load i16, i16 addrspace(1)* %gep.in0
- %b = load i16, i16 addrspace(1)* %gep.in1
- %add = add i16 %a, %b
- %ext = sext i16 %add to i32
- store i32 %ext, i32 addrspace(1)* %out
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_add_i16_sext_to_i64:
-; VI: flat_load_ushort [[A:v[0-9]+]]
-; VI: flat_load_ushort [[B:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; VI-NEXT: v_bfe_i32 v[[LO:[0-9]+]], [[ADD]], 0, 16
-; VI-NEXT: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
-; VI-NEXT: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @v_test_add_i16_sext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
- %tid = call i32 @llvm.amdgcn.workitem.id.x()
- %gep.out = getelementptr inbounds i64, i64 addrspace(1)* %out, i32 %tid
- %gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
- %gep.in1 = getelementptr inbounds i16, i16 addrspace(1)* %in1, i32 %tid
- %a = load i16, i16 addrspace(1)* %gep.in0
- %b = load i16, i16 addrspace(1)* %gep.in1
- %add = add i16 %a, %b
- %ext = sext i16 %add to i64
- store i64 %ext, i64 addrspace(1)* %out
- ret void
-}
-
-declare i32 @llvm.amdgcn.workitem.id.x() #0
-
-attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/anyext.ll b/llvm/test/CodeGen/AMDGPU/anyext.ll
index a1d3715..48d8f31 100644
--- a/llvm/test/CodeGen/AMDGPU/anyext.ll
+++ b/llvm/test/CodeGen/AMDGPU/anyext.ll
@@ -1,40 +1,15 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
-declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
-declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
-
-; GCN-LABEL: {{^}}anyext_i1_i32:
-; GCN: v_cndmask_b32_e64
+; CHECK-LABEL: {{^}}anyext_i1_i32:
+; CHECK: v_cndmask_b32_e64
define void @anyext_i1_i32(i32 addrspace(1)* %out, i32 %cond) {
entry:
- %tmp = icmp eq i32 %cond, 0
- %tmp1 = zext i1 %tmp to i8
- %tmp2 = xor i8 %tmp1, -1
- %tmp3 = and i8 %tmp2, 1
- %tmp4 = zext i8 %tmp3 to i32
- store i32 %tmp4, i32 addrspace(1)* %out
- ret void
-}
-
-; GCN-LABEL: {{^}}s_anyext_i16_i32:
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]],
-; VI: v_xor_b32_e32 [[XOR:v[0-9]+]], -1, [[ADD]]
-; VI: v_and_b32_e32 [[AND:v[0-9]+]], 1, [[XOR]]
-; VI: buffer_store_dword [[AND]]
-define void @s_anyext_i16_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %a, i16 addrspace(1)* %b) {
-entry:
- %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
- %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
- %a.ptr = getelementptr i16, i16 addrspace(1)* %a, i32 %tid.x
- %b.ptr = getelementptr i16, i16 addrspace(1)* %b, i32 %tid.y
- %a.l = load i16, i16 addrspace(1)* %a.ptr
- %b.l = load i16, i16 addrspace(1)* %b.ptr
- %tmp = add i16 %a.l, %b.l
- %tmp1 = trunc i16 %tmp to i8
- %tmp2 = xor i8 %tmp1, -1
- %tmp3 = and i8 %tmp2, 1
- %tmp4 = zext i8 %tmp3 to i32
- store i32 %tmp4, i32 addrspace(1)* %out
+ %0 = icmp eq i32 %cond, 0
+ %1 = zext i1 %0 to i8
+ %2 = xor i8 %1, -1
+ %3 = and i8 %2, 1
+ %4 = zext i8 %3 to i32
+ store i32 %4, i32 addrspace(1)* %out
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/bitreverse.ll b/llvm/test/CodeGen/AMDGPU/bitreverse.ll
index aca88a9..0acacea 100644
--- a/llvm/test/CodeGen/AMDGPU/bitreverse.ll
+++ b/llvm/test/CodeGen/AMDGPU/bitreverse.ll
@@ -1,6 +1,5 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC %s
declare i16 @llvm.bitreverse.i16(i16) #1
declare i32 @llvm.bitreverse.i32(i32) #1
@@ -13,7 +12,7 @@
declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) #1
; FUNC-LABEL: {{^}}s_brev_i16:
-; SI: s_brev_b32
+; SI: s_brev_b32
define void @s_brev_i16(i16 addrspace(1)* noalias %out, i16 %val) #0 {
%brev = call i16 @llvm.bitreverse.i16(i16 %val) #1
store i16 %brev, i16 addrspace(1)* %out
diff --git a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
index baf6eb1..33daf02 100644
--- a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
+++ b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
@@ -116,19 +116,14 @@
; OPT: store
; OPT: ret
-; For GFX8: since i16 is legal type, we cannot sink lshr into BBs.
; GCN-LABEL: {{^}}sink_ubfe_i16:
; GCN-NOT: lshr
-; VI: s_bfe_u32 s0, s0, 0xc0004
; GCN: s_cbranch_vccnz
-; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80004
-; VI: s_and_b32 s0, s0, 0xff
-
+; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80004
; GCN: BB2_2:
-; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x70004
-; VI: s_and_b32 s0, s0, 0x7f
+; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x70004
; GCN: BB2_3:
; GCN: buffer_store_short
diff --git a/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll b/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll
index 6918dff..00d2257 100644
--- a/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll
+++ b/llvm/test/CodeGen/AMDGPU/copy-illegal-type.ll
@@ -1,13 +1,10 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
-
-declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
-declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
+; RUN: llc -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}test_copy_v4i8:
-; GCN: buffer_load_dword [[REG:v[0-9]+]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: s_endpgm
+; SI: buffer_load_dword [[REG:v[0-9]+]]
+; SI: buffer_store_dword [[REG]]
+; SI: s_endpgm
define void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
@@ -15,10 +12,10 @@
}
; FUNC-LABEL: {{^}}test_copy_v4i8_x2:
-; GCN: buffer_load_dword [[REG:v[0-9]+]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: s_endpgm
+; SI: buffer_load_dword [[REG:v[0-9]+]]
+; SI: buffer_store_dword [[REG]]
+; SI: buffer_store_dword [[REG]]
+; SI: s_endpgm
define void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
@@ -27,11 +24,11 @@
}
; FUNC-LABEL: {{^}}test_copy_v4i8_x3:
-; GCN: buffer_load_dword [[REG:v[0-9]+]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: s_endpgm
+; SI: buffer_load_dword [[REG:v[0-9]+]]
+; SI: buffer_store_dword [[REG]]
+; SI: buffer_store_dword [[REG]]
+; SI: buffer_store_dword [[REG]]
+; SI: s_endpgm
define void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
@@ -41,12 +38,12 @@
}
; FUNC-LABEL: {{^}}test_copy_v4i8_x4:
-; GCN: buffer_load_dword [[REG:v[0-9]+]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: buffer_store_dword [[REG]]
-; GCN: s_endpgm
+; SI: buffer_load_dword [[REG:v[0-9]+]]
+; SI: buffer_store_dword [[REG]]
+; SI: buffer_store_dword [[REG]]
+; SI: buffer_store_dword [[REG]]
+; SI: buffer_store_dword [[REG]]
+; SI: s_endpgm
define void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %out3, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
@@ -57,14 +54,14 @@
}
; FUNC-LABEL: {{^}}test_copy_v4i8_extra_use:
-; GCN: buffer_load_dword
-; GCN-DAG: v_lshrrev_b32
-; GCN: v_and_b32
-; GCN: v_or_b32
-; GCN-DAG: buffer_store_dword
-; GCN-DAG: buffer_store_dword
+; SI: buffer_load_dword
+; SI-DAG: v_lshrrev_b32
+; SI: v_and_b32
+; SI: v_or_b32
+; SI-DAG: buffer_store_dword
+; SI-DAG: buffer_store_dword
-; GCN: s_endpgm
+; SI: s_endpgm
define void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
%add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
@@ -73,22 +70,18 @@
ret void
}
-; FIXME: Need to handle non-uniform case for function below (load without gep).
; FUNC-LABEL: {{^}}test_copy_v4i8_x2_extra_use:
-; GCN: {{buffer|flat}}_load_dword
-; GCN-DAG: v_lshrrev_b32
+; SI: buffer_load_dword
+; SI-DAG: v_lshrrev_b32
; SI-DAG: v_add_i32
-; VI-DAG: v_add_u16
-; GCN-DAG: v_and_b32
-; GCN-DAG: v_or_b32
-; GCN-DAG: {{buffer|flat}}_store_dword
-; GCN: {{buffer|flat}}_store_dword
-; GCN: {{buffer|flat}}_store_dword
-; GCN: s_endpgm
+; SI-DAG: v_and_b32
+; SI-DAG: v_or_b32
+; SI-DAG: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: s_endpgm
define void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
- %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
- %in.ptr = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
- %val = load <4 x i8>, <4 x i8> addrspace(1)* %in.ptr, align 4
+ %val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
%add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
@@ -97,10 +90,10 @@
}
; FUNC-LABEL: {{^}}test_copy_v3i8_align4:
-; GCN: buffer_load_dword
-; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
-; GCN: s_endpgm
+; SI: buffer_load_dword
+; SI-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
+; SI: s_endpgm
define void @test_copy_v3i8_align4(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
%val = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 4
store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 4
@@ -108,11 +101,11 @@
}
; FUNC-LABEL: {{^}}test_copy_v3i8_align2:
-; GCN-DAG: buffer_load_ushort v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; GCN-DAG: buffer_load_ubyte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
-; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
-; GCN: s_endpgm
+; SI-DAG: buffer_load_ushort v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_ubyte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
+; SI-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
+; SI: s_endpgm
define void @test_copy_v3i8_align2(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
%val = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 2
store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 2
@@ -120,14 +113,14 @@
}
; FUNC-LABEL: {{^}}test_copy_v3i8_align1:
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
-; GCN: buffer_store_byte
-; GCN: buffer_store_byte
-; GCN: buffer_store_byte
-; GCN: s_endpgm
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: s_endpgm
define void @test_copy_v3i8_align1(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
%val = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 1
store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 1
@@ -135,12 +128,12 @@
}
; FUNC-LABEL: {{^}}test_copy_v4i8_volatile_load:
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_store_dword
-; GCN: s_endpgm
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_store_dword
+; SI: s_endpgm
define void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%val = load volatile <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
@@ -148,15 +141,15 @@
}
; FUNC-LABEL: {{^}}test_copy_v4i8_volatile_store:
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_store_byte
-; GCN: buffer_store_byte
-; GCN: buffer_store_byte
-; GCN: buffer_store_byte
-; GCN: s_endpgm
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: s_endpgm
define void @test_copy_v4i8_volatile_store(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store volatile <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/ctlz.ll b/llvm/test/CodeGen/AMDGPU/ctlz.ll
index 0d0ead6..597f11c 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz.ll
@@ -100,7 +100,6 @@
; GCN: buffer_load_ubyte [[VAL:v[0-9]+]],
; GCN-DAG: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_byte [[RESULT]],
-; GCN: s_endpgm
define void @v_ctlz_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
%val = load i8, i8 addrspace(1)* %valptr
%ctlz = call i8 @llvm.ctlz.i8(i8 %val, i1 false) nounwind readnone
diff --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index 900938b..8c3a93d 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
diff --git a/llvm/test/CodeGen/AMDGPU/cube.ll b/llvm/test/CodeGen/AMDGPU/cube.ll
index c5d1f86..ab99af5 100644
--- a/llvm/test/CodeGen/AMDGPU/cube.ll
+++ b/llvm/test/CodeGen/AMDGPU/cube.ll
@@ -30,10 +30,10 @@
}
; GCN-LABEL: {{^}}legacy_cube:
-; GCN-DAG: v_cubeid_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_cubesc_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_cubetc_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_cubema_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_cubeid_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DAG: v_cubesc_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DAG: v_cubetc_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DAG: v_cubema_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: buffer_store_dwordx4
define void @legacy_cube(<4 x float> addrspace(1)* %out, <4 x float> %abcx) #1 {
%cube = call <4 x float> @llvm.AMDGPU.cube(<4 x float> %abcx)
diff --git a/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll b/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
index ed9b827..36275ff 100644
--- a/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
+++ b/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
@@ -1,15 +1,15 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
-; GCN-LABEL: {{^}}load_i8_to_f32:
-; GCN: buffer_load_ubyte [[LOADREG:v[0-9]+]],
-; GCN-NOT: bfe
-; GCN-NOT: lshr
-; GCN: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[LOADREG]]
-; GCN: buffer_store_dword [[CONV]],
+; SI-LABEL: {{^}}load_i8_to_f32:
+; SI: buffer_load_ubyte [[LOADREG:v[0-9]+]],
+; SI-NOT: bfe
+; SI-NOT: lshr
+; SI: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[LOADREG]]
+; SI: buffer_store_dword [[CONV]],
define void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
%load = load i8, i8 addrspace(1)* %in, align 1
%cvt = uitofp i8 %load to float
@@ -17,11 +17,11 @@
ret void
}
-; GCN-LABEL: {{^}}load_v2i8_to_v2f32:
-; GCN: buffer_load_ushort [[LD:v[0-9]+]]
-; GCN-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[LD]]
-; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LD]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+; SI-LABEL: {{^}}load_v2i8_to_v2f32:
+; SI: buffer_load_ushort [[LD:v[0-9]+]]
+; SI-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[LD]]
+; SI-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LD]]
+; SI: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <2 x i8>, <2 x i8> addrspace(1)* %in, align 2
%cvt = uitofp <2 x i8> %load to <2 x float>
@@ -29,13 +29,13 @@
ret void
}
-; GCN-LABEL: {{^}}load_v3i8_to_v3f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
-; GCN-NOT: v_cvt_f32_ubyte3_e32
-; GCN-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, [[VAL]]
-; GCN-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[VAL]]
-; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[VAL]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+; SI-LABEL: {{^}}load_v3i8_to_v3f32:
+; SI: buffer_load_dword [[VAL:v[0-9]+]]
+; SI-NOT: v_cvt_f32_ubyte3_e32
+; SI-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, [[VAL]]
+; SI-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[VAL]]
+; SI-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[VAL]]
+; SI: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 4
%cvt = uitofp <3 x i8> %load to <3 x float>
@@ -43,15 +43,15 @@
ret void
}
-; GCN-LABEL: {{^}}load_v4i8_to_v4f32:
-; GCN: buffer_load_dword [[LOADREG:v[0-9]+]]
-; GCN-NOT: bfe
-; GCN-NOT: lshr
-; GCN-DAG: v_cvt_f32_ubyte3_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
-; GCN-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, [[LOADREG]]
-; GCN-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, [[LOADREG]]
-; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
-; GCN: buffer_store_dwordx4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+; SI-LABEL: {{^}}load_v4i8_to_v4f32:
+; SI: buffer_load_dword [[LOADREG:v[0-9]+]]
+; SI-NOT: bfe
+; SI-NOT: lshr
+; SI-DAG: v_cvt_f32_ubyte3_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
+; SI-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, [[LOADREG]]
+; SI-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, [[LOADREG]]
+; SI-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
+; SI: buffer_store_dwordx4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
%cvt = uitofp <4 x i8> %load to <4 x float>
@@ -63,19 +63,19 @@
; position in the word for the component.
; FIXME: Packing bytes
-; GCN-LABEL: {{^}}load_v4i8_to_v4f32_unaligned:
-; GCN: buffer_load_ubyte [[LOADREG3:v[0-9]+]]
-; GCN: buffer_load_ubyte [[LOADREG2:v[0-9]+]]
-; GCN: buffer_load_ubyte [[LOADREG1:v[0-9]+]]
-; GCN: buffer_load_ubyte [[LOADREG0:v[0-9]+]]
-; GCN-DAG: v_lshlrev_b32
-; GCN-DAG: v_or_b32
-; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]],
-; GCN-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}},
-; GCN-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}},
-; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[HIRESULT:[0-9]+]]
+; SI-LABEL: {{^}}load_v4i8_to_v4f32_unaligned:
+; SI: buffer_load_ubyte [[LOADREG3:v[0-9]+]]
+; SI: buffer_load_ubyte [[LOADREG2:v[0-9]+]]
+; SI: buffer_load_ubyte [[LOADREG1:v[0-9]+]]
+; SI: buffer_load_ubyte [[LOADREG0:v[0-9]+]]
+; SI-DAG: v_lshlrev_b32
+; SI-DAG: v_or_b32
+; SI-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]],
+; SI-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}},
+; SI-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}},
+; SI-DAG: v_cvt_f32_ubyte0_e32 v[[HIRESULT:[0-9]+]]
-; GCN: buffer_store_dwordx4
+; SI: buffer_store_dwordx4
define void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
%cvt = uitofp <4 x i8> %load to <4 x float>
@@ -85,31 +85,25 @@
; FIXME: Need to handle non-uniform case for function below (load without gep).
; Instructions still emitted to repack bytes for add use.
+; SI-LABEL: {{^}}load_v4i8_to_v4f32_2_uses:
+; SI: {{buffer|flat}}_load_dword
+; SI-DAG: v_cvt_f32_ubyte0_e32
+; SI-DAG: v_cvt_f32_ubyte1_e32
+; SI-DAG: v_cvt_f32_ubyte2_e32
+; SI-DAG: v_cvt_f32_ubyte3_e32
-; GCN-LABEL: {{^}}load_v4i8_to_v4f32_2_uses:
-; GCN: {{buffer|flat}}_load_dword
-; GCN-DAG: v_cvt_f32_ubyte0_e32
-; GCN-DAG: v_cvt_f32_ubyte1_e32
-; GCN-DAG: v_cvt_f32_ubyte2_e32
-; GCN-DAG: v_cvt_f32_ubyte3_e32
-
-; GCN-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 24
-; GCN-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 16
-
+; SI-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 24
+; SI-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 16
; SI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16
; SI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 8
; SI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xffff,
; SI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xff00,
; SI-DAG: v_add_i32
-; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xffffff00,
-; VI-DAG: v_add_u16_e32
-; VI-DAG: v_add_u16_e32
+; SI: {{buffer|flat}}_store_dwordx4
+; SI: {{buffer|flat}}_store_dword
-; GCN: {{buffer|flat}}_store_dwordx4
-; GCN: {{buffer|flat}}_store_dword
-
-; GCN: s_endpgm
+; SI: s_endpgm
define void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
%in.ptr = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
@@ -122,8 +116,8 @@
}
; Make sure this doesn't crash.
-; GCN-LABEL: {{^}}load_v7i8_to_v7f32:
-; GCN: s_endpgm
+; SI-LABEL: {{^}}load_v7i8_to_v7f32:
+; SI: s_endpgm
define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <7 x i8>, <7 x i8> addrspace(1)* %in, align 1
%cvt = uitofp <7 x i8> %load to <7 x float>
@@ -131,22 +125,22 @@
ret void
}
-; GCN-LABEL: {{^}}load_v8i8_to_v8f32:
-; GCN: buffer_load_dwordx2 v{{\[}}[[LOLOAD:[0-9]+]]:[[HILOAD:[0-9]+]]{{\]}},
-; GCN-NOT: bfe
-; GCN-NOT: lshr
-; GCN-DAG: v_cvt_f32_ubyte3_e32 v{{[0-9]+}}, v[[LOLOAD]]
-; GCN-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, v[[LOLOAD]]
-; GCN-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, v[[LOLOAD]]
-; GCN-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}}, v[[LOLOAD]]
-; GCN-DAG: v_cvt_f32_ubyte3_e32 v{{[0-9]+}}, v[[HILOAD]]
-; GCN-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, v[[HILOAD]]
-; GCN-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, v[[HILOAD]]
-; GCN-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}}, v[[HILOAD]]
-; GCN-NOT: bfe
-; GCN-NOT: lshr
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
+; SI-LABEL: {{^}}load_v8i8_to_v8f32:
+; SI: buffer_load_dwordx2 v{{\[}}[[LOLOAD:[0-9]+]]:[[HILOAD:[0-9]+]]{{\]}},
+; SI-NOT: bfe
+; SI-NOT: lshr
+; SI-DAG: v_cvt_f32_ubyte3_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: v_cvt_f32_ubyte3_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-NOT: bfe
+; SI-NOT: lshr
+; SI: buffer_store_dwordx4
+; SI: buffer_store_dwordx4
define void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <8 x i8>, <8 x i8> addrspace(1)* %in, align 8
%cvt = uitofp <8 x i8> %load to <8 x float>
@@ -154,11 +148,11 @@
ret void
}
-; GCN-LABEL: {{^}}i8_zext_inreg_i32_to_f32:
-; GCN: buffer_load_dword [[LOADREG:v[0-9]+]],
-; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, 2, [[LOADREG]]
-; GCN-NEXT: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[ADD]]
-; GCN: buffer_store_dword [[CONV]],
+; SI-LABEL: {{^}}i8_zext_inreg_i32_to_f32:
+; SI: buffer_load_dword [[LOADREG:v[0-9]+]],
+; SI: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, 2, [[LOADREG]]
+; SI-NEXT: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[ADD]]
+; SI: buffer_store_dword [[CONV]],
define void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%load = load i32, i32 addrspace(1)* %in, align 4
%add = add i32 %load, 2
@@ -168,7 +162,7 @@
ret void
}
-; GCN-LABEL: {{^}}i8_zext_inreg_hi1_to_f32:
+; SI-LABEL: {{^}}i8_zext_inreg_hi1_to_f32:
define void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%load = load i32, i32 addrspace(1)* %in, align 4
%inreg = and i32 %load, 65280
@@ -180,7 +174,7 @@
; We don't get these ones because of the zext, but instcombine removes
; them so it shouldn't really matter.
-; GCN-LABEL: {{^}}i8_zext_i32_to_f32:
+; SI-LABEL: {{^}}i8_zext_i32_to_f32:
define void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
%load = load i8, i8 addrspace(1)* %in, align 1
%ext = zext i8 %load to i32
@@ -189,7 +183,7 @@
ret void
}
-; GCN-LABEL: {{^}}v4i8_zext_v4i32_to_v4f32:
+; SI-LABEL: {{^}}v4i8_zext_v4i32_to_v4f32:
define void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
%ext = zext <4 x i8> %load to <4 x i32>
@@ -198,11 +192,11 @@
ret void
}
-; GCN-LABEL: {{^}}extract_byte0_to_f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
-; GCN-NOT: [[VAL]]
-; GCN: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[VAL]]
-; GCN: buffer_store_dword [[CONV]]
+; SI-LABEL: {{^}}extract_byte0_to_f32:
+; SI: buffer_load_dword [[VAL:v[0-9]+]]
+; SI-NOT: [[VAL]]
+; SI: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[VAL]]
+; SI: buffer_store_dword [[CONV]]
define void @extract_byte0_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in
%and = and i32 %val, 255
@@ -211,11 +205,11 @@
ret void
}
-; GCN-LABEL: {{^}}extract_byte1_to_f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
-; GCN-NOT: [[VAL]]
-; GCN: v_cvt_f32_ubyte1_e32 [[CONV:v[0-9]+]], [[VAL]]
-; GCN: buffer_store_dword [[CONV]]
+; SI-LABEL: {{^}}extract_byte1_to_f32:
+; SI: buffer_load_dword [[VAL:v[0-9]+]]
+; SI-NOT: [[VAL]]
+; SI: v_cvt_f32_ubyte1_e32 [[CONV:v[0-9]+]], [[VAL]]
+; SI: buffer_store_dword [[CONV]]
define void @extract_byte1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in
%srl = lshr i32 %val, 8
@@ -225,11 +219,11 @@
ret void
}
-; GCN-LABEL: {{^}}extract_byte2_to_f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
-; GCN-NOT: [[VAL]]
-; GCN: v_cvt_f32_ubyte2_e32 [[CONV:v[0-9]+]], [[VAL]]
-; GCN: buffer_store_dword [[CONV]]
+; SI-LABEL: {{^}}extract_byte2_to_f32:
+; SI: buffer_load_dword [[VAL:v[0-9]+]]
+; SI-NOT: [[VAL]]
+; SI: v_cvt_f32_ubyte2_e32 [[CONV:v[0-9]+]], [[VAL]]
+; SI: buffer_store_dword [[CONV]]
define void @extract_byte2_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in
%srl = lshr i32 %val, 16
@@ -239,11 +233,11 @@
ret void
}
-; GCN-LABEL: {{^}}extract_byte3_to_f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
-; GCN-NOT: [[VAL]]
-; GCN: v_cvt_f32_ubyte3_e32 [[CONV:v[0-9]+]], [[VAL]]
-; GCN: buffer_store_dword [[CONV]]
+; SI-LABEL: {{^}}extract_byte3_to_f32:
+; SI: buffer_load_dword [[VAL:v[0-9]+]]
+; SI-NOT: [[VAL]]
+; SI: v_cvt_f32_ubyte3_e32 [[CONV:v[0-9]+]], [[VAL]]
+; SI: buffer_store_dword [[CONV]]
define void @extract_byte3_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in
%srl = lshr i32 %val, 24
diff --git a/llvm/test/CodeGen/AMDGPU/global-extload-i16.ll b/llvm/test/CodeGen/AMDGPU/global-extload-i16.ll
deleted file mode 100644
index 4d99929..0000000
--- a/llvm/test/CodeGen/AMDGPU/global-extload-i16.ll
+++ /dev/null
@@ -1,302 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; XUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; FIXME: cypress is broken because the bigger testcases spill and it's not implemented
-
-; FUNC-LABEL: {{^}}zextload_global_i16_to_i32:
-; SI: buffer_load_ushort
-; SI: buffer_store_dword
-; SI: s_endpgm
-define void @zextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
- %a = load i16, i16 addrspace(1)* %in
- %ext = zext i16 %a to i32
- store i32 %ext, i32 addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_i16_to_i32:
-; SI: buffer_load_sshort
-; SI: buffer_store_dword
-; SI: s_endpgm
-define void @sextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
- %a = load i16, i16 addrspace(1)* %in
- %ext = sext i16 %a to i32
- store i32 %ext, i32 addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v1i16_to_v1i32:
-; SI: buffer_load_ushort
-; SI: s_endpgm
-define void @zextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
- %ext = zext <1 x i16> %load to <1 x i32>
- store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v1i16_to_v1i32:
-; SI: buffer_load_sshort
-; SI: s_endpgm
-define void @sextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
- %ext = sext <1 x i16> %load to <1 x i32>
- store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v2i16_to_v2i32:
-; SI: s_endpgm
-define void @zextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
- %ext = zext <2 x i16> %load to <2 x i32>
- store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v2i16_to_v2i32:
-; SI: s_endpgm
-define void @sextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
- %ext = sext <2 x i16> %load to <2 x i32>
- store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v4i16_to_v4i32:
-; SI: s_endpgm
-define void @zextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <4 x i16>, <4 x i16> addrspace(1)* %in
- %ext = zext <4 x i16> %load to <4 x i32>
- store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v4i16_to_v4i32:
-; SI: s_endpgm
-define void @sextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <4 x i16>, <4 x i16> addrspace(1)* %in
- %ext = sext <4 x i16> %load to <4 x i32>
- store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v8i16_to_v8i32:
-; SI: s_endpgm
-define void @zextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <8 x i16>, <8 x i16> addrspace(1)* %in
- %ext = zext <8 x i16> %load to <8 x i32>
- store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v8i16_to_v8i32:
-; SI: s_endpgm
-define void @sextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <8 x i16>, <8 x i16> addrspace(1)* %in
- %ext = sext <8 x i16> %load to <8 x i32>
- store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v16i16_to_v16i32:
-; SI: s_endpgm
-define void @zextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <16 x i16>, <16 x i16> addrspace(1)* %in
- %ext = zext <16 x i16> %load to <16 x i32>
- store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v16i16_to_v16i32:
-; SI: s_endpgm
-define void @sextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <16 x i16>, <16 x i16> addrspace(1)* %in
- %ext = sext <16 x i16> %load to <16 x i32>
- store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v32i16_to_v32i32:
-; SI: s_endpgm
-define void @zextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <32 x i16>, <32 x i16> addrspace(1)* %in
- %ext = zext <32 x i16> %load to <32 x i32>
- store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v32i16_to_v32i32:
-; SI: s_endpgm
-define void @sextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <32 x i16>, <32 x i16> addrspace(1)* %in
- %ext = sext <32 x i16> %load to <32 x i32>
- store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v64i16_to_v64i32:
-; SI: s_endpgm
-define void @zextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
- %ext = zext <64 x i16> %load to <64 x i32>
- store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v64i16_to_v64i32:
-; SI: s_endpgm
-define void @sextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
- %ext = sext <64 x i16> %load to <64 x i32>
- store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_i16_to_i64:
-; SI-DAG: buffer_load_ushort v[[LO:[0-9]+]],
-; SI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
-; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
-define void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
- %a = load i16, i16 addrspace(1)* %in
- %ext = zext i16 %a to i64
- store i64 %ext, i64 addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_i16_to_i64:
-; VI: buffer_load_ushort [[LOAD:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0
-; VI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
-; VI: buffer_store_dwordx2 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0
-define void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
- %a = load i16, i16 addrspace(1)* %in
- %ext = sext i16 %a to i64
- store i64 %ext, i64 addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v1i16_to_v1i64:
-; SI: s_endpgm
-define void @zextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
- %ext = zext <1 x i16> %load to <1 x i64>
- store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v1i16_to_v1i64:
-; SI: s_endpgm
-define void @sextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
- %ext = sext <1 x i16> %load to <1 x i64>
- store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v2i16_to_v2i64:
-; SI: s_endpgm
-define void @zextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
- %ext = zext <2 x i16> %load to <2 x i64>
- store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v2i16_to_v2i64:
-; SI: s_endpgm
-define void @sextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
- %ext = sext <2 x i16> %load to <2 x i64>
- store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v4i16_to_v4i64:
-; SI: s_endpgm
-define void @zextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <4 x i16>, <4 x i16> addrspace(1)* %in
- %ext = zext <4 x i16> %load to <4 x i64>
- store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v4i16_to_v4i64:
-; SI: s_endpgm
-define void @sextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <4 x i16>, <4 x i16> addrspace(1)* %in
- %ext = sext <4 x i16> %load to <4 x i64>
- store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v8i16_to_v8i64:
-; SI: s_endpgm
-define void @zextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <8 x i16>, <8 x i16> addrspace(1)* %in
- %ext = zext <8 x i16> %load to <8 x i64>
- store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v8i16_to_v8i64:
-; SI: s_endpgm
-define void @sextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <8 x i16>, <8 x i16> addrspace(1)* %in
- %ext = sext <8 x i16> %load to <8 x i64>
- store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v16i16_to_v16i64:
-; SI: s_endpgm
-define void @zextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <16 x i16>, <16 x i16> addrspace(1)* %in
- %ext = zext <16 x i16> %load to <16 x i64>
- store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v16i16_to_v16i64:
-; SI: s_endpgm
-define void @sextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <16 x i16>, <16 x i16> addrspace(1)* %in
- %ext = sext <16 x i16> %load to <16 x i64>
- store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v32i16_to_v32i64:
-; SI: s_endpgm
-define void @zextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <32 x i16>, <32 x i16> addrspace(1)* %in
- %ext = zext <32 x i16> %load to <32 x i64>
- store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v32i16_to_v32i64:
-; SI: s_endpgm
-define void @sextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <32 x i16>, <32 x i16> addrspace(1)* %in
- %ext = sext <32 x i16> %load to <32 x i64>
- store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_v64i16_to_v64i64:
-; SI: s_endpgm
-define void @zextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
- %ext = zext <64 x i16> %load to <64 x i64>
- store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_v64i16_to_v64i64:
-; SI: s_endpgm
-define void @sextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
- %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
- %ext = sext <64 x i16> %load to <64 x i64>
- store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
- ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/half.ll b/llvm/test/CodeGen/AMDGPU/half.ll
index b63ba8e..aa1f5b7 100644
--- a/llvm/test/CodeGen/AMDGPU/half.ll
+++ b/llvm/test/CodeGen/AMDGPU/half.ll
@@ -379,33 +379,19 @@
; GCN-LABEL: {{^}}global_extload_v3f16_to_v3f64:
-; XSI: buffer_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]]
-; XSI: v_cvt_f32_f16_e32
-; XSI: v_cvt_f32_f16_e32
-; XSI-DAG: v_lshrrev_b32_e32 {{v[0-9]+}}, 16, {{v[0-9]+}}
-; XSI: v_cvt_f32_f16_e32
-; XSI-NOT: v_cvt_f32_f16
+; GCN: buffer_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]]
+; GCN-DAG: v_cvt_f32_f16_e32
+; GCN-DAG: v_lshrrev_b32_e32 {{v[0-9]+}}, 16, {{v[0-9]+}}
+; GCN-DAG: v_cvt_f32_f16_e32
+; GCN-DAG: v_cvt_f32_f16_e32
-; XVI: buffer_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]]
-; XVI: v_cvt_f32_f16_e32
-; XVI: v_cvt_f32_f16_e32
-; XVI-DAG: v_lshrrev_b32_e32 {{v[0-9]+}}, 16, {{v[0-9]+}}
-; XVI: v_cvt_f32_f16_e32
-; XVI-NOT: v_cvt_f32_f16
-
-; GCN: buffer_load_dwordx2 v{{\[}}[[IN_LO:[0-9]+]]:[[IN_HI:[0-9]+]]
-; GCN: v_cvt_f32_f16_e32 [[Z32:v[0-9]+]], v[[IN_HI]]
-; GCN: v_cvt_f32_f16_e32 [[X32:v[0-9]+]], v[[IN_LO]]
-; GCN: v_lshrrev_b32_e32 [[Y16:v[0-9]+]], 16, v[[IN_LO]]
-; GCN: v_cvt_f32_f16_e32 [[Y32:v[0-9]+]], [[Y16]]
-
-; GCN: v_cvt_f64_f32_e32 [[Z:v\[[0-9]+:[0-9]+\]]], [[Z32]]
-; GCN: v_cvt_f64_f32_e32 v{{\[}}[[XLO:[0-9]+]]:{{[0-9]+}}], [[X32]]
-; GCN: v_cvt_f64_f32_e32 v[{{[0-9]+}}:[[YHI:[0-9]+]]{{\]}}, [[Y32]]
+; GCN: v_cvt_f64_f32_e32
+; GCN: v_cvt_f64_f32_e32
+; GCN: v_cvt_f64_f32_e32
; GCN-NOT: v_cvt_f64_f32_e32
-; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[XLO]]:[[YHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; GCN-DAG: buffer_store_dwordx2 [[Z]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; GCN-DAG: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16
; GCN: s_endpgm
define void @global_extload_v3f16_to_v3f64(<3 x double> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
%val = load <3 x half>, <3 x half> addrspace(1)* %in
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll b/llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll
index bf5d492..5411192 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll
@@ -1,6 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.AMDGPU.bfe.u32(i32, i32, i32) nounwind readnone
@@ -74,14 +73,11 @@
}
; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i8:
-; GCN: buffer_load_dword
+; SI: buffer_load_dword
; SI: v_add_i32
; SI-NEXT: v_and_b32_e32
-; FIXME: Should be using s_add_i32
-; VI: v_add_i32
-; VI-NEXT: v_and_b32_e32
; SI-NOT: {{[^@]}}bfe
-; GCN: s_endpgm
+; SI: s_endpgm
define void @bfe_u32_zext_in_reg_i8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%load = load i32, i32 addrspace(1)* %in, align 4
%add = add i32 %load, 1
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
index 628d285..e3ec664 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-SI,FUNC %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-VI,FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}constant_load_i16:
@@ -428,15 +428,8 @@
}
; FUNC-LABEL: {{^}}constant_sextload_i16_to_i64:
-; FIXME: Need to optimize this sequence to avoid extra bfe:
-; t28: i32,ch = load<LD2[%in(addrspace=1)], anyext from i16> t12, t27, undef:i64
-; t31: i64 = any_extend t28
-; t33: i64 = sign_extend_inreg t31, ValueType:ch:i16
-
-; GCN-NOHSA-SI-DAG: buffer_load_sshort v[[LO:[0-9]+]],
+; GCN-NOHSA-DAG: buffer_load_sshort v[[LO:[0-9]+]],
; GCN-HSA-DAG: flat_load_sshort v[[LO:[0-9]+]],
-; GCN-NOHSA-VI-DAG: buffer_load_ushort v[[ULO:[0-9]+]],
-; GCN-NOHSA-VI-DAG: v_bfe_i32 v[[LO:[0-9]+]], v[[ULO]], 0, 16
; GCN-DAG: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; GCN-NOHSA: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index f398dd3..a79c901 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-SI,FUNC %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-HSA,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-VI,FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
@@ -444,15 +444,8 @@
}
; FUNC-LABEL: {{^}}global_sextload_i16_to_i64:
-; FIXME: Need to optimize this sequence to avoid extra bfe:
-; t28: i32,ch = load<LD2[%in(addrspace=1)], anyext from i16> t12, t27, undef:i64
-; t31: i64 = any_extend t28
-; t33: i64 = sign_extend_inreg t31, ValueType:ch:i16
-
-; GCN-NOHSA-SI-DAG: buffer_load_sshort v[[LO:[0-9]+]],
+; GCN-NOHSA-DAG: buffer_load_sshort v[[LO:[0-9]+]],
; GCN-HSA-DAG: flat_load_sshort v[[LO:[0-9]+]],
-; GCN-NOHSA-VI-DAG: buffer_load_ushort v[[ULO:[0-9]+]],
-; GCN-NOHSA-VI-DAG: v_bfe_i32 v[[LO:[0-9]+]], v[[ULO]], 0, 16
; GCN-DAG: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; GCN-NOHSA: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
index b183b6c..ae032a9 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,SI,FUNC %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-HSA,SI,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,VI,FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
@@ -163,8 +163,7 @@
; GCN-NOHSA: buffer_load_dword v
; GCN-HSA: flat_load_dword v
-; SI-DAG: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, 8, 8
-; VI-DAG: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GCN-DAG: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, 8, 8
; GCN-DAG: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, 16, 8
; GCN-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xff,
@@ -186,16 +185,7 @@
; GCN-NOHSA: buffer_load_dword v
; GCN-HSA: flat_load_dword v
-; FIXME: Need to optimize this sequence to avoid the extra shift on VI.
-
-; t23: i16 = truncate t18
-; t49: i16 = srl t23, Constant:i32<8>
-; t57: i32 = any_extend t49
-; t58: i32 = sign_extend_inreg t57, ValueType:ch:i8
-
-; SI-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 8, 8
-; VI-DAG: v_lshrrev_b16_e32 [[SHIFT:v[0-9]+]], 8, v{{[0-9]+}}
-; VI-DAG: v_bfe_i32 v{{[0-9]+}}, [[SHIFT]], 0, 8
+; GCN-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 8, 8
; GCN-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
; GCN-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 16, 8
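A note on the bitfield-extract checks above: v_bfe_u32 dst, src, offset, width extracts width bits of src starting at bit offset and zero-extends the field, while v_bfe_i32 sign-extends it. Worked example with src = 0x00c08001:

; v_bfe_u32 dst, src, 8, 8    -> dst = 0x00000080   (bits [15:8], zero-extended)
; v_bfe_i32 dst, src, 8, 8    -> dst = 0xffffff80   (same field, sign-extended)
; v_bfe_u32 dst, src, 16, 8   -> dst = 0x000000c0   (bits [23:16])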
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
index d4e86de..9b0cbaa 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}local_load_i16:
@@ -539,13 +539,7 @@
}
; FUNC-LABEL: {{^}}local_sextload_i16_to_i64:
-; FIXME: Need to optimize this sequence to avoid an extra shift.
-; t25: i32,ch = load<LD2[%in(addrspace=3)], anyext from i16> t12, t10, undef:i32
-; t28: i64 = any_extend t25
-; t30: i64 = sign_extend_inreg t28, ValueType:ch:i16
-; SI: ds_read_i16 v[[LO:[0-9]+]],
-; VI: ds_read_u16 v[[ULO:[0-9]+]]
-; VI: v_bfe_i32 v[[LO:[0-9]+]], v[[ULO]], 0, 16
+; GCN: ds_read_i16 v[[LO:[0-9]+]],
; GCN-DAG: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; GCN: ds_write_b64 v{{[0-9]+}}, v{{\[}}[[LO]]:[[HI]]]
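The FIXME deleted above records the missing combine in SelectionDAG node form. Informally, the combiner needs the fold

;   (sign_extend_inreg (any_extend (load anyext i16)), i16)  -->  (load sext i16)

after which selection picks the sign-extending load directly: ds_read_i16 here, buffer_load_sshort in the global-memory cases. That is exactly the shape the restored GCN check asserts. The arrow line is an informal sketch of the fold, not actual tablegen.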
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i8.ll b/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
index 02b59e8..6b5b1cf 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
@@ -141,17 +141,8 @@
; GCN-NOT: s_wqm_b64
; GCN: s_mov_b32 m0
; GCN: ds_read_u16
-; FIXME: Need to optimize this sequence to avoid extra shift on VI.
-; t23: i16 = srl t39, Constant:i32<8>
-; t31: i32 = any_extend t23
-; t33: i32 = sign_extend_inreg t31, ValueType:ch:i8
-
-; SI-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 8, 8
-; SI-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
-
-; VI-DAG: v_lshrrev_b16_e32 [[SHIFT:v[0-9]+]], 8, v{{[0-9]+}}
-; VI-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
-; VI-DAG: v_bfe_i32 v{{[0-9]+}}, [[SHIFT]], 0, 8
+; GCN-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 8, 8
+; GCN-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
; EG: LDS_USHORT_READ_RET
; EG-DAG: BFE_INT
@@ -166,8 +157,7 @@
; FUNC-LABEL: {{^}}local_zextload_v3i8_to_v3i32:
; GCN: ds_read_b32
-; SI-DAG: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, 8, 8
-; VI-DAG: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, {{v[0-9]+}}
+; GCN-DAG: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, 8, 8
; GCN-DAG: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, 16, 8
; GCN-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xff,
diff --git a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
index 2145347..f107775 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -1,15 +1,11 @@
+; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -mcpu=fiji -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC
-
-declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; FUNC-LABEL: {{^}}u32_mad24:
; EG: MULADD_UINT24
; SI: v_mad_u32_u24
-; VI: v_mad_u32_u24
define void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
@@ -29,9 +25,9 @@
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 16
-; FIXME: Should be using scalar instructions here.
-; GCN: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; GCN: v_bfe_i32 v{{[0-9]}}, [[MAD]], 0, 16
+; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; SI: v_bfe_i32 v{{[0-9]}}, [[MAD]], 0, 16
+
define void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
entry:
%0 = mul i16 %a, %b
@@ -41,14 +37,14 @@
ret void
}
-; FIXME: Need to handle non-uniform case for function below (load without gep).
; FUNC-LABEL: {{^}}i8_mad24:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
-; GCN: v_mad_u32_u24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
+; SI: v_mad_u32_u24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; SI: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
+
define void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
entry:
%0 = mul i8 %a, %b
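The hunk cuts off the body of i16_mad24 above; for reference, the pattern being matched is a widening multiply-add, roughly as below. The sext and store tail is an assumption inferred from the v_bfe_i32 ..., 0, 16 check and the i32 out pointer, and the _sketch name is illustrative:

define void @i16_mad24_sketch(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
entry:
  %mul = mul i16 %a, %b                    ; mul + add fold into one v_mad_u32_u24
  %mad = add i16 %mul, %c
  %ext = sext i16 %mad to i32              ; v_bfe_i32 v{{[0-9]}}, [[MAD]], 0, 16
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}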
diff --git a/llvm/test/CodeGen/AMDGPU/max.i16.ll b/llvm/test/CodeGen/AMDGPU/max.i16.ll
deleted file mode 100644
index 0b0e026..0000000
--- a/llvm/test/CodeGen/AMDGPU/max.i16.ll
+++ /dev/null
@@ -1,87 +0,0 @@
-; RUN: llc < %s -march=amdgcn -mcpu=fiji -verify-machineinstrs | FileCheck -check-prefix=GCN -check-prefix=VI %s
-
-
-declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_imax_sge_i16:
-; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_test_imax_sge_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
- %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
- %gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
- %a = load i16, i16 addrspace(1)* %gep0, align 4
- %b = load i16, i16 addrspace(1)* %gep1, align 4
- %cmp = icmp sge i16 %a, %b
- %val = select i1 %cmp, i16 %a, i16 %b
- store i16 %val, i16 addrspace(1)* %outgep, align 4
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_imax_sge_v4i16:
-; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_test_imax_sge_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %aptr, <4 x i16> addrspace(1)* %bptr) nounwind {
- %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
- %gep0 = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %out, i32 %tid
- %a = load <4 x i16>, <4 x i16> addrspace(1)* %gep0, align 4
- %b = load <4 x i16>, <4 x i16> addrspace(1)* %gep1, align 4
- %cmp = icmp sge <4 x i16> %a, %b
- %val = select <4 x i1> %cmp, <4 x i16> %a, <4 x i16> %b
- store <4 x i16> %val, <4 x i16> addrspace(1)* %outgep, align 4
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_imax_sgt_i16:
-; VI: v_max_i16_e32
-define void @v_test_imax_sgt_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
- %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
- %gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
- %a = load i16, i16 addrspace(1)* %gep0, align 4
- %b = load i16, i16 addrspace(1)* %gep1, align 4
- %cmp = icmp sgt i16 %a, %b
- %val = select i1 %cmp, i16 %a, i16 %b
- store i16 %val, i16 addrspace(1)* %outgep, align 4
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_umax_uge_i16:
-; VI: v_max_u16_e32
-define void @v_test_umax_uge_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
- %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
- %gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
- %a = load i16, i16 addrspace(1)* %gep0, align 4
- %b = load i16, i16 addrspace(1)* %gep1, align 4
- %cmp = icmp uge i16 %a, %b
- %val = select i1 %cmp, i16 %a, i16 %b
- store i16 %val, i16 addrspace(1)* %outgep, align 4
- ret void
-}
-
-; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_umax_ugt_i16:
-; VI: v_max_u16_e32
-define void @v_test_umax_ugt_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
- %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
- %gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
- %a = load i16, i16 addrspace(1)* %gep0, align 4
- %b = load i16, i16 addrspace(1)* %gep1, align 4
- %cmp = icmp ugt i16 %a, %b
- %val = select i1 %cmp, i16 %a, i16 %b
- store i16 %val, i16 addrspace(1)* %outgep, align 4
- ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
index 7686933..9e5f31a 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
@@ -31,8 +31,7 @@
}
; FUNC-LABEL: {{^}}test_umul24_i16_vgpr_sext:
-; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; GCN: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 16
define void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
@@ -63,9 +62,8 @@
}
; FUNC-LABEL: {{^}}test_umul24_i16_vgpr:
-; SI: v_mul_u32_u24_e32
-; SI: v_and_b32_e32
-; VI: v_mul_lo_u16
+; GCN: v_mul_u32_u24_e32
+; GCN: v_and_b32_e32
define void @test_umul24_i16_vgpr(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
%tid.y = call i32 @llvm.amdgcn.workitem.id.y()
@@ -79,9 +77,9 @@
ret void
}
+; FIXME: Need to handle non-uniform case for function below (load without gep).
; FUNC-LABEL: {{^}}test_umul24_i8_vgpr:
-; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; GCN: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
define void @test_umul24_i8_vgpr(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b) {
entry:
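These tests lean on v_mul_u32_u24, which multiplies only the low 24 bits of each operand and keeps the low 32 bits of the product, so for i8 and i16 inputs the result is exact. A reference model in IR (an illustrative helper, not part of the test file):

define i32 @mul_u24_reference(i32 %a, i32 %b) {
  %a24 = and i32 %a, 16777215    ; keep the low 24 bits (0xffffff)
  %b24 = and i32 %b, 16777215
  %mul = mul i32 %a24, %b24      ; low 32 bits of the (at most 48-bit) product
  ret i32 %mul
}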
diff --git a/llvm/test/CodeGen/AMDGPU/shl.ll b/llvm/test/CodeGen/AMDGPU/shl.ll
index 45ae1a3..5a2b03b 100644
--- a/llvm/test/CodeGen/AMDGPU/shl.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl.ll
@@ -53,48 +53,6 @@
ret void
}
-;VI: {{^}}shl_i16:
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-
-define void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
- %b_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
- %a = load i16, i16 addrspace(1) * %in
- %b = load i16, i16 addrspace(1) * %b_ptr
- %result = shl i16 %a, %b
- store i16 %result, i16 addrspace(1)* %out
- ret void
-}
-
-
-;VI: {{^}}shl_v2i16:
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-
-define void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i16 1
- %a = load <2 x i16>, <2 x i16> addrspace(1) * %in
- %b = load <2 x i16>, <2 x i16> addrspace(1) * %b_ptr
- %result = shl <2 x i16> %a, %b
- store <2 x i16> %result, <2 x i16> addrspace(1)* %out
- ret void
-}
-
-
-;VI: {{^}}shl_v4i16:
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-;VI: v_lshlrev_b16_e32 v{{[0-9]+, [0-9]+, [0-9]+}}
-
-define void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i16 1
- %a = load <4 x i16>, <4 x i16> addrspace(1) * %in
- %b = load <4 x i16>, <4 x i16> addrspace(1) * %b_ptr
- %result = shl <4 x i16> %a, %b
- store <4 x i16> %result, <4 x i16> addrspace(1)* %out
- ret void
-}
-
;EG-LABEL: {{^}}shl_i64:
;EG: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
;EG: LSHR {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
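Deleting the three 16-bit shift tests matches the post-revert lowering: with no native i16 shl, the operation is widened to 32 bits and the truncating store recovers the 16-bit result. In rough pseudo-assembly (register names illustrative; note the shift-first operand order of the *rev opcodes):

;   v_lshlrev_b32_e32 vDST, vSHIFT, vDATA   ; vDST = vDATA << vSHIFT
;   buffer_store_short vDST, ...            ; truncating store keeps bits [15:0]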
diff --git a/llvm/test/CodeGen/AMDGPU/sign_extend.ll b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
index a7db28e..30e6bd1 100644
--- a/llvm/test/CodeGen/AMDGPU/sign_extend.ll
+++ b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}s_sext_i1_to_i32:
; GCN: v_cndmask_b32_e64
@@ -55,43 +55,22 @@
}
; GCN-LABEL: {{^}}s_sext_i16_to_i64:
-; GCN: s_bfe_i64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x100000
+; GCN: s_endpgm
define void @s_sext_i16_to_i64(i64 addrspace(1)* %out, i16 %a) nounwind {
%sext = sext i16 %a to i64
store i64 %sext, i64 addrspace(1)* %out, align 8
ret void
}
-; GCN-LABEL: {{^}}s_sext_i1_to_i16:
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1
-; GCN-NEXT: buffer_store_short [[RESULT]]
-define void @s_sext_i1_to_i16(i16 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
- %cmp = icmp eq i32 %a, %b
- %sext = sext i1 %cmp to i16
- store i16 %sext, i16 addrspace(1)* %out
- ret void
-}
-
; GCN-LABEL: {{^}}s_sext_v4i8_to_v4i32:
; GCN: s_load_dword [[VAL:s[0-9]+]]
+; GCN-DAG: s_sext_i32_i8 [[EXT0:s[0-9]+]], [[VAL]]
+; GCN-DAG: s_bfe_i32 [[EXT1:s[0-9]+]], [[VAL]], 0x80008
; GCN-DAG: s_bfe_i32 [[EXT2:s[0-9]+]], [[VAL]], 0x80010
; GCN-DAG: s_ashr_i32 [[EXT3:s[0-9]+]], [[VAL]], 24
-; SI-DAG: s_bfe_i32 [[EXT1:s[0-9]+]], [[VAL]], 0x80008
-; GCN-DAG: s_sext_i32_i8 [[EXT0:s[0-9]+]], [[VAL]]
-
-; FIXME: We end up with a v_bfe instruction, because the i16 srl
-; gets selected to a v_lshrrev_b16 instruction, so the input to
-; the bfe is a vector register. To fix this we need to be able to
-; optimize:
-; t29: i16 = truncate t10
-; t55: i16 = srl t29, Constant:i32<8>
-; t63: i32 = any_extend t55
-; t64: i32 = sign_extend_inreg t63, ValueType:ch:i8
-
-; VI-DAG: v_bfe_i32 [[VEXT1:v[0-9]+]], v{{[0-9]+}}, 0, 8
; GCN-DAG: v_mov_b32_e32 [[VEXT0:v[0-9]+]], [[EXT0]]
-; SI-DAG: v_mov_b32_e32 [[VEXT1:v[0-9]+]], [[EXT1]]
+; GCN-DAG: v_mov_b32_e32 [[VEXT1:v[0-9]+]], [[EXT1]]
; GCN-DAG: v_mov_b32_e32 [[VEXT2:v[0-9]+]], [[EXT2]]
; GCN-DAG: v_mov_b32_e32 [[VEXT3:v[0-9]+]], [[EXT3]]
@@ -117,17 +96,10 @@
; GCN-LABEL: {{^}}v_sext_v4i8_to_v4i32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
-; FIXME: need to optimize same sequence as above test to avoid
-; this shift.
-; VI-DAG: v_lshrrev_b16_e32 [[SH16:v[0-9]+]], 8, [[VAL]]
+; GCN-DAG: v_bfe_i32 [[EXT0:v[0-9]+]], [[VAL]], 0, 8
+; GCN-DAG: v_bfe_i32 [[EXT1:v[0-9]+]], [[VAL]], 8, 8
+; GCN-DAG: v_bfe_i32 [[EXT2:v[0-9]+]], [[VAL]], 16, 8
; GCN-DAG: v_ashrrev_i32_e32 [[EXT3:v[0-9]+]], 24, [[VAL]]
-; VI-DAG: v_bfe_i32 [[EXT0:v[0-9]+]], [[VAL]], 0, 8
-; VI-DAG: v_bfe_i32 [[EXT2:v[0-9]+]], [[VAL]], 16, 8
-; VI-DAG: v_bfe_i32 [[EXT1:v[0-9]+]], [[SH16]], 0, 8
-
-; SI-DAG: v_bfe_i32 [[EXT2:v[0-9]+]], [[VAL]], 16, 8
-; SI-DAG: v_bfe_i32 [[EXT1:v[0-9]+]], [[VAL]], 8, 8
-; SI: v_bfe_i32 [[EXT0:v[0-9]+]], [[VAL]], 0, 8
; GCN: buffer_store_dword [[EXT0]]
; GCN: buffer_store_dword [[EXT1]]
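A note on the s_bfe_i64 check dropped in the first hunk of this file: the scalar BFE immediate packs its field description as (width << 16) | offset, so 0x100000 means offset 0, width 16, i.e. sign-extend the low 16 bits of the source across the 64-bit result:

;   0x100000 = (16 << 16) | 0   -> s_bfe_i64 sign-extends bits [15:0] to 64 bits

The replacement s_endpgm check only pins down that the function compiles at all, not the shape of the generated code.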
diff --git a/llvm/test/CodeGen/AMDGPU/sra.ll b/llvm/test/CodeGen/AMDGPU/sra.ll
index 7105474..dddfbfd 100644
--- a/llvm/test/CodeGen/AMDGPU/sra.ll
+++ b/llvm/test/CodeGen/AMDGPU/sra.ll
@@ -46,36 +46,6 @@
ret void
}
-; FUNC-LABEL: {{^}}ashr_v2i16:
-; FIXME: The ashr operation is uniform, but because its operands come from a
-; global load we end up with the vector instructions rather than scalar.
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @ashr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i16 1
- %a = load <2 x i16>, <2 x i16> addrspace(1)* %in
- %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
- %result = ashr <2 x i16> %a, %b
- store <2 x i16> %result, <2 x i16> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}ashr_v4i16:
-; FIXME: The ashr operation is uniform, but because its operands come from a
-; global load we end up with the vector instructions rather than scalar.
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @ashr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i16 1
- %a = load <4 x i16>, <4 x i16> addrspace(1)* %in
- %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
- %result = ashr <4 x i16> %a, %b
- store <4 x i16> %result, <4 x i16> addrspace(1)* %out
- ret void
-}
-
; FUNC-LABEL: {{^}}s_ashr_i64:
; GCN: s_ashr_i64 s[{{[0-9]}}:{{[0-9]}}], s[{{[0-9]}}:{{[0-9]}}], 8
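The FIXME removed above is about divergence bookkeeping rather than the shift itself: values produced by a global load live in VGPRs, and SALU instructions cannot read VGPRs, so even a formally uniform ashr selects the VALU form. Were both operands in SGPRs, the scalar instruction would be used instead, roughly:

;   v_ashrrev_i32_e32 vDST, vSHIFT, vSRC   ; VALU form, operands in VGPRs
;   s_ashr_i32 sDST, sSRC, sSHIFT          ; SALU form, requires SGPR operands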
diff --git a/llvm/test/CodeGen/AMDGPU/sub.ll b/llvm/test/CodeGen/AMDGPU/sub.ll
index 5816345..5a026cd 100644
--- a/llvm/test/CodeGen/AMDGPU/sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/sub.ll
@@ -54,46 +54,6 @@
ret void
}
-; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
- %b_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
- %a = load i16, i16 addrspace(1)* %in
- %b = load i16, i16 addrspace(1)* %b_ptr
- %result = sub i16 %a, %b
- store i16 %result, i16 addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}test_sub_v2i16:
-
-; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-
-define void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i16 1
- %a = load <2 x i16>, <2 x i16> addrspace(1) * %in
- %b = load <2 x i16>, <2 x i16> addrspace(1) * %b_ptr
- %result = sub <2 x i16> %a, %b
- store <2 x i16> %result, <2 x i16> addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}test_sub_v4i16:
-
-; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-
-define void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i16 1
- %a = load <4 x i16>, <4 x i16> addrspace(1) * %in
- %b = load <4 x i16>, <4 x i16> addrspace(1) * %b_ptr
- %result = sub <4 x i16> %a, %b
- store <4 x i16> %result, <4 x i16> addrspace(1)* %out
- ret void
-}
-
; FUNC-LABEL: {{^}}s_sub_i64:
; SI: s_sub_u32
; SI: s_subb_u32
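As with the shl.ll deletions above, dropping these checks leaves i16 and <N x i16> sub on the promoted path: the subtraction happens in 32 bits and the truncating store discards the high half. A rough sketch; the exact opcode spelling (v_sub vs. v_subrev, and the implicit vcc borrow-out) follows whatever the 32-bit sub tests earlier in this file expect:

;   v_subrev_i32_e32 vDST, vcc, vA, vB   ; 32-bit subtract, borrow written to vcc
;   buffer_store_short vDST, ...         ; keeps bits [15:0]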
diff --git a/llvm/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll b/llvm/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll
index f7aa4bc..9e2373c 100644
--- a/llvm/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll
+++ b/llvm/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=VI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}trunc_i64_bitcast_v2i32:
; CHECK: buffer_load_dword v
@@ -47,12 +47,7 @@
}
; CHECK-LABEL: {{^}}trunc_i16_bitcast_v4i16:
-; FIXME: We need to teach the dagcombiner to reduce load width for:
-; t21: v2i32,ch = load<LD8[%in(addrspace=1)]> t12, t10, undef:i64
-; t23: i64 = bitcast t21
-; t30: i16 = truncate t23
-; SI: buffer_load_dword v[[VAL:[0-9]+]]
-; VI: buffer_load_dwordx2 v{{\[}}[[VAL:[0-9]+]]
+; CHECK: buffer_load_dword [[VAL:v[0-9]+]]
; CHECK: buffer_store_short [[VAL]]
define void @trunc_i16_bitcast_v4i16(i16 addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
%ld = load <4 x i16>, <4 x i16> addrspace(1)* %in
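The dropped FIXME asks the DAG combiner to shrink the 8-byte load feeding trunc(bitcast(...)). The hunk cuts the function off after its first line; a sketch of the likely tail, reconstructed from the node dump in the deleted comment (the bitcast and trunc lines are assumptions, and the _sketch name is illustrative):

define void @trunc_i16_bitcast_v4i16_sketch(i16 addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
  %ld = load <4 x i16>, <4 x i16> addrspace(1)* %in   ; only element 0 is live, so a dword
  %bc = bitcast <4 x i16> %ld to i64                  ; (or even ushort) load suffices
  %trunc = trunc i64 %bc to i16
  store i16 %trunc, i16 addrspace(1)* %out
  ret void
}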
diff --git a/llvm/test/CodeGen/AMDGPU/trunc-store-i1.ll b/llvm/test/CodeGen/AMDGPU/trunc-store-i1.ll
index b1e879f..b71a838 100644
--- a/llvm/test/CodeGen/AMDGPU/trunc-store-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/trunc-store-i1.ll
@@ -21,20 +21,13 @@
ret void
}
-; SI-LABEL: {{^}}s_arg_global_truncstore_i16_to_i1:
+; SI-LABEL: {{^}}global_truncstore_i16_to_i1:
; SI: s_load_dword [[LOAD:s[0-9]+]],
; SI: s_and_b32 [[SREG:s[0-9]+]], [[LOAD]], 1
; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], [[SREG]]
; SI: buffer_store_byte [[VREG]],
-define void @s_arg_global_truncstore_i16_to_i1(i1 addrspace(1)* %out, i16 %val) nounwind {
+define void @global_truncstore_i16_to_i1(i1 addrspace(1)* %out, i16 %val) nounwind {
%trunc = trunc i16 %val to i1
store i1 %trunc, i1 addrspace(1)* %out, align 1
ret void
}
-; SI-LABEL: {{^}}global_truncstore_i16_to_i1:
-define void @global_truncstore_i16_to_i1(i1 addrspace(1)* %out, i16 %val0, i16 %val1) nounwind {
- %add = add i16 %val0, %val1
- %trunc = trunc i16 %add to i1
- store i1 %trunc, i1 addrspace(1)* %out, align 1
- ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/zero_extend.ll b/llvm/test/CodeGen/AMDGPU/zero_extend.ll
index b30cb73f..5353992 100644
--- a/llvm/test/CodeGen/AMDGPU/zero_extend.ll
+++ b/llvm/test/CodeGen/AMDGPU/zero_extend.ll
@@ -2,58 +2,39 @@
; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600
-; R600: {{^}}s_mad_zext_i32_to_i64:
+; R600: {{^}}test:
; R600: MEM_RAT_CACHELESS STORE_RAW
; R600: MEM_RAT_CACHELESS STORE_RAW
-; SI: {{^}}s_mad_zext_i32_to_i64:
+; SI: {{^}}test:
; SI: v_mov_b32_e32 v[[V_ZERO:[0-9]]], 0{{$}}
; SI: buffer_store_dwordx2 v[0:[[V_ZERO]]{{\]}}
-define void @s_mad_zext_i32_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) #0 {
+define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
- %tmp0 = mul i32 %a, %b
- %tmp1 = add i32 %tmp0, %c
- %tmp2 = zext i32 %tmp1 to i64
- store i64 %tmp2, i64 addrspace(1)* %out
+ %0 = mul i32 %a, %b
+ %1 = add i32 %0, %c
+ %2 = zext i32 %1 to i64
+ store i64 %2, i64 addrspace(1)* %out
ret void
}
-; SI-LABEL: {{^}}s_cmp_zext_i1_to_i32
+; SI-LABEL: {{^}}testi1toi32:
; SI: v_cndmask_b32
-define void @s_cmp_zext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define void @testi1toi32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
- %tmp0 = icmp eq i32 %a, %b
- %tmp1 = zext i1 %tmp0 to i32
- store i32 %tmp1, i32 addrspace(1)* %out
+ %0 = icmp eq i32 %a, %b
+ %1 = zext i1 %0 to i32
+ store i32 %1, i32 addrspace(1)* %out
ret void
}
-; SI-LABEL: {{^}}s_arg_zext_i1_to_i64:
-define void @s_arg_zext_i1_to_i64(i64 addrspace(1)* %out, i1 zeroext %arg) #0 {
- %ext = zext i1 %arg to i64
- store i64 %ext, i64 addrspace(1)* %out, align 8
- ret void
-}
-
-; SI-LABEL: {{^}}s_cmp_zext_i1_to_i64:
+; SI-LABEL: {{^}}zext_i1_to_i64:
; SI: s_mov_b32 s{{[0-9]+}}, 0
; SI: v_cmp_eq_u32
; SI: v_cndmask_b32
-define void @s_cmp_zext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define void @zext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp eq i32 %a, %b
%ext = zext i1 %cmp to i64
store i64 %ext, i64 addrspace(1)* %out, align 8
ret void
}
-
-; SI-LABEL: {{^}}s_cmp_zext_i1_to_i16
-; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
-; SI: buffer_store_short [[RESULT]]
-define void @s_cmp_zext_i1_to_i16(i16 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) #0 {
- %tmp0 = icmp eq i16 %a, %b
- %tmp1 = zext i1 %tmp0 to i16
- store i16 %tmp1, i16 addrspace(1)* %out
- ret void
-}
-
-attributes #0 = { nounwind }
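For the zext_i1_to_i64 checks above, the expected lowering is roughly the following sequence; register assignments and operand order are illustrative, and only the three checked opcodes are pinned by the test:

;   s_mov_b32 s[HI], 0                     ; high word of the i64 is constant zero
;   v_cmp_eq_u32 vcc, ...                  ; %cmp = icmp eq i32 %a, %b
;   v_cndmask_b32_e64 v[LO], 0, 1, vcc     ; zext i1: select 0 or 1 into the low word
;   buffer_store_dwordx2 ...               ; store both words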