[AMDGPU] Support ds_read_b128 generation; widen vector length for the local address space.
Summary: Starting from the 2nd generation of GCN, the ISA supports ds_read_b128
on top of ds_read_b64. This patch adds the ds_read_b128 instruction pattern and
the generation of this instruction.
In the vectorizer, this patch also widens the vector length so that the
vectorizer generates 128-bit loads for the local address space, which get
translated to ds_read_b128.
Since the performance benefit is not clear, the compiler only generates
ds_read_b128 under the -amdgpu-ds128 option.
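
For illustration, a minimal sketch of the pattern this enables, mirroring the
tests below (the kernel and file names here are placeholders): a 16 byte
aligned <4 x i32> load from the local address space is selected to
ds_read_b128 when -amdgpu-ds128 is passed.

  ; llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 ds128-example.ll
  define amdgpu_kernel void @ds128_example(<4 x i32> addrspace(3)* %out,
                                           <4 x i32> addrspace(3)* %in) {
    ; 16 byte aligned LDS load; lowered to ds_read_b128 on GCN 2nd gen and up
    %ld = load <4 x i32>, <4 x i32> addrspace(3)* %in, align 16
    store <4 x i32> %ld, <4 x i32> addrspace(3)* %out
    ret void
  }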
Author: FarhanaAleen
Reviewed By: rampitec, arsenm
Subscribers: llvm-commits, AMDGPU
Differential Revision: https://reviews.llvm.org/D44210
llvm-svn: 327153
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-f32.ll b/llvm/test/CodeGen/AMDGPU/load-local-f32.ll
index f035d22..3d87208 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-f32.ll
@@ -2,6 +2,11 @@
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SICIVI,FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tahiti -amdgpu-ds128 < %s | FileCheck -check-prefixes=SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
; FUNC-LABEL: {{^}}load_f32_local:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
@@ -122,4 +127,18 @@
ret void
}
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v4f32_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v4f32_to_128(<4 x float> addrspace(3)* %out, <4 x float> addrspace(3)* %in) {
+ %ld = load <4 x float>, <4 x float> addrspace(3)* %in, align 16
+ store <4 x float> %ld, <4 x float> addrspace(3)* %out
+ ret void
+}
+
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-f64.ll b/llvm/test/CodeGen/AMDGPU/load-local-f64.ll
index ffb6710..14c31e6 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-f64.ll
@@ -4,6 +4,10 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
; FUNC-LABEL: {{^}}local_load_f64:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
@@ -170,4 +174,18 @@
ret void
}
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_load_v2f64_to_128:
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_load_v2f64_to_128(<2 x double> addrspace(3)* %out, <2 x double> addrspace(3)* %in) {
+entry:
+ %ld = load <2 x double>, <2 x double> addrspace(3)* %in, align 16
+ store <2 x double> %ld, <2 x double> addrspace(3)* %out
+ ret void
+}
+
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
index d3557c1..7438fd2 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
@@ -3,6 +3,10 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,GFX89,FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
; FUNC-LABEL: {{^}}local_load_i16:
; GFX9-NOT: m0
; SICIVI: s_mov_b32 m0
@@ -935,4 +939,18 @@
; ret void
; }
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v8i16_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v8i16_to_128(<8 x i16> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) {
+ %ld = load <8 x i16>, <8 x i16> addrspace(3)* %in, align 16
+ store <8 x i16> %ld, <8 x i16> addrspace(3)* %out
+ ret void
+}
+
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i32.ll b/llvm/test/CodeGen/AMDGPU/load-local-i32.ll
index c736586..1dd7daf 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i32.ll
@@ -3,6 +3,11 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tahiti -amdgpu-ds128 < %s | FileCheck -check-prefixes=SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
; FUNC-LABEL: {{^}}local_load_i32:
; GCN-NOT: s_wqm_b64
; SICIVI: s_mov_b32 m0, -1
@@ -175,6 +180,20 @@
ret void
}
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v4i32_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v4i32_to_128(<4 x i32> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) {
+ %ld = load <4 x i32>, <4 x i32> addrspace(3)* %in, align 16
+ store <4 x i32> %ld, <4 x i32> addrspace(3)* %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}local_zextload_v8i32_to_v8i64:
; SICIVI: s_mov_b32 m0, -1
; GFX9-NOT: m0
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i64.ll b/llvm/test/CodeGen/AMDGPU/load-local-i64.ll
index 376f6f5..359fbb4 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i64.ll
@@ -4,6 +4,10 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
; FUNC-LABEL: {{^}}local_load_i64:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
@@ -36,6 +40,16 @@
ret void
}
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_load_v2i64_to_128:
+; CIVI: ds_read_b128
+define amdgpu_kernel void @local_load_v2i64_to_128(<2 x i64> addrspace(3)* %out, <2 x i64> addrspace(3)* %in) {
+entry:
+ %ld = load <2 x i64>, <2 x i64> addrspace(3)* %in
+ store <2 x i64> %ld, <2 x i64> addrspace(3)* %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}local_load_v3i64:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i8.ll b/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
index 72f5408..e1931af 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
@@ -3,6 +3,9 @@
; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -march=r600 -mtriple=r600---amdgiz -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
; FUNC-LABEL: {{^}}local_load_i8:
; GCN-NOT: s_wqm_b64
@@ -1021,4 +1024,18 @@
; ret void
; }
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v16i8_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v16i8_to_128(<16 x i8> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) {
+ %ld = load <16 x i8>, <16 x i8> addrspace(3)* %in, align 16
+ store <16 x i8> %ld, <16 x i8> addrspace(3)* %out
+ ret void
+}
+
attributes #0 = { nounwind }
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
index 19fc44b..5eb3b25 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
@@ -504,8 +504,7 @@
}
; CHECK-LABEL: @merge_local_store_4_constants_i32
-; CHECK: store <2 x i32> <i32 456, i32 333>, <2 x i32> addrspace(3)*
-; CHECK: store <2 x i32> <i32 1234, i32 123>, <2 x i32> addrspace(3)*
+; CHECK: store <4 x i32> <i32 1234, i32 123, i32 456, i32 333>, <4 x i32> addrspace(3)*
define amdgpu_kernel void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(3)* %out, i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
index 8a78f3d..b684ca8 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
@@ -29,11 +29,10 @@
; longest chain vectorized
; CHECK-LABEL: @interleave_get_longest
-; CHECK: load <2 x i32>
+; CHECK: load <4 x i32>
; CHECK: load i32
; CHECK: store <2 x i32> zeroinitializer
; CHECK: load i32
-; CHECK: load <2 x i32>
; CHECK: load i32
; CHECK: load i32