AMDGPU: Avoid selecting ds_{read,write}2_b32 on SI
Summary:
To work around a hardware issue in the (base + offset) calculation
when the base is negative. The impact on code quality should be limited
since SILoadStoreOptimizer still runs afterwards and is able to
combine loads/stores based on known sign information.
This fixes visible corruption in Hitman on SI (easily reproducible
by running benchmark mode).
Change-Id: Ia178d207a5e2ac38ae7cd98b532ea2ae74704e5f
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=99923
Reviewers: arsenm, mareko
Subscribers: jholewinski, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits
Differential Revision: https://reviews.llvm.org/D53160
llvm-svn: 344698
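
For illustration, here is a minimal IR sketch of the two situations the new
test below exercises; @lds, @sketch, and the expected-instruction notes are
illustrative only (plain comments, not FileCheck directives) and are not part
of the patch:

@lds = external addrspace(3) global [512 x i32], align 16

define amdgpu_cs void @sketch(i32 %idx) {
entry:
  ; Unconstrained index: the base may be negative, so on SI these two
  ; stores must stay as two ds_write_b32 instructions; on CI and newer
  ; they may still be selected as a single ds_write2_b32.
  %p.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @lds, i32 0, i32 %idx
  %p.b = getelementptr i32, i32 addrspace(3)* %p.a, i32 1
  store i32 42, i32 addrspace(3)* %p.a
  store i32 43, i32 addrspace(3)* %p.b

  ; Masked index: the base is provably non-negative, so even on SI the
  ; SILoadStoreOptimizer can re-combine the pair into ds_write2_b32.
  %idx.and = and i32 %idx, 255
  %q.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @lds, i32 0, i32 %idx.and
  %q.b = getelementptr i32, i32 addrspace(3)* %q.a, i32 1
  store i32 44, i32 addrspace(3)* %q.a
  store i32 45, i32 addrspace(3)* %q.b
  ret void
}

On CI and newer parts the (base + offset) calculation is not affected, so
ds_read2/write2 selection is unchanged there (hence the isCIVI predicate).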
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 4010b77..025e2de7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -978,8 +978,6 @@
// default case
- // FIXME: This is broken on SI where we still need to check if the base
- // pointer is positive here.
Base = Addr;
Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index cdc6ab9..31d2ebe 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -728,7 +728,9 @@
(i1 0))
>;
-let OtherPredicates = [LDSRequiresM0Init] in {
+// v2i32 loads are split into i32 loads on SI during lowering, due to a
+// hardware bug related to bounds checking.
+let OtherPredicates = [LDSRequiresM0Init, isCIVI] in {
def : DS64Bit4ByteAlignedReadPat<DS_READ2_B32, load_local_m0>;
def : DS64Bit4ByteAlignedWritePat<DS_WRITE2_B32, store_local_m0>;
}
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index c2cf307..81ff640 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6292,6 +6292,17 @@
if (NumElements > 2)
return SplitVectorLoad(Op, DAG);
+
+ // SI has a hardware bug in the LDS / GDS bounds checking: if the base
+ // address is negative, then the instruction is incorrectly treated as
+ // out-of-bounds even if base + offset is in bounds. Split vectorized
+ // loads here to avoid emitting ds_read2_b32. We may re-combine the
+ // load later in the SILoadStoreOptimizer.
+ if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
+ NumElements == 2 && MemVT.getStoreSize() == 8 &&
+ Load->getAlignment() < 8) {
+ return SplitVectorLoad(Op, DAG);
+ }
}
return SDValue();
}
@@ -6694,6 +6705,18 @@
if (NumElements > 2)
return SplitVectorStore(Op, DAG);
+
+ // SI has a hardware bug in the LDS / GDS bounds checking: if the base
+ // address is negative, then the instruction is incorrectly treated as
+ // out-of-bounds even if base + offset is in bounds. Split vectorized
+ // stores here to avoid emitting ds_write2_b32. We may re-combine the
+ // store later in the SILoadStoreOptimizer.
+ if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
+ NumElements == 2 && VT.getStoreSize() == 8 &&
+ Store->getAlignment() < 8) {
+ return SplitVectorStore(Op, DAG);
+ }
+
return SDValue();
} else {
llvm_unreachable("unhandled address space");
diff --git a/llvm/test/CodeGen/AMDGPU/lds-bounds.ll b/llvm/test/CodeGen/AMDGPU/lds-bounds.ll
new file mode 100644
index 0000000..80a2628
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lds-bounds.ll
@@ -0,0 +1,129 @@
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,NONSI %s
+
+@compute_lds = external addrspace(3) global [512 x i32], align 16
+
+; GCN-LABEL: {{^}}store_aligned:
+; GCN: ds_write_b64
+define amdgpu_cs void @store_aligned(i32 addrspace(3)* %ptr) #0 {
+entry:
+ %ptr.gep.1 = getelementptr i32, i32 addrspace(3)* %ptr, i32 1
+
+ store i32 42, i32 addrspace(3)* %ptr, align 8
+ store i32 43, i32 addrspace(3)* %ptr.gep.1
+ ret void
+}
+
+
+; GCN-LABEL: {{^}}load_aligned:
+; GCN: ds_read_b64
+define amdgpu_cs <2 x float> @load_aligned(i32 addrspace(3)* %ptr) #0 {
+entry:
+ %ptr.gep.1 = getelementptr i32, i32 addrspace(3)* %ptr, i32 1
+
+ %v.0 = load i32, i32 addrspace(3)* %ptr, align 8
+ %v.1 = load i32, i32 addrspace(3)* %ptr.gep.1
+
+ %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+ %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+ %bc = bitcast <2 x i32> %r.1 to <2 x float>
+ ret <2 x float> %bc
+}
+
+
+; GCN-LABEL: {{^}}store_global_const_idx:
+; GCN: ds_write2_b32
+define amdgpu_cs void @store_global_const_idx() #0 {
+entry:
+ %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 3
+ %ptr.b = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 4
+
+ store i32 42, i32 addrspace(3)* %ptr.a
+ store i32 43, i32 addrspace(3)* %ptr.b
+ ret void
+}
+
+
+; GCN-LABEL: {{^}}load_global_const_idx:
+; GCN: ds_read2_b32
+define amdgpu_cs <2 x float> @load_global_const_idx() #0 {
+entry:
+ %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 3
+ %ptr.b = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 4
+
+ %v.0 = load i32, i32 addrspace(3)* %ptr.a
+ %v.1 = load i32, i32 addrspace(3)* %ptr.b
+
+ %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+ %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+ %bc = bitcast <2 x i32> %r.1 to <2 x float>
+ ret <2 x float> %bc
+}
+
+
+; GCN-LABEL: {{^}}store_global_var_idx_case1:
+; SI: ds_write_b32
+; SI: ds_write_b32
+; NONSI: ds_write2_b32
+define amdgpu_cs void @store_global_var_idx_case1(i32 %idx) #0 {
+entry:
+ %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx
+ %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+ store i32 42, i32 addrspace(3)* %ptr.a
+ store i32 43, i32 addrspace(3)* %ptr.b
+ ret void
+}
+
+
+; GCN-LABEL: {{^}}load_global_var_idx_case1:
+; SI: ds_read_b32
+; SI: ds_read_b32
+; NONSI: ds_read2_b32
+define amdgpu_cs <2 x float> @load_global_var_idx_case1(i32 %idx) #0 {
+entry:
+ %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx
+ %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+ %v.0 = load i32, i32 addrspace(3)* %ptr.a
+ %v.1 = load i32, i32 addrspace(3)* %ptr.b
+
+ %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+ %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+ %bc = bitcast <2 x i32> %r.1 to <2 x float>
+ ret <2 x float> %bc
+}
+
+
+; GCN-LABEL: {{^}}store_global_var_idx_case2:
+; GCN: ds_write2_b32
+define amdgpu_cs void @store_global_var_idx_case2(i32 %idx) #0 {
+entry:
+ %idx.and = and i32 %idx, 255
+ %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx.and
+ %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+ store i32 42, i32 addrspace(3)* %ptr.a
+ store i32 43, i32 addrspace(3)* %ptr.b
+ ret void
+}
+
+
+; GCN-LABEL: {{^}}load_global_var_idx_case2:
+; GCN: ds_read2_b32
+define amdgpu_cs <2 x float> @load_global_var_idx_case2(i32 %idx) #0 {
+entry:
+ %idx.and = and i32 %idx, 255
+ %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx.and
+ %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+ %v.0 = load i32, i32 addrspace(3)* %ptr.a
+ %v.1 = load i32, i32 addrspace(3)* %ptr.b
+
+ %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+ %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+ %bc = bitcast <2 x i32> %r.1 to <2 x float>
+ ret <2 x float> %bc
+}
+
+attributes #0 = { nounwind }