AMDGPU: Remove some old intrinsic uses from tests
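Replace the legacy intrinsics with their modern AMDGPU equivalents in a few
optimizer tests, and drop declarations and metadata that are no longer
referenced:

  llvm.r600.read.tidig.x     -> llvm.amdgcn.workitem.id.x
  llvm.AMDGPU.barrier.global -> llvm.amdgcn.s.barrier

The StructurizeCFG test also loses its unused llvm.SI.vs.load.input,
llvm.AMDGPU.clamp.f32 and llvm.SI.export declarations. For reference, the
replacement declarations as they appear in the updated tests:

  declare i32 @llvm.amdgcn.workitem.id.x() #1   ; #1 = nounwind readnone
  declare void @llvm.amdgcn.s.barrier() #1      ; #1 = nounwind convergent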
llvm-svn: 260493
diff --git a/llvm/test/Transforms/CodeGenPrepare/AMDGPU/no-sink-addrspacecast.ll b/llvm/test/Transforms/CodeGenPrepare/AMDGPU/no-sink-addrspacecast.ll
index f6f898f..6cec253 100644
--- a/llvm/test/Transforms/CodeGenPrepare/AMDGPU/no-sink-addrspacecast.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/AMDGPU/no-sink-addrspacecast.ll
@@ -8,7 +8,7 @@
define void @test_sink_ptrtoint_asc(float addrspace(1)* nocapture %arg, float addrspace(1)* nocapture readonly %arg1, float addrspace(3)* %arg2) #0 {
bb:
%tmp = getelementptr inbounds float, float addrspace(3)* %arg2, i32 16
- %tmp2 = tail call i32 @llvm.r600.read.tidig.x() #1
+ %tmp2 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%tmp3 = sext i32 %tmp2 to i64
%tmp4 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %tmp3
%tmp5 = load float, float addrspace(1)* %tmp4, align 4
@@ -43,7 +43,7 @@
}
declare float @llvm.fma.f32(float, float, float) #1
-declare i32 @llvm.r600.read.tidig.x() #1
+declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-barrier.ll b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-barrier.ll
index 3cbb702..e732ddc 100644
--- a/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-barrier.ll
+++ b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-barrier.ll
@@ -1,10 +1,10 @@
; RUN: opt -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -loop-unroll -S < %s | FileCheck %s
; CHECK-LABEL: @test_unroll_convergent_barrier(
-; CHECK: call void @llvm.AMDGPU.barrier.global()
-; CHECK: call void @llvm.AMDGPU.barrier.global()
-; CHECK: call void @llvm.AMDGPU.barrier.global()
-; CHECK: call void @llvm.AMDGPU.barrier.global()
+; CHECK: call void @llvm.amdgcn.s.barrier()
+; CHECK: call void @llvm.amdgcn.s.barrier()
+; CHECK: call void @llvm.amdgcn.s.barrier()
+; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK-NOT: br
define void @test_unroll_convergent_barrier(i32 addrspace(1)* noalias nocapture %out, i32 addrspace(1)* noalias nocapture %in) #0 {
entry:
@@ -16,7 +16,7 @@
%arrayidx.in = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %indvars.iv
%arrayidx.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %indvars.iv
%load = load i32, i32 addrspace(1)* %arrayidx.in
- call void @llvm.AMDGPU.barrier.global() #1
+ call void @llvm.amdgcn.s.barrier() #1
%add = add i32 %load, %sum.02
store i32 %add, i32 addrspace(1)* %arrayidx.out
%indvars.iv.next = add i32 %indvars.iv, 1
@@ -27,7 +27,7 @@
ret void
}
-declare void @llvm.AMDGPU.barrier.global() #1
+declare void @llvm.amdgcn.s.barrier() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind convergent }
diff --git a/llvm/test/Transforms/StructurizeCFG/nested-loop-order.ll b/llvm/test/Transforms/StructurizeCFG/nested-loop-order.ll
index 9f1e5a9..2ef1a4f 100644
--- a/llvm/test/Transforms/StructurizeCFG/nested-loop-order.ll
+++ b/llvm/test/Transforms/StructurizeCFG/nested-loop-order.ll
@@ -63,17 +63,6 @@
br i1 %tmp36, label %ENDLOOP, label %LOOP.outer
}
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.vs.load.input(<16 x i8>, i32, i32) #1
-
-; Function Attrs: readnone
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) #2
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
attributes #0 = { "ShaderType"="1" "enable-no-nans-fp-math"="true" "unsafe-fp-math"="true" }
attributes #1 = { nounwind readnone }
attributes #2 = { readnone }
-
-!0 = !{!1, !1, i64 0, i32 1}
-!1 = !{!"const", null}