AMDGPU: Cost model for basic integer operations
This resolves bug 21148 by preventing induction variables from
being promoted to i64.
llvm-svn: 264376
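
The implementation side of this change is not shown in this excerpt (only the
new tests are). The standalone C++ sketch below merely summarizes the
per-operation costs the new CHECK lines expect; the names (IntOp,
estimateIntOpCost, HasHalfRate64Ops) are illustrative and are not the in-tree
TargetTransformInfo interface.

// Illustrative sketch only: condenses the costs encoded in the tests below.
enum class IntOp { Add, Sub, And, Or, Xor, Shl, LShr, AShr, Mul };

unsigned estimateIntOpCost(IntOp Op, unsigned BitWidth, unsigned NumElts,
                           bool HasHalfRate64Ops) {
  bool Is64Bit = BitWidth > 32;
  unsigned PerElement;
  switch (Op) {
  case IntOp::Mul:
    // Integer multiply is costed well above full-rate ALU ops; a 64-bit
    // multiply expands into many 32-bit multiplies and adds.
    PerElement = Is64Bit ? 16 : 3;
    break;
  case IntOp::Shl:
  case IntOp::LShr:
  case IntOp::AShr:
    // 64-bit shifts are cheaper on subtargets with +half-rate-64-ops.
    PerElement = Is64Bit ? (HasHalfRate64Ops ? 2 : 3) : 1;
    break;
  default:
    // add/sub/and/or/xor: one full-rate op per 32-bit half.
    PerElement = Is64Bit ? 2 : 1;
    break;
  }
  // Vector operations are scalarized, so the cost scales with element count.
  return PerElement * NumElts;
}

For example, add_v4i64 below (four 64-bit elements) gives 4 * 2 = 8, matching
its CHECK line, and mul_v8i64 gives 8 * 16 = 128.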
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll b/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll
new file mode 100644
index 0000000..76b21d2
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll
@@ -0,0 +1,139 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck %s
+
+; CHECK: 'add_i32'
+; CHECK: estimated cost of 1 for {{.*}} add i32
+define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+ %vec = load i32, i32 addrspace(1)* %vaddr
+ %add = add i32 %vec, %b
+ store i32 %add, i32 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_v2i32'
+; CHECK: estimated cost of 2 for {{.*}} add <2 x i32>
+define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
+ %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
+ %add = add <2 x i32> %vec, %b
+ store <2 x i32> %add, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_v3i32'
+; CHECK: estimated cost of 3 for {{.*}} add <3 x i32>
+define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
+ %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
+ %add = add <3 x i32> %vec, %b
+ store <3 x i32> %add, <3 x i32> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_v4i32'
+; CHECK: estimated cost of 4 for {{.*}} add <4 x i32>
+define void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
+ %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
+ %add = add <4 x i32> %vec, %b
+ store <4 x i32> %add, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_i64'
+; CHECK: estimated cost of 2 for {{.*}} add i64
+define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+ %vec = load i64, i64 addrspace(1)* %vaddr
+ %add = add i64 %vec, %b
+ store i64 %add, i64 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_v2i64'
+; CHECK: estimated cost of 4 for {{.*}} add <2 x i64>
+define void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
+ %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
+ %add = add <2 x i64> %vec, %b
+ store <2 x i64> %add, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_v3i64'
+; CHECK: estimated cost of 6 for {{.*}} add <3 x i64>
+define void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
+ %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
+ %add = add <3 x i64> %vec, %b
+ store <3 x i64> %add, <3 x i64> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_v4i64'
+; CHECK: estimated cost of 8 for {{.*}} add <4 x i64>
+define void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
+ %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
+ %add = add <4 x i64> %vec, %b
+ store <4 x i64> %add, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_v16i64'
+; CHECK: estimated cost of 32 for {{.*}} add <16 x i64>
+define void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %vaddr, <16 x i64> %b) #0 {
+ %vec = load <16 x i64>, <16 x i64> addrspace(1)* %vaddr
+ %add = add <16 x i64> %vec, %b
+ store <16 x i64> %add, <16 x i64> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_i16'
+; CHECK: estimated cost of 1 for {{.*}} add i16
+define void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+ %vec = load i16, i16 addrspace(1)* %vaddr
+ %add = add i16 %vec, %b
+ store i16 %add, i16 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'add_v2i16'
+; CHECK: estimated cost of 2 for {{.*}} add <2 x i16>
+define void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+ %add = add <2 x i16> %vec, %b
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'sub_i32'
+; CHECK: estimated cost of 1 for {{.*}} sub i32
+define void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+ %vec = load i32, i32 addrspace(1)* %vaddr
+ %sub = sub i32 %vec, %b
+ store i32 %sub, i32 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'sub_i64'
+; CHECK: estimated cost of 2 for {{.*}} sub i64
+define void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+ %vec = load i64, i64 addrspace(1)* %vaddr
+ %sub = sub i64 %vec, %b
+ store i64 %sub, i64 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'sub_i16'
+; CHECK: estimated cost of 1 for {{.*}} sub i16
+define void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+ %vec = load i16, i16 addrspace(1)* %vaddr
+ %sub = sub i16 %vec, %b
+ store i16 %sub, i16 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'sub_v2i16'
+; CHECK: estimated cost of 2 for {{.*}} sub <2 x i16>
+define void @sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+ %sub = sub <2 x i16> %vec, %b
+ store <2 x i16> %sub, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/bit-ops.ll b/llvm/test/Analysis/CostModel/AMDGPU/bit-ops.ll
new file mode 100644
index 0000000..a809dbd
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/bit-ops.ll
@@ -0,0 +1,59 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+
+; CHECK: 'or_i32'
+; CHECK: estimated cost of 1 for {{.*}} or i32
+define void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+ %vec = load i32, i32 addrspace(1)* %vaddr
+ %or = or i32 %vec, %b
+ store i32 %or, i32 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'or_i64'
+; CHECK: estimated cost of 2 for {{.*}} or i64
+define void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+ %vec = load i64, i64 addrspace(1)* %vaddr
+ %or = or i64 %vec, %b
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'xor_i32'
+; CHECK: estimated cost of 1 for {{.*}} xor i32
+define void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+ %vec = load i32, i32 addrspace(1)* %vaddr
+ %xor = xor i32 %vec, %b
+ store i32 %xor, i32 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'xor_i64'
+; CHECK: estimated cost of 2 for {{.*}} xor i64
+define void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+ %vec = load i64, i64 addrspace(1)* %vaddr
+ %xor = xor i64 %vec, %b
+ store i64 %xor, i64 addrspace(1)* %out
+ ret void
+}
+
+
+; CHECK: 'and_i32'
+; CHECK: estimated cost of 1 for {{.*}} and i32
+define void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+ %vec = load i32, i32 addrspace(1)* %vaddr
+ %and = and i32 %vec, %b
+ store i32 %and, i32 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'and_i64'
+; CHECK: estimated cost of 2 for {{.*}} and i64
+define void @and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+ %vec = load i64, i64 addrspace(1)* %vaddr
+ %and = and i64 %vec, %b
+ store i64 %and, i64 addrspace(1)* %out
+ ret void
+}
+
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/mul.ll b/llvm/test/Analysis/CostModel/AMDGPU/mul.ll
new file mode 100644
index 0000000..cbc755a
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/mul.ll
@@ -0,0 +1,85 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+
+; CHECK: 'mul_i32'
+; CHECK: estimated cost of 3 for {{.*}} mul i32
+define void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+ %vec = load i32, i32 addrspace(1)* %vaddr
+ %mul = mul i32 %vec, %b
+ store i32 %mul, i32 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'mul_v2i32'
+; CHECK: estimated cost of 6 for {{.*}} mul <2 x i32>
+define void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
+ %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
+ %mul = mul <2 x i32> %vec, %b
+ store <2 x i32> %mul, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'mul_v3i32'
+; CHECK: estimated cost of 9 for {{.*}} mul <3 x i32>
+define void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
+ %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
+ %mul = mul <3 x i32> %vec, %b
+ store <3 x i32> %mul, <3 x i32> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'mul_v4i32'
+; CHECK: estimated cost of 12 for {{.*}} mul <4 x i32>
+define void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
+ %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
+ %mul = mul <4 x i32> %vec, %b
+ store <4 x i32> %mul, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'mul_i64'
+; CHECK: estimated cost of 16 for {{.*}} mul i64
+define void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+ %vec = load i64, i64 addrspace(1)* %vaddr
+ %mul = mul i64 %vec, %b
+ store i64 %mul, i64 addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'mul_v2i64'
+; CHECK: estimated cost of 32 for {{.*}} mul <2 x i64>
+define void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
+ %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
+ %mul = mul <2 x i64> %vec, %b
+ store <2 x i64> %mul, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'mul_v3i64'
+; CHECK: estimated cost of 48 for {{.*}} mul <3 x i64>
+define void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
+ %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
+ %mul = mul <3 x i64> %vec, %b
+ store <3 x i64> %mul, <3 x i64> addrspace(1)* %out
+ ret void
+}
+
+; CHECK: 'mul_v4i64'
+; CHECK: estimated cost of 64 for {{.*}} mul <4 x i64>
+define void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
+ %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
+ %mul = mul <4 x i64> %vec, %b
+ store <4 x i64> %mul, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+
+; CHECK: 'mul_v8i64'
+; CHECK: estimated cost of 128 for {{.*}} mul <8 x i64>
+define void @mul_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr, <8 x i64> %b) #0 {
+ %vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr
+ %mul = mul <8 x i64> %vec, %b
+ store <8 x i64> %mul, <8 x i64> addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/shifts.ll b/llvm/test/Analysis/CostModel/AMDGPU/shifts.ll
new file mode 100644
index 0000000..003aed7
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/shifts.ll
@@ -0,0 +1,61 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=FAST64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SLOW64 %s
+
+; ALL: 'shl_i32'
+; ALL: estimated cost of 1 for {{.*}} shl i32
+define void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+ %vec = load i32, i32 addrspace(1)* %vaddr
+ %shl = shl i32 %vec, %b
+ store i32 %shl, i32 addrspace(1)* %out
+ ret void
+}
+
+; ALL: 'shl_i64'
+; FAST64: estimated cost of 2 for {{.*}} shl i64
+; SLOW64: estimated cost of 3 for {{.*}} shl i64
+define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+ %vec = load i64, i64 addrspace(1)* %vaddr
+ %shl = shl i64 %vec, %b
+ store i64 %shl, i64 addrspace(1)* %out
+ ret void
+}
+
+; ALL: 'lshr_i32'
+; ALL: estimated cost of 1 for {{.*}} lshr i32
+define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+ %vec = load i32, i32 addrspace(1)* %vaddr
+ %lshr = lshr i32 %vec, %b
+ store i32 %lshr, i32 addrspace(1)* %out
+ ret void
+}
+
+; ALL: 'lshr_i64'
+; FAST64: estimated cost of 2 for {{.*}} lshr i64
+; SLOW64: estimated cost of 3 for {{.*}} lshr i64
+define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+ %vec = load i64, i64 addrspace(1)* %vaddr
+ %lshr = lshr i64 %vec, %b
+ store i64 %lshr, i64 addrspace(1)* %out
+ ret void
+}
+
+; ALL: 'ashr_i32'
+; ALL: estimated cost of 1 for {{.*}} ashr i32
+define void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+ %vec = load i32, i32 addrspace(1)* %vaddr
+ %ashr = ashr i32 %vec, %b
+ store i32 %ashr, i32 addrspace(1)* %out
+ ret void
+}
+
+; ALL: 'ashr_i64'
+; FAST64: estimated cost of 2 for {{.*}} ashr i64
+; SLOW64: estimated cost of 3 for {{.*}} ashr i64
+define void @ashr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+ %vec = load i64, i64 addrspace(1)* %vaddr
+ %ashr = ashr i64 %vec, %b
+ store i64 %ashr, i64 addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }