;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
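; The CHECK-DAG patterns below verify the dmask operand of each image load
; (the first immediate after the destination registers). Its set bits should
; correspond to the result components actually used by the IR below, which is
; why the width of each destination register tuple equals the popcount of the
; dmask value in the same pattern.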
;CHECK-DAG: IMAGE_LOAD {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 15, 0, 0, -1
;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+_VGPR[0-9]+}}, 3, 0, 0, 0
;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 2, 0, 0, 0
;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 1, 0, 0, 0
;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 4, 0, 0, 0
;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 8, 0, 0, 0
;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+_VGPR[0-9]+}}, 5, 0, 0, 0
;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+_VGPR[0-9]+}}, 12, 0, 0, -1
;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 7, 0, 0, 0
;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 8, 0, 0, -1

define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
  %v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
  %v2 = insertelement <4 x i32> undef, i32 %a1, i32 1
  %v3 = insertelement <4 x i32> undef, i32 %a1, i32 2
  %v4 = insertelement <4 x i32> undef, i32 %a1, i32 3
  %v5 = insertelement <4 x i32> undef, i32 %a2, i32 0
  %v6 = insertelement <4 x i32> undef, i32 %a2, i32 1
  %v10 = insertelement <4 x i32> undef, i32 %a3, i32 1
  %v11 = insertelement <4 x i32> undef, i32 %a3, i32 2
  %v15 = insertelement <4 x i32> undef, i32 %a4, i32 2
  %v16 = insertelement <4 x i32> undef, i32 %a4, i32 3
  %res1 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v1,
                                             <32 x i8> undef, i32 1)
  %res2 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v2,
                                             <32 x i8> undef, i32 2)
  %res3 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v3,
                                             <32 x i8> undef, i32 3)
  %res4 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v4,
                                             <32 x i8> undef, i32 4)
  %res5 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v5,
                                             <32 x i8> undef, i32 5)
  %res6 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v6,
                                             <32 x i8> undef, i32 6)
  %res10 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v10,
                                              <32 x i8> undef, i32 10)
  %res11 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v11,
                                              <32 x i8> undef, i32 11)
  %res15 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v15,
                                              <32 x i8> undef, i32 15)
  %res16 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v16,
                                              <32 x i8> undef, i32 16)
  %e1 = extractelement <4 x i32> %res1, i32 0
  %e2 = extractelement <4 x i32> %res2, i32 1
  %e3 = extractelement <4 x i32> %res3, i32 2
  %e4 = extractelement <4 x i32> %res4, i32 3
  %t0 = extractelement <4 x i32> %res5, i32 0
  %t1 = extractelement <4 x i32> %res5, i32 1
  %e5 = add i32 %t0, %t1
  %t2 = extractelement <4 x i32> %res6, i32 0
  %t3 = extractelement <4 x i32> %res6, i32 2
  %e6 = add i32 %t2, %t3
  %t10 = extractelement <4 x i32> %res10, i32 2
  %t11 = extractelement <4 x i32> %res10, i32 3
  %e10 = add i32 %t10, %t11
  %t12 = extractelement <4 x i32> %res11, i32 0
  %t13 = extractelement <4 x i32> %res11, i32 1
  %t14 = extractelement <4 x i32> %res11, i32 2
  %t15 = add i32 %t12, %t13
  %e11 = add i32 %t14, %t15
  %t28 = extractelement <4 x i32> %res15, i32 0
  %t29 = extractelement <4 x i32> %res15, i32 1
  %t30 = extractelement <4 x i32> %res15, i32 2
  %t31 = extractelement <4 x i32> %res15, i32 3
  %t32 = add i32 %t28, %t29
  %t33 = add i32 %t30, %t31
  %e15 = add i32 %t32, %t33
  %e16 = extractelement <4 x i32> %res16, i32 3
  %s1 = add i32 %e1, %e2
  %s2 = add i32 %s1, %e3
  %s3 = add i32 %s2, %e4
  %s4 = add i32 %s3, %e5
  %s5 = add i32 %s4, %e6
  %s9 = add i32 %s5, %e10
  %s10 = add i32 %s9, %e11
  %s14 = add i32 %s10, %e15
  %s15 = add i32 %s14, %e16
  %s16 = bitcast i32 %s15 to float
  call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %s16, float %s16, float %s16, float %s16)
  ret void
}

; Test that coordinates are stored in VGPRs and not SGPRs
; CHECK: vgpr_coords
; CHECK: IMAGE_LOAD_MIP VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}, 15, 0, 0, 0, 0, 0, 0, 0, VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}
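; The trailing VGPR tuple in the pattern above is the address (coordinate)
; operand of the load, so the check fails if the coordinates are materialized
; in SGPRs instead.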
define void @vgpr_coords(float addrspace(2)* addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
  %20 = getelementptr float addrspace(2)* addrspace(2)* %0, i32 0
  %21 = load float addrspace(2)* addrspace(2)* %20, !tbaa !2
  %22 = getelementptr float addrspace(2)* %21, i32 0
  %23 = load float addrspace(2)* %22, !tbaa !2, !invariant.load !1
  %24 = getelementptr float addrspace(2)* %21, i32 1
  %25 = load float addrspace(2)* %24, !tbaa !2, !invariant.load !1
  %26 = getelementptr float addrspace(2)* %21, i32 4
  %27 = load float addrspace(2)* %26, !tbaa !2, !invariant.load !1
  %28 = getelementptr <32 x i8> addrspace(2)* %2, i32 0
  %29 = load <32 x i8> addrspace(2)* %28, !tbaa !2
  %30 = bitcast float %27 to i32
  %31 = bitcast float %23 to i32
  %32 = bitcast float %25 to i32
  %33 = insertelement <4 x i32> undef, i32 %31, i32 0
  %34 = insertelement <4 x i32> %33, i32 %32, i32 1
  %35 = insertelement <4 x i32> %34, i32 %30, i32 2
  %36 = insertelement <4 x i32> %35, i32 undef, i32 3
  %37 = call <4 x i32> @llvm.SI.imageload.v4i32(<4 x i32> %36, <32 x i8> %29, i32 2)
  %38 = extractelement <4 x i32> %37, i32 0
  %39 = extractelement <4 x i32> %37, i32 1
  %40 = extractelement <4 x i32> %37, i32 2
  %41 = extractelement <4 x i32> %37, i32 3
  %42 = bitcast i32 %38 to float
  %43 = bitcast i32 %39 to float
  %44 = bitcast i32 %40 to float
  %45 = bitcast i32 %41 to float
  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %42, float %43, float %44, float %45)
  ret void
}

declare <4 x i32> @llvm.SI.imageload.(<4 x i32>, <32 x i8>, i32) readnone
; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.SI.imageload.v4i32(<4 x i32>, <32 x i8>, i32) #1

declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)

attributes #0 = { "ShaderType"="0" }
attributes #1 = { nounwind readnone }

!0 = metadata !{metadata !"const", null}
!1 = metadata !{}
!2 = metadata !{metadata !0, metadata !0, i64 0, i32 1}