; RUN: llc -march=hexagon -O2 -mcpu=hexagonv60 -hexagon-initial-cfg-cleanup=0 --stats -o - 2>&1 < %s | FileCheck %s
; This test used to abort while processing SUnits.
; REQUIRES: asserts

; CHECK: vmem

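; The statistics below are only printed in an asserts build (hence
; REQUIRES: asserts). The pipeliner must report at least one
; software-pipelined loop, and the node-order-issues statistic must not
; appear anywhere in the output, so it is checked both before and after
; the pipelining statistic.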
; CHECK-NOT: Number of node order issues found
; CHECK: Number of loops software pipelined
; CHECK-NOT: Number of node order issues found

target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
target triple = "hexagon-unknown--elf"

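; HVX intrinsic declarations (64-byte vector mode: <16 x i32> is one vector
; register, <32 x i32> a register pair).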
declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0
declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #0
declare <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32>, <16 x i32>, i32) #0
declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32>, <16 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32>, <16 x i32>) #0
declare <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32>, <16 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32>, <16 x i32>, i32) #0

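; Apparently reduced from Halide-generated output (see the tbaa and module
; metadata below): a loop nest that deals/shuffles u32 and u16 vector
; inputs, does unsigned halfword multiplies, and stores shuffled 32-bit
; results.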
define void @f0() #1 {
b0:
  %v0 = load i16*, i16** undef, align 4
  %v1 = load i32*, i32** undef, align 4
  br label %b1

b1:                                               ; preds = %b3, %b0
  %v2 = phi i32 [ 0, %b0 ], [ %v129, %b3 ]
  %v3 = mul nuw nsw i32 %v2, 768
  %v4 = add nuw nsw i32 %v3, 32
  %v5 = add nuw nsw i32 %v3, 64
  %v6 = add nuw nsw i32 %v3, 96
  br label %b2

b2:                                               ; preds = %b2, %b1
  %v7 = phi i32* [ %v1, %b1 ], [ %v127, %b2 ]
  %v8 = phi i16* [ %v0, %b1 ], [ %v128, %b2 ]
  %v9 = phi i32 [ 0, %b1 ], [ %v125, %b2 ]
  %v10 = mul nuw nsw i32 %v9, 32
  %v11 = bitcast i32* %v7 to <16 x i32>*
  %v12 = load <16 x i32>, <16 x i32>* %v11, align 64, !tbaa !1
  %v13 = add nuw nsw i32 %v10, 16
  %v14 = getelementptr inbounds i32, i32* %v1, i32 %v13
  %v15 = bitcast i32* %v14 to <16 x i32>*
  %v16 = load <16 x i32>, <16 x i32>* %v15, align 64, !tbaa !1
  %v17 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v16, <16 x i32> %v12)
  %v18 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v17) #2
  %v19 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v17) #2
  %v20 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %v19, <16 x i32> %v18, i32 -4) #2
  %v21 = bitcast i16* %v8 to <16 x i32>*
  %v22 = load <16 x i32>, <16 x i32>* %v21, align 64, !tbaa !4
  %v23 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v20) #2
  %v24 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v20) #2
  %v25 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v24, <16 x i32> %v23) #2
  %v26 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v24, <16 x i32> %v23) #2
  %v27 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v25, <16 x i32> %v22) #2
  %v28 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v26, <16 x i32> %v22) #2
  %v29 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v27) #2
  %v30 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v28) #2
  %v31 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %v29, <16 x i32> %v30, i32 16) #2
  %v32 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v28) #2
  %v33 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> undef, <16 x i32> %v32, i32 16) #2
  %v34 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v33, <16 x i32> %v31) #2
  %v35 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v34) #2
  %v36 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v34) #2
  %v37 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v36, <16 x i32> %v35, i32 -4) #2
  %v38 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v37)
  %v39 = add nuw nsw i32 %v10, %v3
  %v40 = getelementptr inbounds i32, i32* undef, i32 %v39
  %v41 = bitcast i32* %v40 to <16 x i32>*
  store <16 x i32> %v38, <16 x i32>* %v41, align 64, !tbaa !6
  %v42 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v37)
  store <16 x i32> %v42, <16 x i32>* undef, align 64, !tbaa !6
  %v43 = getelementptr i32, i32* %v7, i32 32
  %v44 = getelementptr i16, i16* %v8, i32 32
  %v45 = bitcast i32* %v43 to <16 x i32>*
  %v46 = load <16 x i32>, <16 x i32>* %v45, align 64, !tbaa !1
  %v47 = add nuw nsw i32 %v10, 48
  %v48 = getelementptr inbounds i32, i32* %v1, i32 %v47
  %v49 = bitcast i32* %v48 to <16 x i32>*
  %v50 = load <16 x i32>, <16 x i32>* %v49, align 64, !tbaa !1
  %v51 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v50, <16 x i32> %v46)
  %v52 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v51) #2
  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> undef, <16 x i32> %v52, i32 -4) #2
  %v54 = bitcast i16* %v44 to <16 x i32>*
  %v55 = load <16 x i32>, <16 x i32>* %v54, align 64, !tbaa !4
  %v56 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v53) #2
  %v57 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> undef, <16 x i32> %v56) #2
  %v58 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> undef, <16 x i32> %v56) #2
  %v59 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v57, <16 x i32> %v55) #2
  %v60 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v58, <16 x i32> %v55) #2
  %v61 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v59) #2
  %v62 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %v61, <16 x i32> undef, i32 16) #2
  %v63 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v59) #2
  %v64 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v60) #2
  %v65 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %v63, <16 x i32> %v64, i32 16) #2
  %v66 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v65, <16 x i32> %v62) #2
  %v67 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v66) #2
  %v68 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v66) #2
  %v69 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v68, <16 x i32> %v67, i32 -4) #2
  %v70 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v69)
  %v71 = add nuw nsw i32 %v4, %v10
  %v72 = getelementptr inbounds i32, i32* undef, i32 %v71
  %v73 = bitcast i32* %v72 to <16 x i32>*
  store <16 x i32> %v70, <16 x i32>* %v73, align 64, !tbaa !6
  %v74 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v69)
  %v75 = add nuw nsw i32 %v71, 16
  %v76 = getelementptr inbounds i32, i32* undef, i32 %v75
  %v77 = bitcast i32* %v76 to <16 x i32>*
  store <16 x i32> %v74, <16 x i32>* %v77, align 64, !tbaa !6
  %v78 = getelementptr i32, i32* %v7, i32 64
  %v79 = getelementptr i16, i16* %v8, i32 64
  %v80 = bitcast i32* %v78 to <16 x i32>*
  %v81 = load <16 x i32>, <16 x i32>* %v80, align 64, !tbaa !1
  %v82 = add nuw nsw i32 %v10, 80
  %v83 = getelementptr inbounds i32, i32* %v1, i32 %v82
  %v84 = bitcast i32* %v83 to <16 x i32>*
  %v85 = load <16 x i32>, <16 x i32>* %v84, align 64, !tbaa !1
  %v86 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v85, <16 x i32> %v81)
  %v87 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v86) #2
  %v88 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v86) #2
  %v89 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %v88, <16 x i32> %v87, i32 -4) #2
  %v90 = bitcast i16* %v79 to <16 x i32>*
  %v91 = load <16 x i32>, <16 x i32>* %v90, align 64, !tbaa !4
  %v92 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v89) #2
  %v93 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v89) #2
  %v94 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v93, <16 x i32> %v92) #2
  %v95 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v93, <16 x i32> %v92) #2
  %v96 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v94, <16 x i32> %v91) #2
  %v97 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v95, <16 x i32> %v91) #2
  %v98 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v97) #2
  %v99 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> undef, <16 x i32> %v98, i32 16) #2
  %v100 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v96) #2
  %v101 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v97) #2
  %v102 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %v100, <16 x i32> %v101, i32 16) #2
  %v103 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v102, <16 x i32> %v99) #2
  %v104 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v103) #2
  %v105 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v103) #2
  %v106 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v105, <16 x i32> %v104, i32 -4) #2
  %v107 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v106)
  %v108 = add nuw nsw i32 %v5, %v10
  %v109 = getelementptr inbounds i32, i32* undef, i32 %v108
  %v110 = bitcast i32* %v109 to <16 x i32>*
  store <16 x i32> %v107, <16 x i32>* %v110, align 64, !tbaa !6
  %v111 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v106)
  %v112 = add nuw nsw i32 %v108, 16
  %v113 = getelementptr inbounds i32, i32* undef, i32 %v112
  %v114 = bitcast i32* %v113 to <16 x i32>*
  store <16 x i32> %v111, <16 x i32>* %v114, align 64, !tbaa !6
  %v115 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> undef) #2
  %v116 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> undef, <16 x i32> %v115, i32 -4) #2
  %v117 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v116)
  %v118 = add nuw nsw i32 %v6, %v10
  %v119 = getelementptr inbounds i32, i32* undef, i32 %v118
  %v120 = bitcast i32* %v119 to <16 x i32>*
  store <16 x i32> %v117, <16 x i32>* %v120, align 64, !tbaa !6
  %v121 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v116)
  %v122 = add nuw nsw i32 %v118, 16
  %v123 = getelementptr inbounds i32, i32* undef, i32 %v122
  %v124 = bitcast i32* %v123 to <16 x i32>*
  store <16 x i32> %v121, <16 x i32>* %v124, align 64, !tbaa !6
  %v125 = add nuw nsw i32 %v9, 4
  %v126 = icmp eq i32 %v125, 24
  %v127 = getelementptr i32, i32* %v7, i32 128
  %v128 = getelementptr i16, i16* %v8, i32 128
  br i1 %v126, label %b3, label %b2

b3:                                               ; preds = %b2
  %v129 = add nuw nsw i32 %v2, 1
  br label %b1
}

attributes #0 = { nounwind readnone }
attributes #1 = { "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
attributes #2 = { nounwind }

!llvm.module.flags = !{!0}

!0 = !{i32 2, !"halide_mattrs", !"+hvx"}
!1 = !{!2, !2, i64 0}
!2 = !{!"in_u32", !3}
!3 = !{!"Halide buffer"}
!4 = !{!5, !5, i64 0}
!5 = !{!"in_u16", !3}
!6 = !{!7, !7, i64 0}
!7 = !{!"op_vmpy_v__uh_v__uh__1", !3}