; RUN: llc -march=hexagon -O2 < %s

target triple = "hexagon"

; We used to fail on this file with:
;   Unimplemented
;   UNREACHABLE executed at llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp:615!
; This happened because, after unrolling a loop containing a ldd_circ
; instruction, we would end up with several TFCR / ldd_circ sequences:
;   %0 (CRRegs) = TFCR %0 (IntRegs)
;               = ldd_circ( , , %0)
;   %1 (CRRegs) = TFCR %1 (IntRegs)
;               = ldd_circ( , , %0)
; The scheduler would hoist the TFCRs (the CRReg copies) to the top of the
; loop, the allocator would then run out of CRRegs and try to spill them, and
; since we have no code to spill CRRegs, the assertion above fired.
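
; Operand sketch for the circular-load intrinsic below (our reading of this
; test, not a normative description): the arguments appear to be the current
; buffer pointer, the destination for the loaded i64, the modifier word
; selecting the circular buffer, and the byte increment (-8 steps backwards);
; the returned pointer is chained into the next call.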
declare i8* @llvm.hexagon.circ.ldd(i8*, i8*, i32, i32) nounwind

define i32 @test(i16 zeroext %var0, i16* %var1, i16 signext %var2, i16* nocapture %var3) nounwind {
entry:
  %var4 = alloca i64, align 8
  %conv = zext i16 %var0 to i32
  %shr5 = lshr i32 %conv, 1
  %idxprom = sext i16 %var2 to i32
  %arrayidx = getelementptr inbounds i16, i16* %var1, i32 %idxprom
  %0 = bitcast i16* %var3 to i64*
  %1 = load i64, i64* %0, align 8
  %2 = bitcast i16* %arrayidx to i8*
  %3 = bitcast i64* %var4 to i8*
  %shl = shl nuw nsw i32 %shr5, 3
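  ; 83886080 is 0x05000000; OR-ed with the buffer length in bytes (%shl), it
  ; presumably forms the modifier word installed in an M register for circular
  ; addressing. The exact bit layout is an assumption, not taken from this file.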
  %or = or i32 %shl, 83886080
  %4 = call i8* @llvm.hexagon.circ.ldd(i8* %2, i8* %3, i32 %or, i32 -8)
  %sub = add nsw i32 %shr5, -1
  %cmp6 = icmp sgt i32 %sub, 0
  %5 = load i64, i64* %var4, align 8
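  ; M2.vdmacs.s1 is understood to be a dual multiply-accumulate with
  ; saturation and a shift of 1; for this regression test it merely carries
  ; the accumulator so every unrolled copy consumes a live circ.ldd result.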
  %6 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 0, i64 %1, i64 %5)
  br i1 %cmp6, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %incdec.ptr = getelementptr inbounds i16, i16* %var3, i32 4
  %7 = bitcast i16* %incdec.ptr to i64*
  %8 = zext i16 %var0 to i32
  %9 = lshr i32 %8, 1
  %10 = add i32 %9, -1
  %xtraiter = urem i32 %10, 8
  %lcmp = icmp ne i32 %xtraiter, 0
  br i1 %lcmp, label %unr.cmp60, label %for.body.lr.ph.split.split

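; The unroller rewrote the trip count as %10 = 8 * n + %xtraiter. The chain of
; unr.cmp* blocks below dispatches on %xtraiter (values 1 through 7) into the
; matching prologue copy, so the remainder iterations execute before the
; 8x-unrolled main loop.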
unr.cmp60:                                        ; preds = %for.body.lr.ph
  %un.tmp61 = icmp eq i32 %xtraiter, 1
  br i1 %un.tmp61, label %for.body.unr53, label %unr.cmp51

unr.cmp51:                                        ; preds = %unr.cmp60
  %un.tmp52 = icmp eq i32 %xtraiter, 2
  br i1 %un.tmp52, label %for.body.unr44, label %unr.cmp42

unr.cmp42:                                        ; preds = %unr.cmp51
  %un.tmp43 = icmp eq i32 %xtraiter, 3
  br i1 %un.tmp43, label %for.body.unr35, label %unr.cmp33

unr.cmp33:                                        ; preds = %unr.cmp42
  %un.tmp34 = icmp eq i32 %xtraiter, 4
  br i1 %un.tmp34, label %for.body.unr26, label %unr.cmp24

unr.cmp24:                                        ; preds = %unr.cmp33
  %un.tmp25 = icmp eq i32 %xtraiter, 5
  br i1 %un.tmp25, label %for.body.unr17, label %unr.cmp

unr.cmp:                                          ; preds = %unr.cmp24
  %un.tmp = icmp eq i32 %xtraiter, 6
  br i1 %un.tmp, label %for.body.unr13, label %for.body.unr

for.body.unr:                                     ; preds = %unr.cmp
  %11 = call i8* @llvm.hexagon.circ.ldd(i8* %4, i8* %3, i32 %or, i32 -8)
  %12 = load i64, i64* %7, align 8
  %inc.unr = add nsw i32 0, 1
  %incdec.ptr4.unr = getelementptr inbounds i64, i64* %7, i32 1
  %cmp.unr = icmp slt i32 %inc.unr, %sub
  %13 = load i64, i64* %var4, align 8
  %14 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %6, i64 %12, i64 %13)
  br label %for.body.unr13

for.body.unr13:                                   ; preds = %for.body.unr, %unr.cmp
  %15 = phi i64 [ %6, %unr.cmp ], [ %14, %for.body.unr ]
  %pvar6.09.unr = phi i64* [ %7, %unr.cmp ], [ %incdec.ptr4.unr, %for.body.unr ]
  %var8.0.in8.unr = phi i8* [ %4, %unr.cmp ], [ %11, %for.body.unr ]
  %i.07.unr = phi i32 [ 0, %unr.cmp ], [ %inc.unr, %for.body.unr ]
  %16 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr, i8* %3, i32 %or, i32 -8)
  %17 = load i64, i64* %pvar6.09.unr, align 8
  %inc.unr14 = add nsw i32 %i.07.unr, 1
  %incdec.ptr4.unr15 = getelementptr inbounds i64, i64* %pvar6.09.unr, i32 1
  %cmp.unr16 = icmp slt i32 %inc.unr14, %sub
  %18 = load i64, i64* %var4, align 8
  %19 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %15, i64 %17, i64 %18)
  br label %for.body.unr17

for.body.unr17:                                   ; preds = %for.body.unr13, %unr.cmp24
  %20 = phi i64 [ %6, %unr.cmp24 ], [ %19, %for.body.unr13 ]
  %pvar6.09.unr18 = phi i64* [ %7, %unr.cmp24 ], [ %incdec.ptr4.unr15, %for.body.unr13 ]
  %var8.0.in8.unr19 = phi i8* [ %4, %unr.cmp24 ], [ %16, %for.body.unr13 ]
  %i.07.unr20 = phi i32 [ 0, %unr.cmp24 ], [ %inc.unr14, %for.body.unr13 ]
  %21 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr19, i8* %3, i32 %or, i32 -8)
  %22 = load i64, i64* %pvar6.09.unr18, align 8
  %inc.unr21 = add nsw i32 %i.07.unr20, 1
  %incdec.ptr4.unr22 = getelementptr inbounds i64, i64* %pvar6.09.unr18, i32 1
  %cmp.unr23 = icmp slt i32 %inc.unr21, %sub
  %23 = load i64, i64* %var4, align 8
  %24 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %20, i64 %22, i64 %23)
  br label %for.body.unr26

for.body.unr26:                                   ; preds = %for.body.unr17, %unr.cmp33
  %25 = phi i64 [ %6, %unr.cmp33 ], [ %24, %for.body.unr17 ]
  %pvar6.09.unr27 = phi i64* [ %7, %unr.cmp33 ], [ %incdec.ptr4.unr22, %for.body.unr17 ]
  %var8.0.in8.unr28 = phi i8* [ %4, %unr.cmp33 ], [ %21, %for.body.unr17 ]
  %i.07.unr29 = phi i32 [ 0, %unr.cmp33 ], [ %inc.unr21, %for.body.unr17 ]
  %26 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr28, i8* %3, i32 %or, i32 -8)
  %27 = load i64, i64* %pvar6.09.unr27, align 8
  %inc.unr30 = add nsw i32 %i.07.unr29, 1
  %incdec.ptr4.unr31 = getelementptr inbounds i64, i64* %pvar6.09.unr27, i32 1
  %cmp.unr32 = icmp slt i32 %inc.unr30, %sub
  %28 = load i64, i64* %var4, align 8
  %29 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %25, i64 %27, i64 %28)
  br label %for.body.unr35

for.body.unr35:                                   ; preds = %for.body.unr26, %unr.cmp42
  %30 = phi i64 [ %6, %unr.cmp42 ], [ %29, %for.body.unr26 ]
  %pvar6.09.unr36 = phi i64* [ %7, %unr.cmp42 ], [ %incdec.ptr4.unr31, %for.body.unr26 ]
  %var8.0.in8.unr37 = phi i8* [ %4, %unr.cmp42 ], [ %26, %for.body.unr26 ]
  %i.07.unr38 = phi i32 [ 0, %unr.cmp42 ], [ %inc.unr30, %for.body.unr26 ]
  %31 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr37, i8* %3, i32 %or, i32 -8)
  %32 = load i64, i64* %pvar6.09.unr36, align 8
  %inc.unr39 = add nsw i32 %i.07.unr38, 1
  %incdec.ptr4.unr40 = getelementptr inbounds i64, i64* %pvar6.09.unr36, i32 1
  %cmp.unr41 = icmp slt i32 %inc.unr39, %sub
  %33 = load i64, i64* %var4, align 8
  %34 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %30, i64 %32, i64 %33)
  br label %for.body.unr44

for.body.unr44:                                   ; preds = %for.body.unr35, %unr.cmp51
  %35 = phi i64 [ %6, %unr.cmp51 ], [ %34, %for.body.unr35 ]
  %pvar6.09.unr45 = phi i64* [ %7, %unr.cmp51 ], [ %incdec.ptr4.unr40, %for.body.unr35 ]
  %var8.0.in8.unr46 = phi i8* [ %4, %unr.cmp51 ], [ %31, %for.body.unr35 ]
  %i.07.unr47 = phi i32 [ 0, %unr.cmp51 ], [ %inc.unr39, %for.body.unr35 ]
  %36 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr46, i8* %3, i32 %or, i32 -8)
  %37 = load i64, i64* %pvar6.09.unr45, align 8
  %inc.unr48 = add nsw i32 %i.07.unr47, 1
  %incdec.ptr4.unr49 = getelementptr inbounds i64, i64* %pvar6.09.unr45, i32 1
  %cmp.unr50 = icmp slt i32 %inc.unr48, %sub
  %38 = load i64, i64* %var4, align 8
  %39 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %35, i64 %37, i64 %38)
  br label %for.body.unr53

for.body.unr53:                                   ; preds = %for.body.unr44, %unr.cmp60
  %40 = phi i64 [ %6, %unr.cmp60 ], [ %39, %for.body.unr44 ]
  %pvar6.09.unr54 = phi i64* [ %7, %unr.cmp60 ], [ %incdec.ptr4.unr49, %for.body.unr44 ]
  %var8.0.in8.unr55 = phi i8* [ %4, %unr.cmp60 ], [ %36, %for.body.unr44 ]
  %i.07.unr56 = phi i32 [ 0, %unr.cmp60 ], [ %inc.unr48, %for.body.unr44 ]
  %41 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr55, i8* %3, i32 %or, i32 -8)
  %42 = load i64, i64* %pvar6.09.unr54, align 8
  %inc.unr57 = add nsw i32 %i.07.unr56, 1
  %incdec.ptr4.unr58 = getelementptr inbounds i64, i64* %pvar6.09.unr54, i32 1
  %cmp.unr59 = icmp slt i32 %inc.unr57, %sub
  %43 = load i64, i64* %var4, align 8
  %44 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %40, i64 %42, i64 %43)
  br label %for.body.lr.ph.split

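; If the total trip count %10 is below 8, the prologue above already covered
; every iteration and the 8x-unrolled main loop is skipped entirely.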
for.body.lr.ph.split:                             ; preds = %for.body.unr53
  %45 = icmp ult i32 %10, 8
  br i1 %45, label %for.end.loopexit, label %for.body.lr.ph.split.split

for.body.lr.ph.split.split:                       ; preds = %for.body.lr.ph.split, %for.body.lr.ph
  %.unr = phi i64 [ %44, %for.body.lr.ph.split ], [ %6, %for.body.lr.ph ]
  %pvar6.09.unr62 = phi i64* [ %incdec.ptr4.unr58, %for.body.lr.ph.split ], [ %7, %for.body.lr.ph ]
  %var8.0.in8.unr63 = phi i8* [ %41, %for.body.lr.ph.split ], [ %4, %for.body.lr.ph ]
  %i.07.unr64 = phi i32 [ %inc.unr57, %for.body.lr.ph.split ], [ 0, %for.body.lr.ph ]
  %.lcssa12.unr = phi i64 [ %44, %for.body.lr.ph.split ], [ 0, %for.body.lr.ph ]
  br label %for.body

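; The 8x-unrolled steady-state loop: eight circ.ldd / vdmacs pairs per
; iteration. This is the shape that produced the repeated TFCR / ldd_circ
; sequences described in the header comment.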
for.body:                                         ; preds = %for.body, %for.body.lr.ph.split.split
  %46 = phi i64 [ %.unr, %for.body.lr.ph.split.split ], [ %78, %for.body ]
  %pvar6.09 = phi i64* [ %pvar6.09.unr62, %for.body.lr.ph.split.split ], [ %scevgep71, %for.body ]
  %var8.0.in8 = phi i8* [ %var8.0.in8.unr63, %for.body.lr.ph.split.split ], [ %75, %for.body ]
  %i.07 = phi i32 [ %i.07.unr64, %for.body.lr.ph.split.split ], [ %inc.7, %for.body ]
  %47 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8, i8* %3, i32 %or, i32 -8)
  %48 = load i64, i64* %pvar6.09, align 8
  %inc = add nsw i32 %i.07, 1
  %49 = load i64, i64* %var4, align 8
  %50 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %46, i64 %48, i64 %49)
  %51 = call i8* @llvm.hexagon.circ.ldd(i8* %47, i8* %3, i32 %or, i32 -8)
  %scevgep = getelementptr i64, i64* %pvar6.09, i32 1
  %52 = load i64, i64* %scevgep, align 8
  %inc.1 = add nsw i32 %inc, 1
  %53 = load i64, i64* %var4, align 8
  %54 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %50, i64 %52, i64 %53)
  %55 = call i8* @llvm.hexagon.circ.ldd(i8* %51, i8* %3, i32 %or, i32 -8)
  %scevgep65 = getelementptr i64, i64* %scevgep, i32 1
  %56 = load i64, i64* %scevgep65, align 8
  %inc.2 = add nsw i32 %inc.1, 1
  %57 = load i64, i64* %var4, align 8
  %58 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %54, i64 %56, i64 %57)
  %59 = call i8* @llvm.hexagon.circ.ldd(i8* %55, i8* %3, i32 %or, i32 -8)
  %scevgep66 = getelementptr i64, i64* %scevgep65, i32 1
  %60 = load i64, i64* %scevgep66, align 8
  %inc.3 = add nsw i32 %inc.2, 1
  %61 = load i64, i64* %var4, align 8
  %62 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %58, i64 %60, i64 %61)
  %63 = call i8* @llvm.hexagon.circ.ldd(i8* %59, i8* %3, i32 %or, i32 -8)
  %scevgep67 = getelementptr i64, i64* %scevgep66, i32 1
  %64 = load i64, i64* %scevgep67, align 8
  %inc.4 = add nsw i32 %inc.3, 1
  %65 = load i64, i64* %var4, align 8
  %66 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %62, i64 %64, i64 %65)
  %67 = call i8* @llvm.hexagon.circ.ldd(i8* %63, i8* %3, i32 %or, i32 -8)
  %scevgep68 = getelementptr i64, i64* %scevgep67, i32 1
  %68 = load i64, i64* %scevgep68, align 8
  %inc.5 = add nsw i32 %inc.4, 1
  %69 = load i64, i64* %var4, align 8
  %70 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %66, i64 %68, i64 %69)
  %71 = call i8* @llvm.hexagon.circ.ldd(i8* %67, i8* %3, i32 %or, i32 -8)
  %scevgep69 = getelementptr i64, i64* %scevgep68, i32 1
  %72 = load i64, i64* %scevgep69, align 8
  %inc.6 = add nsw i32 %inc.5, 1
  %73 = load i64, i64* %var4, align 8
  %74 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %70, i64 %72, i64 %73)
  %75 = call i8* @llvm.hexagon.circ.ldd(i8* %71, i8* %3, i32 %or, i32 -8)
  %scevgep70 = getelementptr i64, i64* %scevgep69, i32 1
  %76 = load i64, i64* %scevgep70, align 8
  %inc.7 = add nsw i32 %inc.6, 1
  %77 = load i64, i64* %var4, align 8
  %78 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %74, i64 %76, i64 %77)
  %cmp.7 = icmp slt i32 %inc.7, %sub
  %scevgep71 = getelementptr i64, i64* %scevgep70, i32 1
  br i1 %cmp.7, label %for.body, label %for.end.loopexit.unr-lcssa

for.end.loopexit.unr-lcssa:                       ; preds = %for.body
  %.lcssa12.ph = phi i64 [ %78, %for.body ]
  br label %for.end.loopexit

for.end.loopexit:                                 ; preds = %for.end.loopexit.unr-lcssa, %for.body.lr.ph.split
  %.lcssa12 = phi i64 [ %44, %for.body.lr.ph.split ], [ %.lcssa12.ph, %for.end.loopexit.unr-lcssa ]
  br label %for.end

for.end:                                          ; preds = %for.end.loopexit, %entry
  %.lcssa = phi i64 [ %6, %entry ], [ %.lcssa12, %for.end.loopexit ]
  %79 = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %.lcssa)
  ret i32 %79
}

declare i64 @llvm.hexagon.M2.vdmacs.s1(i64, i64, i64) nounwind readnone

declare i32 @llvm.hexagon.S2.vrndpackwhs(i64) nounwind readnone