TODO:
* gpr0 allocation
* implement do-loop -> bdnz transform
* implement powerpc-64 for darwin
* use stfiwx in float->int

* Fold add and sub with constant into non-extern, non-weak addresses so this:
        lis r2, ha16(l2__ZTV4Cell)
        la r2, lo16(l2__ZTV4Cell)(r2)
        addi r2, r2, 8
becomes:
        lis r2, ha16(l2__ZTV4Cell+8)
        la r2, lo16(l2__ZTV4Cell+8)(r2)


* Teach LLVM how to codegen this:
unsigned short foo(float a) { return a; }
as:
_foo:
        fctiwz f0,f1
        stfd f0,-8(r1)
        lhz r3,-2(r1)
        blr
not:
_foo:
        fctiwz f0, f1
        stfd f0, -8(r1)
        lwz r2, -4(r1)
        rlwinm r3, r2, 0, 16, 31
        blr

* Support 'update' load/store instructions.  These are cracked on the G5, but
  are still a codesize win.
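
  For example (a sketch; the registers are illustrative), a strided walk that
  is currently emitted as

        addi r3, r3, 4
        lwz r4, 0(r3)

  can use the update form, which folds the pointer bump into the load:

        lwzu r4, 4(r3)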

* should hint to the branch select pass that it doesn't need to print the
  second unconditional branch, so we don't end up with things like:
        b .LBBl42__2E_expand_function_8_674     ; loopentry.24
        b .LBBl42__2E_expand_function_8_42      ; NewDefault
        b .LBBl42__2E_expand_function_8_42      ; NewDefault

===-------------------------------------------------------------------------===

* Codegen this:

  void test2(int X) {
    if (X == 0x12345678) bar();
  }

  as:

        xoris r0,r3,0x1234
        cmpwi cr0,r0,0x5678
        beq cr0,L6

  not:

        lis r2, 4660
        ori r2, r2, 22136
        cmpw cr0, r3, r2
        bne .LBB_test2_2

===-------------------------------------------------------------------------===

Lump the constant pool for each function into ONE pic object, and reference
pieces of it as offsets from the start.  For functions like this (contrived
to have lots of constants obviously):

double X(double Y) { return (Y*1.23 + 4.512)*2.34 + 14.38; }

We generate:

_X:
        lis r2, ha16(.CPI_X_0)
        lfd f0, lo16(.CPI_X_0)(r2)
        lis r2, ha16(.CPI_X_1)
        lfd f2, lo16(.CPI_X_1)(r2)
        fmadd f0, f1, f0, f2
        lis r2, ha16(.CPI_X_2)
        lfd f1, lo16(.CPI_X_2)(r2)
        lis r2, ha16(.CPI_X_3)
        lfd f2, lo16(.CPI_X_3)(r2)
        fmadd f1, f0, f1, f2
        blr

It would be better to materialize .CPI_X into a register, then use immediates
off of the register to avoid the lis's.  This is even more important in PIC
mode.
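
Something along these lines, assuming the four constants are laid out
contiguously starting at .CPI_X_0 (a sketch; the offsets are illustrative):

_X:
        lis r2, ha16(.CPI_X_0)
        la r2, lo16(.CPI_X_0)(r2)
        lfd f0, 0(r2)
        lfd f2, 8(r2)
        fmadd f0, f1, f0, f2
        lfd f1, 16(r2)
        lfd f2, 24(r2)
        fmadd f1, f0, f1, f2
        blr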

===-------------------------------------------------------------------------===

Implement the Newton-Raphson method for refining the estimate instructions
(fres/frsqrte) to the correct accuracy, and implement divide as multiply by
reciprocal when it has more than one use.  Itanium will want this too.
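
For the reciprocal, each refinement step is x1 = x0 * (2 - d * x0), starting
from the hardware estimate.  A sketch in C (__fres below is just a stand-in
for the fres estimate, not an existing intrinsic):

float recip(float d) {
  float x = __fres(d);           /* ~5-bit estimate from the hardware     */
  x = x * (2.0f - d * x);        /* each step roughly doubles the number  */
  x = x * (2.0f - d * x);        /* of correct bits                       */
  return x;
}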

===-------------------------------------------------------------------------===

#define ARRAY_LENGTH 16

union bitfield {
  struct {
#ifndef __ppc__
    unsigned int field0 : 6;
    unsigned int field1 : 6;
    unsigned int field2 : 6;
    unsigned int field3 : 6;
    unsigned int field4 : 3;
    unsigned int field5 : 4;
    unsigned int field6 : 1;
#else
    unsigned int field6 : 1;
    unsigned int field5 : 4;
    unsigned int field4 : 3;
    unsigned int field3 : 6;
    unsigned int field2 : 6;
    unsigned int field1 : 6;
    unsigned int field0 : 6;
#endif
  } bitfields, bits;
  unsigned int u32All;
  signed int i32All;
  float f32All;
};


typedef struct program_t {
  union bitfield array[ARRAY_LENGTH];
  int size;
  int loaded;
} program;


void AdjustBitfields(program* prog, unsigned int fmt1)
{
  unsigned int shift = 0;
  unsigned int texCount = 0;
  unsigned int i;

  for (i = 0; i < 8; i++)
  {
    prog->array[i].bitfields.field0 = texCount;
    prog->array[i].bitfields.field1 = texCount + 1;
    prog->array[i].bitfields.field2 = texCount + 2;
    prog->array[i].bitfields.field3 = texCount + 3;

    texCount += (fmt1 >> shift) & 0x7;
    shift += 3;
  }
}

In the loop above, the bitfield adds get generated as
(add (shl bitfield, C1), (shl C2, C1)) where C2 is 1, 2 or 3.

Since the input to the (or and, and) is an (add) rather than a (shl), the shift
doesn't get folded into the rlwimi instruction.  We should ideally see through
things like this, rather than forcing llvm to generate the equivalent

(shl (add bitfield, C2), C1) with some kind of mask.

===-------------------------------------------------------------------------===

Compile this:

int %f1(int %a, int %b) {
        %tmp.1 = and int %a, 15         ; <int> [#uses=1]
        %tmp.3 = and int %b, 240        ; <int> [#uses=1]
        %tmp.4 = or int %tmp.3, %tmp.1  ; <int> [#uses=1]
        ret int %tmp.4
}

without a copy.  We make this currently:

_f1:
        rlwinm r2, r4, 0, 24, 27
        rlwimi r2, r3, 0, 28, 31
        or r3, r2, r2
        blr

The two-addr pass or RA needs to learn when it is profitable to commute an
instruction to avoid a copy AFTER the 2-addr instruction.  The 2-addr pass
currently only commutes to avoid inserting a copy BEFORE the two addr instr.

===-------------------------------------------------------------------------===

176.gcc contains a bunch of code like this (this occurs dozens of times):

int %test(uint %mode.0.i.0) {
        %tmp.79 = cast uint %mode.0.i.0 to sbyte        ; <sbyte> [#uses=1]
        %tmp.80 = cast sbyte %tmp.79 to int             ; <int> [#uses=1]
        %tmp.81 = shl int %tmp.80, ubyte 16             ; <int> [#uses=1]
        %tmp.82 = and int %tmp.81, 16711680
        ret int %tmp.82
}

which we compile to:

_test:
        extsb r2, r3
        rlwinm r3, r2, 16, 8, 15
        blr

The extsb is obviously dead.  This can be handled by a future thing like
MaskedValueIsZero that checks to see if bits are ever demanded (in this case,
the sign bits are never used, so we can fold the sext_inreg to nothing).

I'm seeing code like this:

        srwi r3, r3, 16
        extsb r3, r3
        rlwimi r4, r3, 16, 8, 15

in which the extsb is preventing the srwi from being nuked.

===-------------------------------------------------------------------------===

Another example that occurs is:

uint %test(int %specbits.6.1) {
        %tmp.2540 = shr int %specbits.6.1, ubyte 11     ; <int> [#uses=1]
        %tmp.2541 = cast int %tmp.2540 to uint          ; <uint> [#uses=1]
        %tmp.2542 = shl uint %tmp.2541, ubyte 13        ; <uint> [#uses=1]
        %tmp.2543 = and uint %tmp.2542, 8192            ; <uint> [#uses=1]
        ret uint %tmp.2543
}

which we codegen as:

l1_test:
        srawi r2, r3, 11
        rlwinm r3, r2, 13, 18, 18
        blr

the srawi can be nuked by turning the SAR into a logical SHR (the sext bits are
dead), which I think can then be folded into the rlwinm.

===-------------------------------------------------------------------------===

Compile offsets from allocas:

int *%test() {
        %X = alloca { int, int }
        %Y = getelementptr {int,int}* %X, int 0, uint 1
        ret int* %Y
}

into a single add, not two:

_test:
        addi r2, r1, -8
        addi r3, r2, 4
        blr

--> important for C++.

===-------------------------------------------------------------------------===

int test3(int a, int b) { return (a < 0) ? a : 0; }

should be branch free code.  LLVM is turning it into < 1 because of the RHS.
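
A branch-free form exists; a sketch of what we could emit instead (register
choice is illustrative):

_test3:
        srawi r2, r3, 31        ; r2 = (a < 0) ? -1 : 0
        and r3, r3, r2          ; a if negative, 0 otherwise
        blr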

===-------------------------------------------------------------------------===

No loads or stores of the constants should be needed:

struct foo { double X, Y; };
void xxx(struct foo F);
void bar() { struct foo R = { 1.0, 2.0 }; xxx(R); }

===-------------------------------------------------------------------------===

Darwin Stub LICM optimization:

Loops like this:

  for (...)  bar();

Have to go through an indirect stub if bar is external or linkonce.  It would
be better to compile it as:

  fp = &bar;
  for (...)  fp();

which only computes the address of bar once (instead of each time through the
stub).  This is Darwin specific and would have to be done in the code generator.
Probably not a win on x86.

===-------------------------------------------------------------------------===

PowerPC i1/setcc stuff (depends on subreg stuff):

Check out the PPC code we get for 'compare' in this testcase:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19672

oof.  on top of not doing the logical crnand instead of (mfcr, mfcr,
invert, invert, or), we then have to compare it against zero instead of
using the value already in a CR!

that should be something like
        cmpw cr7, r8, r5
        cmpw cr0, r7, r3
        crnand cr0, cr0, cr7
        bne cr0, LBB_compare_4

instead of
        cmpw cr7, r8, r5
        cmpw cr0, r7, r3
        mfcr r7, 1
        mcrf cr7, cr0
        mfcr r8, 1
        rlwinm r7, r7, 30, 31, 31
        rlwinm r8, r8, 30, 31, 31
        xori r7, r7, 1
        xori r8, r8, 1
        addi r2, r2, 1
        or r7, r8, r7
        cmpwi cr0, r7, 0
        bne cr0, LBB_compare_4  ; loopexit

===-------------------------------------------------------------------------===

Simple IPO for argument passing, change:
  void foo(int X, double Y, int Z) -> void foo(int X, int Z, double Y)

the Darwin ABI specifies that any integer arguments in the first 32 bytes worth
of arguments get assigned to r3 through r10.  That is, if you have a function
foo(int, double, int) you get r3, f1, r6, since the 64 bit double ate up the
argument bytes for r4 and r5.  The trick then would be to shuffle the argument
order for functions we can internalize so that the maximum number of
integers/pointers get passed in regs before you see any of the fp arguments.

Instead of implementing this, it would actually probably be easier to just
implement a PPC fastcc, where we could do whatever we wanted to the CC,
including having this work sanely.

===-------------------------------------------------------------------------===

Fix Darwin FP-In-Integer Registers ABI

Darwin passes doubles in structures in integer registers, which is very very
bad.  Add something like a BIT_CONVERT to LLVM, then do an i-p transformation
that percolates these things out of functions.

Check out how horrible this is:
http://gcc.gnu.org/ml/gcc/2005-10/msg01036.html

This is an extension of "interprocedural CC unmunging" that can't be done with
just fastcc.

===-------------------------------------------------------------------------===

Code Gen IPO optimization:

Squish small scalar globals together into a single global struct, allowing the
address of the struct to be CSE'd, avoiding PIC accesses (also reduces the size
of the GOT on targets with one).
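
A sketch of the idea (the names are made up): instead of

  int a;  int b;      /* two symbols, two ha16/lo16 address computations */

emit

  struct { int a, b; } _MergedGlobals;  /* one base address; the fields are
                                           reached by small fixed offsets  */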

===-------------------------------------------------------------------------===

Generate lwbrx and other byteswapping load/store instructions when reasonable.
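
For example (a sketch; the exact pattern the selector would need to match may
differ), a manually byte-swapped load like

unsigned load_swapped(unsigned *p) {
  unsigned x = *p;
  return (x >> 24) | ((x >> 8) & 0xff00) |
         ((x << 8) & 0xff0000) | (x << 24);
}

should become a single lwbrx rather than a lwz followed by rlwinm/or shuffling.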

===-------------------------------------------------------------------------===

Implement TargetConstantVec, and set up PPC to custom lower ConstantVec into
TargetConstantVec's if it's one of the many forms that are algorithmically
computable using the spiffy altivec instructions.
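
For example (a sketch), the splat constant <int 1, int 1, int 1, int 1> can be
materialized with

        vspltisw v2, 1

and the all-zeros vector with

        vxor v2, v2, v2

rather than being loaded from the constant pool.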

===-------------------------------------------------------------------------===

Compile this:

double %test(double %X) {
        %Y = cast double %X to long
        %Z = cast long %Y to double
        ret double %Z
}

to this:

_test:
        fctidz f0, f1
        stfd f0, -8(r1)
        lwz r2, -4(r1)
        lwz r3, -8(r1)
        stw r2, -12(r1)
        stw r3, -16(r1)
        lfd f0, -16(r1)
        fcfid f1, f0
        blr

without the lwz/stw's.
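
In other words (a sketch of the ideal output), the round trip through memory
should disappear entirely:

_test:
        fctidz f0, f1
        fcfid f1, f0
        blr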

===-------------------------------------------------------------------------===

Compile this:

int foo(int a) {
  int b = (a < 8);
  if (b) {
    return b * 3;     // ignore the fact that this is always 3.
  } else {
    return 2;
  }
}

into something not this:

_foo:
1)      cmpwi cr7, r3, 8
        mfcr r2, 1
        rlwinm r2, r2, 29, 31, 31
1)      cmpwi cr0, r3, 7
        bgt cr0, LBB1_2 ; UnifiedReturnBlock
LBB1_1: ; then
        rlwinm r2, r2, 0, 31, 31
        mulli r3, r2, 3
        blr
LBB1_2: ; UnifiedReturnBlock
        li r3, 2
        blr

In particular, the two compares (marked 1) could be shared by reversing one.
This could be done in the dag combiner, by swapping a BR_CC when a SETCC of the
same operands (but backwards) exists.  In this case, this wouldn't save us
anything though, because the compares still wouldn't be shared.

===-------------------------------------------------------------------------===

The legalizer should lower this:

bool %test(ulong %x) {
  %tmp = setlt ulong %x, 4294967296
  ret bool %tmp
}

into "if x.high == 0", not:

_test:
        addi r2, r3, -1
        cntlzw r2, r2
        cntlzw r3, r3
        srwi r2, r2, 5
        srwi r4, r3, 5
        li r3, 0
        cmpwi cr0, r2, 0
        bne cr0, LBB1_2 ;
LBB1_1:
        or r3, r4, r4
LBB1_2:
        blr

noticed in 2005-05-11-Popcount-ffs-fls.c.
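
A sketch of what the lowered form could look like (the high word of x arrives
in r3 on 32-bit Darwin):

_test:
        cntlzw r2, r3           ; 32 iff the high word is zero
        srwi r3, r2, 5          ; 1 iff the high word is zero
        blr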