//===- README.txt - Notes for improving PowerPC-specific code gen ---------===//

TODO:
* gpr0 allocation
* implement do-loop -> bdnz transform
* lmw/stmw pass a la arm load store optimizer for prolog/epilog

===-------------------------------------------------------------------------===

On PPC64, this:

long f2 (long x) { return 0xfffffff000000000UL; }
long f3 (long x) { return 0x1ffffffffUL; }

could compile into:

_f2:
        li r3,-1
        rldicr r3,r3,0,27
        blr
_f3:
        li r3,-1
        rldicl r3,r3,0,31
        blr

we produce:

_f2:
        lis r2, 4095
        ori r2, r2, 65535
        sldi r3, r2, 36
        blr
_f3:
        li r2, 1
        sldi r2, r2, 32
        oris r2, r2, 65535
        ori r3, r2, 65535
        blr

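Both constants are contiguous runs of ones, which is exactly what one
rotate-and-clear instruction expresses.  A minimal C sketch of the two masks
(helper names here are illustrative, not part of the backend):

/* li -1 materializes all ones; rldicr ...,0,27 then keeps only the
   top 28 bits, and rldicl ...,0,31 keeps only the low 33 bits. */
unsigned long long f2_mask(void) {
  return ~0ULL << 36;   /* 0xfffffff000000000: top 28 bits set */
}
unsigned long long f3_mask(void) {
  return ~0ULL >> 31;   /* 0x1ffffffff: low 33 bits set */
}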

===-------------------------------------------------------------------------===

Support 'update' load/store instructions.  These are cracked on the G5, but
are still a codesize win.

With preinc enabled, this:

long *%test4(long *%X, long *%dest) {
        %Y = getelementptr long* %X, int 4
        %A = load long* %Y
        store long %A, long* %dest
        ret long* %Y
}

compiles to:

_test4:
        mr r2, r3
        lwzu r5, 32(r2)
        lwz r3, 36(r3)
        stw r5, 0(r4)
        stw r3, 4(r4)
        mr r3, r2
        blr

with -sched=list-burr, I get:

_test4:
        lwz r2, 36(r3)
        lwzu r5, 32(r3)
        stw r2, 4(r4)
        stw r5, 0(r4)
        blr

===-------------------------------------------------------------------------===

We compile the hottest inner loop of viterbi to:

        li r6, 0
        b LBB1_84       ;bb432.i
LBB1_83:                ;bb420.i
        lbzx r8, r5, r7
        addi r6, r7, 1
        stbx r8, r4, r7
LBB1_84:                ;bb432.i
        mr r7, r6
        cmplwi cr0, r7, 143
        bne cr0, LBB1_83        ;bb420.i

The CBE manages to produce:

        li r0, 143
        mtctr r0
loop:
        lbzx r2, r2, r11
        stbx r0, r2, r9
        addi r2, r2, 1
        bdz later
        b loop

This could be much better (bdnz instead of bdz) but it still beats us.  If we
produced this with bdnz, the loop would be a single dispatch group.

===-------------------------------------------------------------------------===

Compile:

void foo(int *P) {
  if (P)  *P = 0;
}

into:

_foo:
        cmpwi cr0,r3,0
        beqlr cr0
        li r0,0
        stw r0,0(r3)
        blr

This is effectively a simple form of predication.

===-------------------------------------------------------------------------===

Lump the constant pool for each function into ONE pic object, and reference
pieces of it as offsets from the start.  For functions like this (contrived
to have lots of constants obviously):

double X(double Y) { return (Y*1.23 + 4.512)*2.34 + 14.38; }

We generate:

_X:
        lis r2, ha16(.CPI_X_0)
        lfd f0, lo16(.CPI_X_0)(r2)
        lis r2, ha16(.CPI_X_1)
        lfd f2, lo16(.CPI_X_1)(r2)
        fmadd f0, f1, f0, f2
        lis r2, ha16(.CPI_X_2)
        lfd f1, lo16(.CPI_X_2)(r2)
        lis r2, ha16(.CPI_X_3)
        lfd f2, lo16(.CPI_X_3)(r2)
        fmadd f1, f0, f1, f2
        blr

It would be better to materialize .CPI_X into a register, then use immediates
off of the register to avoid the lis's.  This is even more important in PIC
mode.

Note that this (and the static variable version) is discussed here for GCC:
http://gcc.gnu.org/ml/gcc-patches/2006-02/msg00133.html

Here's another example (the sgn function):
double testf(double a) {
       return a == 0.0 ? 0.0 : (a > 0.0 ? 1.0 : -1.0);
}

it produces a BB like this:
LBB1_1: ; cond_true
        lis r2, ha16(LCPI1_0)
        lfs f0, lo16(LCPI1_0)(r2)
        lis r2, ha16(LCPI1_1)
        lis r3, ha16(LCPI1_2)
        lfs f2, lo16(LCPI1_2)(r3)
        lfs f3, lo16(LCPI1_1)(r2)
        fsub f0, f0, f1
        fsel f1, f0, f2, f3
        blr

===-------------------------------------------------------------------------===

PIC Code Gen IPO optimization:

Squish small scalar globals together into a single global struct, allowing the
address of the struct to be CSE'd, avoiding PIC accesses (also reduces the size
of the GOT on targets with one).

Note that this is discussed here for GCC:
http://gcc.gnu.org/ml/gcc-patches/2006-02/msg00133.html

===-------------------------------------------------------------------------===

Implement the Newton-Raphson method for improving estimate instructions to the
correct accuracy, and implement divide as multiply by reciprocal when the
reciprocal has more than one use.  Itanium would want this too.

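The refinement step itself is simple; a minimal C sketch, assuming a hardware
estimate such as fres supplies the seed (the helper name is illustrative):

/* One Newton-Raphson step for 1/d: x1 = x0*(2 - d*x0).  Each step
   roughly doubles the number of correct bits, so a ~5-bit estimate
   needs two steps to approach single-precision accuracy. */
float refine_recip(float d, float x0 /* seed, e.g. from fres */) {
  float x1 = x0 * (2.0f - d * x0);
  return x1 * (2.0f - d * x1);
}

a/b is then a * refine_recip(b, seed), which pays for itself once the
reciprocal has more than one use.
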
===-------------------------------------------------------------------------===

Compile offsets from allocas:

int *%test() {
        %X = alloca { int, int }
        %Y = getelementptr {int,int}* %X, int 0, uint 1
        ret int* %Y
}

into a single add, not two:

_test:
        addi r2, r1, -8
        addi r3, r2, 4
        blr

--> important for C++.

===-------------------------------------------------------------------------===

No loads or stores of the constants should be needed:

struct foo { double X, Y; };
void xxx(struct foo F);
void bar() { struct foo R = { 1.0, 2.0 }; xxx(R); }

===-------------------------------------------------------------------------===

Darwin Stub removal:

We still generate calls to foo$stub, and stubs, on Darwin.  This is not
necessary when building with the Leopard (10.5) or later linker, as stubs are
generated by ld when necessary.  Parameterizing this based on the deployment
target (-mmacosx-version-min) is probably enough.  x86-32 does this right, see
its logic.

===-------------------------------------------------------------------------===

Darwin Stub LICM optimization:

Loops like this:

  for (...)  bar();

Have to go through an indirect stub if bar is external or linkonce.  It would
be better to compile it as:

  fp = &bar;
  for (...)  fp();

which only computes the address of bar once (instead of each time through the
stub).  This is Darwin specific and would have to be done in the code
generator.  Probably not a win on x86.

===-------------------------------------------------------------------------===

Simple IPO for argument passing, change:
  void foo(int X, double Y, int Z) -> void foo(int X, int Z, double Y)

the Darwin ABI specifies that any integer arguments in the first 32 bytes worth
of arguments get assigned to r3 through r10.  That is, if you have a function
foo(int, double, int) you get r3, f1, r6, since the 64 bit double ate up the
argument bytes for r4 and r5.  The trick then would be to shuffle the argument
order for functions we can internalize so that the maximum number of
integers/pointers get passed in regs before you see any of the fp arguments.

Instead of implementing this, it would actually probably be easier to just
implement a PPC fastcc, where we could do whatever we wanted to the CC,
including having this work sanely.

===-------------------------------------------------------------------------===

Fix Darwin FP-In-Integer Registers ABI

Darwin passes doubles in structures in integer registers, which is very very
bad.  Add something like a BIT_CONVERT to LLVM, then do an interprocedural
transformation that percolates these things out of functions.

Check out how horrible this is:
http://gcc.gnu.org/ml/gcc/2005-10/msg01036.html

This is an extension of "interprocedural CC unmunging" that can't be done with
just fastcc.

===-------------------------------------------------------------------------===

Compile this:

int foo(int a) {
  int b = (a < 8);
  if (b) {
    return b * 3;     // ignore the fact that this is always 3.
  } else {
    return 2;
  }
}

into something not this:

_foo:
1)      cmpwi cr7, r3, 8
        mfcr r2, 1
        rlwinm r2, r2, 29, 31, 31
1)      cmpwi cr0, r3, 7
        bgt cr0, LBB1_2 ; UnifiedReturnBlock
LBB1_1: ; then
        rlwinm r2, r2, 0, 31, 31
        mulli r3, r2, 3
        blr
LBB1_2: ; UnifiedReturnBlock
        li r3, 2
        blr

In particular, the two compares (marked 1) could be shared by reversing one.
This could be done in the dag combiner, by swapping a BR_CC when a SETCC of the
same operands (but backwards) exists.  In this case, this wouldn't save us
anything though, because the compares still wouldn't be shared.

===-------------------------------------------------------------------------===

We should custom expand setcc instead of pretending that we have it.  That
would allow us to expose the access of the crbit after the mfcr, allowing
that access to be trivially folded into other ops.  A simple example:

int foo(int a, int b) { return (a < b) << 4; }

compiles into:

_foo:
        cmpw cr7, r3, r4
        mfcr r2, 1
        rlwinm r2, r2, 29, 31, 31
        slwi r3, r2, 4
        blr

===-------------------------------------------------------------------------===

Fold add and sub with constant into non-extern, non-weak addresses so this:

static int a;
void bar(int b) { a = b; }
void foo(unsigned char *c) {
  *c = a;
}

So that

_foo:
        lis r2, ha16(_a)
        la r2, lo16(_a)(r2)
        lbz r2, 3(r2)
        stb r2, 0(r3)
        blr

Becomes

_foo:
        lis r2, ha16(_a+3)
        lbz r2, lo16(_a+3)(r2)
        stb r2, 0(r3)
        blr

===-------------------------------------------------------------------------===

We generate really bad code for this:

int f(signed char *a, _Bool b, _Bool c) {
   signed char t = 0;
  if (b)  t = *a;
  if (c)  *a = t;
}

===-------------------------------------------------------------------------===

This:
int test(unsigned *P) { return *P >> 24; }

Should compile to:

_test:
        lbz r3,0(r3)
        blr

not:

_test:
        lwz r2, 0(r3)
        srwi r3, r2, 24
        blr

===-------------------------------------------------------------------------===

On the G5, logical CR operations are more expensive in their three-address
form: ops that read/write the same register are half as expensive as those
that read from two registers that are different from their destination.

We should model this with two separate instructions.  The isel should generate
the "two address" form of the instructions.  When the register allocator
detects that it needs to insert a copy due to the two-addressness of the CR
logical op, it will invoke PPCInstrInfo::convertToThreeAddress.  At this point
we can convert to the "three address" instruction, to save code space.

This only matters when we start generating cr logical ops.

===-------------------------------------------------------------------------===

We should compile these two functions to the same thing:

#include <stdlib.h>
void f(int a, int b, int *P) {
  *P = (a-b)>=0?(a-b):(b-a);
}
void g(int a, int b, int *P) {
  *P = abs(a-b);
}

Further, they should compile to something better than:

_g:
        subf r2, r4, r3
        subfic r3, r2, 0
        cmpwi cr0, r2, -1
        bgt cr0, LBB2_2 ; entry
LBB2_1: ; entry
        mr r2, r3
LBB2_2: ; entry
        stw r2, 0(r5)
        blr

GCC produces:

_g:
        subf r4,r4,r3
        srawi r2,r4,31
        xor r0,r2,r4
        subf r0,r2,r0
        stw r0,0(r5)
        blr

... which is much nicer.

This theoretically may help improve twolf slightly (used in dimbox.c:142?).

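The identity GCC uses is easy to state in C; a sketch (assuming arithmetic
right shift of a negative int, as srawi implements, and ignoring the usual
INT_MIN caveat of abs):

/* mask = a >> 31 is 0 or -1, and (a ^ mask) - mask is abs(a).
   This maps directly onto srawi/xor/subf. */
int abs_branchless(int a) {
  int mask = a >> 31;       /* srawi */
  return (a ^ mask) - mask; /* xor, subf */
}
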
===-------------------------------------------------------------------------===

PR5945: This:
define i32 @clamp0g(i32 %a) {
entry:
        %cmp = icmp slt i32 %a, 0
        %sel = select i1 %cmp, i32 0, i32 %a
        ret i32 %sel
}

is compiled to this with the PowerPC (32-bit) backend:

_clamp0g:
        cmpwi cr0, r3, 0
        li r2, 0
        blt cr0, LBB1_2
; BB#1:                 ; %entry
        mr r2, r3
LBB1_2:                 ; %entry
        mr r3, r2
        blr

This could be reduced to the much simpler:

_clamp0g:
        srawi r2, r3, 31
        andc r3, r3, r2
        blr

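The two-instruction version is another sign-mask trick; a minimal C sketch
(again assuming arithmetic right shift):

/* a >> 31 is -1 exactly when a < 0, so clearing a with that mask
   yields max(a, 0). */
int clamp0g_branchless(int a) {
  int mask = a >> 31;   /* srawi r2, r3, 31 */
  return a & ~mask;     /* andc  r3, r3, r2 */
}
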
===-------------------------------------------------------------------------===

int foo(int N, int ***W, int **TK, int X) {
  int t, i;

  for (t = 0; t < N; ++t)
    for (i = 0; i < 4; ++i)
      W[t / X][i][t % X] = TK[i][t];

  return 5;
}

We generate relatively atrocious code for this loop compared to gcc.

We could also strength reduce the rem and the div:
http://www.lcs.mit.edu/pubs/pdf/MIT-LCS-TM-600.pdf

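A sketch of the strength-reduced form at the source level, carrying the
quotient and remainder as induction variables (assuming X > 0):

/* Hypothetical rewrite: t / X and t % X become two extra induction
   variables updated with an add and a compare per iteration instead
   of a divide and a remainder. */
int foo_reduced(int N, int ***W, int **TK, int X) {
  int t, i, q = 0, r = 0;   /* q == t / X, r == t % X */
  for (t = 0; t < N; ++t) {
    for (i = 0; i < 4; ++i)
      W[q][i][r] = TK[i][t];
    if (++r == X) {         /* remainder wrapped; quotient advances */
      r = 0;
      ++q;
    }
  }
  return 5;
}
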
===-------------------------------------------------------------------------===

float foo(float X) { return (int)(X); }

Currently produces:

_foo:
        fctiwz f0, f1
        stfd f0, -8(r1)
        lwz r2, -4(r1)
        extsw r2, r2
        std r2, -16(r1)
        lfd f0, -16(r1)
        fcfid f0, f0
        frsp f1, f0
        blr

We could use a target dag combine to turn the lwz/extsw into an lwa when the
lwz has a single use.  Since LWA is cracked anyway, this would be a codesize
win only.

===-------------------------------------------------------------------------===

We generate ugly code for this:

void func(unsigned int *ret, float dx, float dy, float dz, float dw) {
  unsigned code = 0;
  if(dx < -dw) code |= 1;
  if(dx > dw)  code |= 2;
  if(dy < -dw) code |= 4;
  if(dy > dw)  code |= 8;
  if(dz < -dw) code |= 16;
  if(dz > dw)  code |= 32;
  *ret = code;
}

===-------------------------------------------------------------------------===

Complete the signed i32 to FP conversion code using 64-bit registers
transformation, good for PI.  See PPCISelLowering.cpp, this comment:

     // FIXME: disable this lowered code.  This generates 64-bit register values,
     // and we don't model the fact that the top part is clobbered by calls.  We
     // need to flag these together so that the value isn't live across a call.
     //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

Also, if the registers are spilled to the stack, we have to ensure that all
64 bits of them are saved/restored, otherwise we will miscompile the code.  It
sounds like we need to get the 64-bit register classes going.

===-------------------------------------------------------------------------===

%struct.B = type { i8, [3 x i8] }

define void @bar(%struct.B* %b) {
entry:
        %tmp = bitcast %struct.B* %b to i32*          ; <uint*> [#uses=1]
        %tmp = load i32* %tmp                         ; <uint> [#uses=1]
        %tmp3 = bitcast %struct.B* %b to i32*         ; <uint*> [#uses=1]
        %tmp4 = load i32* %tmp3                       ; <uint> [#uses=1]
        %tmp8 = bitcast %struct.B* %b to i32*         ; <uint*> [#uses=2]
        %tmp9 = load i32* %tmp8                       ; <uint> [#uses=1]
        %tmp4.mask17 = shl i32 %tmp4, i8 1            ; <uint> [#uses=1]
        %tmp1415 = and i32 %tmp4.mask17, 2147483648   ; <uint> [#uses=1]
        %tmp.masked = and i32 %tmp, 2147483648        ; <uint> [#uses=1]
        %tmp11 = or i32 %tmp1415, %tmp.masked         ; <uint> [#uses=1]
        %tmp12 = and i32 %tmp9, 2147483647            ; <uint> [#uses=1]
        %tmp13 = or i32 %tmp12, %tmp11                ; <uint> [#uses=1]
        store i32 %tmp13, i32* %tmp8
        ret void
}

We emit:

_foo:
        lwz r2, 0(r3)
        slwi r4, r2, 1
        or r4, r4, r2
        rlwimi r2, r4, 0, 0, 0
        stw r2, 0(r3)
        blr

We could collapse a bunch of those ORs and ANDs and generate the following
equivalent code:

_foo:
        lwz r2, 0(r3)
        rlwinm r4, r2, 1, 0, 0
        or r2, r2, r4
        stw r2, 0(r3)
        blr

===-------------------------------------------------------------------------===

We compile:

unsigned test6(unsigned x) {
  return ((x & 0x00FF0000) >> 16) | ((x & 0x000000FF) << 16);
}

into:

_test6:
        lis r2, 255
        rlwinm r3, r3, 16, 0, 31
        ori r2, r2, 255
        and r3, r3, r2
        blr

GCC gets it down to:

_test6:
        rlwinm r0,r3,16,8,15
        rlwinm r3,r3,16,24,31
        or r3,r3,r0
        blr

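GCC's version is one rotate feeding two byte-select masks; in C terms, a
minimal sketch of the same computation:

/* rlwinm rD,rS,16,MB,ME is a rotate-left-by-16 followed by a mask:
   bits 8-15 select 0x00FF0000 and bits 24-31 select 0x000000FF. */
unsigned test6_rotated(unsigned x) {
  unsigned r = (x << 16) | (x >> 16);          /* rotlwi x, 16 */
  return (r & 0x00FF0000) | (r & 0x000000FF);  /* the two rlwinms */
}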

===-------------------------------------------------------------------------===

Consider a function like this:

float foo(float X) { return X + 1234.4123f; }

The FP constant ends up in the constant pool, so we need to get the LR
register.  This ends up producing code like this:

_foo:
.LBB_foo_0:     ; entry
        mflr r11
***     stw r11, 8(r1)
        bl "L00000$pb"
"L00000$pb":
        mflr r2
        addis r2, r2, ha16(.CPI_foo_0-"L00000$pb")
        lfs f0, lo16(.CPI_foo_0-"L00000$pb")(r2)
        fadds f1, f1, f0
***     lwz r11, 8(r1)
        mtlr r11
        blr

This is functional, but there is no reason to spill the LR register all the way
to the stack (the two marked instrs): spilling it to a GPR is quite enough.

Implementing this will require some codegen improvements.  Nate writes:

"So basically what we need to support the "no stack frame save and restore" is a
generalization of the LR optimization to "callee-save regs".

Currently, we have LR marked as a callee-save reg.  The register allocator sees
that it's callee save, and spills it directly to the stack.

Ideally, something like this would happen:

LR would be in a separate register class from the GPRs.  The class of LR would
be marked "unspillable".  When the register allocator came across an
unspillable reg, it would ask "what is the best class to copy this into that I
*can* spill".  If it gets a class back, which it will in this case (the gprs),
it grabs a free register of that class.  If it is then later necessary to
spill that reg, so be it.

===-------------------------------------------------------------------------===

We compile this:
int test(_Bool X) {
  return X ? 524288 : 0;
}

to:
_test:
        cmplwi cr0, r3, 0
        lis r2, 8
        li r3, 0
        beq cr0, LBB1_2 ;entry
LBB1_1: ;entry
        mr r3, r2
LBB1_2: ;entry
        blr

instead of:
_test:
        addic r2,r3,-1
        subfe r0,r2,r3
        slwi r3,r0,19
        blr

This sort of thing occurs a lot due to globalopt.

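Since 524288 is 1 << 19 and a _Bool is already 0 or 1, the whole select is a
single shift; a one-line sketch of the branchless form:

int test_shift(_Bool X) {
  return (int)X << 19;   /* 524288 == 1 << 19 */
}
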
===-------------------------------------------------------------------------===

We compile:

define i32 @bar(i32 %x) nounwind readnone ssp {
entry:
  %0 = icmp eq i32 %x, 0                          ; <i1> [#uses=1]
  %neg = sext i1 %0 to i32                        ; <i32> [#uses=1]
  ret i32 %neg
}

to:

_bar:
        cntlzw r2, r3
        slwi r2, r2, 26
        srawi r3, r2, 31
        blr

it would be better to produce:

_bar:
        addic r3,r3,-1
        subfe r3,r3,r3
        blr

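The addic/subfe pair is the usual carry trick for "is zero"; a minimal C
sketch of why it works:

/* Computing x - 1 carries out exactly when x != 0 (unsigned x - 1
   only wraps for x == 0), and subfe r,r,r evaluates to -1 + CA:
   -1 for x == 0, 0 otherwise. */
int bar_carry(unsigned x) {
  unsigned carry = (x - 1u) < x;  /* CA out of addic */
  return (int)carry - 1;          /* subfe: -1 + CA */
}
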
===-------------------------------------------------------------------------===

We currently compile 32-bit bswap:

declare i32 @llvm.bswap.i32(i32 %A)
define i32 @test(i32 %A) {
        %B = call i32 @llvm.bswap.i32(i32 %A)
        ret i32 %B
}

to:

_test:
        rlwinm r2, r3, 24, 16, 23
        slwi r4, r3, 24
        rlwimi r2, r3, 8, 24, 31
        rlwimi r4, r3, 8, 8, 15
        rlwimi r4, r2, 0, 16, 31
        mr r3, r4
        blr

it would be more efficient to produce:

_foo:   mr r0,r3
        rlwinm r3,r3,8,0xffffffff
        rlwimi r3,r0,24,0,7
        rlwimi r3,r0,24,16,23
        blr

===-------------------------------------------------------------------------===

test/CodeGen/PowerPC/2007-03-24-cntlzd.ll compiles to:

__ZNK4llvm5APInt17countLeadingZerosEv:
        ld r2, 0(r3)
        cntlzd r2, r2
        or r2, r2, r2     <<-- silly.
        addi r3, r2, -64
        blr

The dead or is a 'truncate' from 64- to 32-bits.

===-------------------------------------------------------------------------===

We generate horrible ppc code for this:

#define N  2000000
double   a[N],c[N];
void simpleloop() {
   int j;
   for (j=0; j<N; j++)
     c[j] = a[j];
}

LBB1_1: ;bb
        lfdx f0, r3, r4
        addi r5, r5, 1        ;; Extra IV for the exit value compare.
        stfdx f0, r2, r4
        addi r4, r4, 8

        xoris r6, r5, 30      ;; This is due to a large immediate.
        cmplwi cr0, r6, 33920
        bne cr0, LBB1_1

//===---------------------------------------------------------------------===//

This:
        #include <algorithm>
        inline std::pair<unsigned, bool> full_add(unsigned a, unsigned b)
        { return std::make_pair(a + b, a + b < a); }
        bool no_overflow(unsigned a, unsigned b)
        { return !full_add(a, b).second; }

Should compile to:

__Z11no_overflowjj:
        add r4,r3,r4
        subfc r3,r3,r4
        li r3,0
        adde r3,r3,r3
        blr

(or better) not:

__Z11no_overflowjj:
        add r2, r4, r3
        cmplw cr7, r2, r3
        mfcr r2
        rlwinm r2, r2, 29, 31, 31
        xori r3, r2, 1
        blr

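The target sequence reads the answer straight out of the carry chain; a
minimal C sketch of the same computation:

/* An unsigned add overflows exactly when the truncated sum is smaller
   than an operand; subfc/adde recover that carry without touching CR. */
int no_overflow_c(unsigned a, unsigned b) {
  return a + b >= a;   /* carry out of the add, inverted */
}
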
//===---------------------------------------------------------------------===//

We compile some FP comparisons into an mfcr with two rlwinms and an or.  For
example:
#include <math.h>
int test(double x, double y) { return islessequal(x, y);}
int test2(double x, double y) { return islessgreater(x, y);}
int test3(double x, double y) { return !islessequal(x, y);}

Compiles into (all three are similar, but the bits differ):

_test:
        fcmpu cr7, f1, f2
        mfcr r2
        rlwinm r3, r2, 29, 31, 31
        rlwinm r2, r2, 31, 31, 31
        or r3, r2, r3
        blr

GCC compiles this into:

_test:
        fcmpu cr7,f1,f2
        cror 30,28,30
        mfcr r3
        rlwinm r3,r3,31,1
        blr

which is more efficient and can use mfocr.  See PR642 for some more context.

//===---------------------------------------------------------------------===//

void foo(float *data, float d) {
   long i;
   for (i = 0; i < 8000; i++)
      data[i] = d;
}
void foo2(float *data, float d) {
   long i;
   data--;
   for (i = 0; i < 8000; i++) {
      data[1] = d;
      data++;
   }
}

These compile to:

_foo:
        li r2, 0
LBB1_1: ; bb
        addi r4, r2, 4
        stfsx f1, r3, r2
        cmplwi cr0, r4, 32000
        mr r2, r4
        bne cr0, LBB1_1 ; bb
        blr
_foo2:
        li r2, 0
LBB2_1: ; bb
        addi r4, r2, 4
        stfsx f1, r3, r2
        cmplwi cr0, r4, 32000
        mr r2, r4
        bne cr0, LBB2_1 ; bb
        blr

The 'mr' could be eliminated by folding the add into the cmp.

//===---------------------------------------------------------------------===//
Codegen for the following (low-probability) case deteriorated considerably
when the correctness fixes for unordered comparisons went in (PR 642, 58871).
It should be possible to recover the code quality described in the comments.

; RUN: llvm-as < %s | llc -march=ppc32 | grep or | count 3
; This should produce one 'or' or 'cror' instruction per function.

; RUN: llvm-as < %s | llc -march=ppc32 | grep mfcr | count 3
; PR2964

define i32 @test(double %x, double %y) nounwind {
entry:
        %tmp3 = fcmp ole double %x, %y          ; <i1> [#uses=1]
        %tmp345 = zext i1 %tmp3 to i32          ; <i32> [#uses=1]
        ret i32 %tmp345
}

define i32 @test2(double %x, double %y) nounwind {
entry:
        %tmp3 = fcmp one double %x, %y          ; <i1> [#uses=1]
        %tmp345 = zext i1 %tmp3 to i32          ; <i32> [#uses=1]
        ret i32 %tmp345
}

define i32 @test3(double %x, double %y) nounwind {
entry:
        %tmp3 = fcmp ugt double %x, %y          ; <i1> [#uses=1]
        %tmp34 = zext i1 %tmp3 to i32           ; <i32> [#uses=1]
        ret i32 %tmp34
}
//===----------------------------------------------------------------------===//
; RUN: llvm-as < %s | llc -march=ppc32 | not grep fneg

; This could generate FSEL with appropriate flags (FSEL is not IEEE-safe, and
; should not be generated except with -enable-finite-only-fp-math or the like).
; With the correctness fixes for PR642 (58871) LowerSELECT_CC would need to
; recognize a more elaborate tree than a simple SETxx.

define double @test_FNEG_sel(double %A, double %B, double %C) {
        %D = sub double -0.000000e+00, %A               ; <double> [#uses=1]
        %Cond = fcmp ugt double %D, -0.000000e+00       ; <i1> [#uses=1]
        %E = select i1 %Cond, double %B, double %C      ; <double> [#uses=1]
        ret double %E
}

//===----------------------------------------------------------------------===//
The save/restore sequence for CR in prolog/epilog is terrible:
- Each CR subreg is saved individually, rather than doing one save as a unit.
- On Darwin, the save is done after the decrement of SP, which means the offset
from SP of the save slot can be too big for a store instruction, which means we
need an additional register (currently hacked in 96015+96020; the solution
there is correct, but poor).
- On SVR4 the same thing can happen, and I don't think saving before the SP
decrement is safe on that target, as there is no red zone.  This is currently
broken AFAIK, although it's not a target I can exercise.
The following demonstrates the problem:
extern void bar(char *p);
void foo() {
  char x[100000];
  bar(x);
  __asm__("" ::: "cr2");
}
914}