//===---------------------------------------------------------------------===//
// Random ideas for the X86 backend.
//===---------------------------------------------------------------------===//

Add MUL2U and MUL2S nodes to represent a multiply that returns both the
Hi and Lo parts (combination of MUL and MULH[SU] into one node). Add this to
X86, and make the dag combiner produce it when needed. This will eliminate one
imul from the code generated for:

long long test(long long X, long long Y) { return X*Y; }

by using the EAX result from the mul. We should add a similar node for
DIVREM.

Another case is:

long long test(int X, int Y) { return (long long)X*Y; }

... which should only be one imul instruction.

//===---------------------------------------------------------------------===//

This should be one DIV/IDIV instruction, not a libcall:

unsigned test(unsigned long long X, unsigned Y) {
        return X/Y;
}

This can be done trivially with a custom legalizer. What about overflow
though? http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224

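A sketch of the overflow concern: a single 32-bit divl divides EDX:EAX by the
32-bit operand and raises #DE if the quotient does not fit in 32 bits, whereas
the C semantics simply truncate. The caller name below is made up:

unsigned overflow_case(void) {
        return test(0x100000000ULL, 1);  /* quotient is 2^32: a plain divl
                                            would fault, but C requires 0 */
}
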
//===---------------------------------------------------------------------===//

Some targets (e.g. Athlons) prefer ffreep to fstp ST(0):
http://gcc.gnu.org/ml/gcc-patches/2004-04/msg00659.html

//===---------------------------------------------------------------------===//

This should use fiadd on chips where it is profitable:
double foo(double P, int *I) { return P+*I; }

We have fiadd patterns now, but the following two have the same cost and
complexity. We need a way to specify that the latter is more profitable.

def FpADD32m  : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fadd RFP:$src1,
                                     (extloadf64f32 addr:$src2)))]>;
                // ST(0) = ST(0) + [mem32]

def FpIADD32m : FpI<(ops RFP:$dst, RFP:$src1, i32mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fadd RFP:$src1,
                                     (X86fild addr:$src2, i32)))]>;
                // ST(0) = ST(0) + [mem32int]

//===---------------------------------------------------------------------===//

The FP stackifier needs to be global. Also, it should handle simple
permutations to reduce the number of shuffle instructions, e.g. turning:

fld P   ->              fld Q
fld Q                   fld P
fxch

or:

fxch    ->              fucomi
fucomi                  jl X
jg X

Ideas:
http://gcc.gnu.org/ml/gcc-patches/2004-11/msg02410.html

//===---------------------------------------------------------------------===//

Improvements to the multiply -> shift/add algorithm:
http://gcc.gnu.org/ml/gcc-patches/2004-08/msg01590.html

//===---------------------------------------------------------------------===//

Improve code like this (occurs fairly frequently, e.g. in LLVM):
long long foo(int x) { return 1LL << x; }

http://gcc.gnu.org/ml/gcc-patches/2004-09/msg01109.html
http://gcc.gnu.org/ml/gcc-patches/2004-09/msg01128.html
http://gcc.gnu.org/ml/gcc-patches/2004-09/msg01136.html

Another useful one would be ~0ULL >> X and ~0ULL << X.

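For reference, those additional cases written out as C functions (the names
are made up):

unsigned long long mask_low(int X)  { return ~0ULL >> X; }
unsigned long long mask_high(int X) { return ~0ULL << X; }
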
//===---------------------------------------------------------------------===//

Compile this:
_Bool f(_Bool a) { return a!=1; }

into:
        movzbl  %dil, %eax
        xorl    $1, %eax
        ret

//===---------------------------------------------------------------------===//

Some isel ideas:

1. Dynamic programming based approach when compile time is not an
   issue.
2. Code duplication (addressing mode) during isel.
3. Other ideas from "Register-Sensitive Selection, Duplication, and
   Sequencing of Instructions".
4. Scheduling for reduced register pressure.  E.g. "Minimum Register
   Instruction Sequence Problem: Revisiting Optimal Code Generation for DAGs"
   and other related papers.
   http://citeseer.ist.psu.edu/govindarajan01minimum.html

//===---------------------------------------------------------------------===//

Should we promote i16 to i32 to avoid partial register update stalls?

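For example (a sketch; the function name is made up), simple i16 arithmetic
like this forces 16-bit operations that write only the low half of a 32-bit
register:

short add16(short a, short b) { return a + b; }
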
//===---------------------------------------------------------------------===//

Leave any_extend as pseudo instruction and hint to register
allocator. Delay codegen until post register allocation.

//===---------------------------------------------------------------------===//

Add a target specific hook to DAG combiner to handle SINT_TO_FP and
FP_TO_SINT when the source operand is already in memory.

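For example (a sketch; the function name is made up), the integer source is
already in memory here, so the conversion could fold the load into fild
instead of going through an integer register:

double int_to_double(int *p) { return (double)*p; }
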
//===---------------------------------------------------------------------===//

Model X86 EFLAGS as a real register to avoid redundant cmp / test. e.g.

        cmpl $1, %eax
        setg %al
        testb %al, %al  # unnecessary
        jne .BB7

//===---------------------------------------------------------------------===//

Count leading zeros and count trailing zeros:

int clz(int X) { return __builtin_clz(X); }
int ctz(int X) { return __builtin_ctz(X); }

$ gcc t.c -S -o - -O3 -fomit-frame-pointer -masm=intel
clz:
        bsr     %eax, DWORD PTR [%esp+4]
        xor     %eax, 31
        ret
ctz:
        bsf     %eax, DWORD PTR [%esp+4]
        ret

However, check that these are defined for 0 and 32.  Our intrinsics are, GCC's
aren't.

//===---------------------------------------------------------------------===//

Use push/pop instructions in prolog/epilog sequences instead of stores off
ESP (certain code size win, perf win on some [which?] processors).
Also, it appears icc uses push for parameter passing. Need to investigate.

//===---------------------------------------------------------------------===//

Only use inc/neg/not instructions on processors where they are faster than
add/sub/xor. They are slower on the P4 due to only updating some processor
flags.

//===---------------------------------------------------------------------===//

Open code rint, floor, ceil, and trunc:
http://gcc.gnu.org/ml/gcc-patches/2004-08/msg02006.html
http://gcc.gnu.org/ml/gcc-patches/2004-08/msg02011.html

//===---------------------------------------------------------------------===//

Combine: a = sin(x), b = cos(x) into a,b = sincos(x).

Expand these to calls of sin/cos and stores:
      double sincos(double x, double *sin, double *cos);
      float sincosf(float x, float *sin, float *cos);
      long double sincosl(long double x, long double *sin, long double *cos);

Doing so could allow SROA of the destination pointers.  See also:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=17687

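A sketch of the source pattern that would benefit (the function name is made
up):

#include <math.h>
void polar(double x, double *s, double *c) {
        *s = sin(x);    /* sin and cos of the same argument ...  */
        *c = cos(x);    /* ... could be combined into one sincos */
}
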
//===---------------------------------------------------------------------===//

The instruction selector sometimes misses folding a load into a compare.  The
pattern is written as (cmp reg, (load p)).  Because the compare isn't
commutative, it is not matched with the load on both sides.  The dag combiner
should be made smart enough to canonicalize the load into the RHS of a compare
when it can invert the result of the compare for free.

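A sketch of a case that hits this (the function name is made up): the load
ends up on the LHS of the compare, so it is only foldable if the condition is
inverted first:

int cmp_load_lhs(int *p, int x) { return *p < x; }
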
//===---------------------------------------------------------------------===//

LSR should be turned on for the X86 backend and tuned to take advantage of its
addressing modes.

//===---------------------------------------------------------------------===//

When compiled with unsafemath enabled, "main" should enable SSE DAZ mode and
other fast SSE modes.

//===---------------------------------------------------------------------===//

Think about doing i64 math in SSE regs.

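For example (a sketch; the function name is made up), on a 32-bit target each
i64 value currently occupies a pair of integer registers:

unsigned long long add64(unsigned long long a, unsigned long long b) {
        return a + b;
}
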
//===---------------------------------------------------------------------===//

The DAG Isel doesn't fold the loads into the adds in this testcase.  The
pattern selector does.  This is because the chain value of the load gets
selected first, and the loads aren't checking to see if they are only used by
an add.

.ll:

int %test(int* %x, int* %y, int* %z) {
        %X = load int* %x
        %Y = load int* %y
        %Z = load int* %z
        %a = add int %X, %Y
        %b = add int %a, %Z
        ret int %b
}

dag isel:

_test:
        movl 4(%esp), %eax
        movl (%eax), %eax
        movl 8(%esp), %ecx
        movl (%ecx), %ecx
        addl %ecx, %eax
        movl 12(%esp), %ecx
        movl (%ecx), %ecx
        addl %ecx, %eax
        ret

pattern isel:

_test:
        movl 12(%esp), %ecx
        movl 4(%esp), %edx
        movl 8(%esp), %eax
        movl (%eax), %eax
        addl (%edx), %eax
        addl (%ecx), %eax
        ret

This is bad for register pressure, though the dag isel is producing a
better schedule. :)

//===---------------------------------------------------------------------===//

This testcase should have no SSE instructions in it, and only one load from
a constant pool:

double %test3(bool %B) {
        %C = select bool %B, double 123.412, double 523.01123123
        ret double %C
}

Currently, the select is being lowered, which prevents the dag combiner from
turning 'select (load CPI1), (load CPI2)' -> 'load (select CPI1, CPI2)'

The pattern isel got this one right.

//===---------------------------------------------------------------------===//

We need to lower switch statements to tablejumps when appropriate instead of
always into binary branch trees.

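A sketch of a dense switch that should become a table jump rather than a
binary branch tree (the function name is made up):

int classify(int x) {
        switch (x) {
        case 0: return 10;
        case 1: return 11;
        case 2: return 12;
        case 3: return 13;
        case 4: return 14;
        default: return 0;
        }
}
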
//===---------------------------------------------------------------------===//

SSE doesn't have [mem] op= reg instructions.  If we have an SSE instruction
like this:

  X += y

and the register allocator decides to spill X, it is cheaper to emit this as:

Y += [xslot]
store Y -> [xslot]

than as:

tmp = [xslot]
tmp += y
store tmp -> [xslot]

..and this uses one fewer register (so this should be done at load folding
time, not at spiller time).  *Note* however that this can only be done
if Y is dead.  Here's a testcase:

%.str_3 = external global [15 x sbyte]          ; <[15 x sbyte]*> [#uses=0]
implementation   ; Functions:
declare void %printf(int, ...)
void %main() {
build_tree.exit:
        br label %no_exit.i7
no_exit.i7:             ; preds = %no_exit.i7, %build_tree.exit
        %tmp.0.1.0.i9 = phi double [ 0.000000e+00, %build_tree.exit ], [ %tmp.34.i18, %no_exit.i7 ]    ; <double> [#uses=1]
        %tmp.0.0.0.i10 = phi double [ 0.000000e+00, %build_tree.exit ], [ %tmp.28.i16, %no_exit.i7 ]   ; <double> [#uses=1]
        %tmp.28.i16 = add double %tmp.0.0.0.i10, 0.000000e+00
        %tmp.34.i18 = add double %tmp.0.1.0.i9, 0.000000e+00
        br bool false, label %Compute_Tree.exit23, label %no_exit.i7
Compute_Tree.exit23:            ; preds = %no_exit.i7
        tail call void (int, ...)* %printf( int 0 )
        store double %tmp.34.i18, double* null
        ret void
}

We currently emit:

.BBmain_1:
        xorpd %XMM1, %XMM1
        addsd %XMM0, %XMM1
***     movsd %XMM2, QWORD PTR [%ESP + 8]
***     addsd %XMM2, %XMM1
***     movsd QWORD PTR [%ESP + 8], %XMM2
        jmp .BBmain_1   # no_exit.i7

This is a bugpoint reduced testcase, which is why the testcase doesn't make
much sense (e.g. it's an infinite loop). :)

//===---------------------------------------------------------------------===//

None of the FPStack instructions are handled in
X86RegisterInfo::foldMemoryOperand, which prevents the spiller from
folding spill code into the instructions.

//===---------------------------------------------------------------------===//

In many cases, LLVM generates code like this:

_test:
        movl 8(%esp), %eax
        cmpl %eax, 4(%esp)
        setl %al
        movzbl %al, %eax
        ret

On some processors (which ones?), it is more efficient to do this:

_test:
        movl 8(%esp), %ebx
        xor  %eax, %eax
        cmpl %ebx, 4(%esp)
        setl %al
        ret

Doing this correctly is tricky though, as the xor clobbers the flags.

//===---------------------------------------------------------------------===//

We should generate 'test' instead of 'cmp' in various cases, e.g.:

bool %test(int %X) {
        %Y = shl int %X, ubyte 1
        %C = seteq int %Y, 0
        ret bool %C
}
bool %test(int %X) {
        %Y = and int %X, 8
        %C = seteq int %Y, 0
        ret bool %C
}

This may just be a matter of using 'test' to write bigger patterns for X86cmp.

//===---------------------------------------------------------------------===//

SSE should implement 'select_cc' using 'emulated conditional moves' that use
pcmp/pand/pandn/por to do a selection instead of a conditional branch:

double %X(double %Y, double %Z, double %A, double %B) {
        %C = setlt double %A, %B
        %z = add double %Z, 0.0    ;; select operand is not a load
        %D = select bool %C, double %Y, double %z
        ret double %D
}

We currently emit:

_X:
        subl $12, %esp
        xorpd %xmm0, %xmm0
        addsd 24(%esp), %xmm0
        movsd 32(%esp), %xmm1
        movsd 16(%esp), %xmm2
        ucomisd 40(%esp), %xmm1
        jb LBB_X_2
LBB_X_1:
        movsd %xmm0, %xmm2
LBB_X_2:
        movsd %xmm2, (%esp)
        fldl (%esp)
        addl $12, %esp
        ret

//===---------------------------------------------------------------------===//

We should generate bts/btr/etc instructions on targets where they are cheap or
when codesize is important.  e.g., for:

void setbit(int *target, int bit) {
        *target |= (1 << bit);
}
void clearbit(int *target, int bit) {
        *target &= ~(1 << bit);
}

//===---------------------------------------------------------------------===//

Instead of the following for memset char*, 1, 10:

        movl $16843009, 4(%edx)
        movl $16843009, (%edx)
        movw $257, 8(%edx)

It might be better to generate

        movl $16843009, %eax
        movl %eax, 4(%edx)
        movl %eax, (%edx)
        movw %ax, 8(%edx)

when we can spare a register.  It reduces code size.

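For reference, a sketch of the source that produces the stores above
(0x01010101 == 16843009):

#include <string.h>
void set10(char *p) { memset(p, 1, 10); }
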
//===---------------------------------------------------------------------===//

It's not clear whether we should use pxor or xorps / xorpd to clear XMM
registers.  The choice may depend on subtarget information.  We should do some
more experiments on different x86 machines.

//===---------------------------------------------------------------------===//

Evaluate what the best way to codegen sdiv X, (2^C) is.  For X/8, we currently
get this:

int %test1(int %X) {
        %Y = div int %X, 8
        ret int %Y
}

_test1:
        movl 4(%esp), %eax
        movl %eax, %ecx
        sarl $31, %ecx
        shrl $29, %ecx
        addl %ecx, %eax
        sarl $3, %eax
        ret

GCC knows several different ways to codegen it, one of which is this:

_test1:
        movl 4(%esp), %eax
        cmpl $-1, %eax
        leal 7(%eax), %ecx
        cmovle %ecx, %eax
        sarl $3, %eax
        ret

which is probably slower, but it's interesting at least :)

//===---------------------------------------------------------------------===//

Currently the x86 codegen isn't very good at mixing SSE and FPStack
code:

unsigned int foo(double x) { return x; }

foo:
        subl $20, %esp
        movsd 24(%esp), %xmm0
        movsd %xmm0, 8(%esp)
        fldl 8(%esp)
        fisttpll (%esp)
        movl (%esp), %eax
        addl $20, %esp
        ret

This will be solved when we go to a dynamic programming based isel.

//===---------------------------------------------------------------------===//

Should generate min/max for stuff like:

void minf(float a, float b, float *X) {
        *X = a <= b ? a : b;
}

Make use of floating point min / max instructions.  Perhaps introduce ISD::FMIN
and ISD::FMAX node types?

//===---------------------------------------------------------------------===//

The first BB of this code:

declare bool %foo()
int %bar() {
        %V = call bool %foo()
        br bool %V, label %T, label %F
T:
        ret int 1
F:
        call bool %foo()
        ret int 12
}

compiles to:

_bar:
        subl $12, %esp
        call L_foo$stub
        xorb $1, %al
        testb %al, %al
        jne LBB_bar_2   # F

It would be better to emit "cmp %al, 1" than a xor and test.

//===---------------------------------------------------------------------===//

Enable X86InstrInfo::convertToThreeAddress().

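A sketch of a case where this helps (the function name is made up): both 'a'
and 'b' are still live after the add, so a two-address addl needs a copy
first, while a three-address leal can put the sum in a fresh register:

int f(int a, int b) { return (a + b) * a * b; }
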
//===---------------------------------------------------------------------===//

Investigate whether it is better to codegen the following

        %tmp.1 = mul int %x, 9
to

        movl    4(%esp), %eax
        leal    (%eax,%eax,8), %eax

as opposed to what llc is currently generating:

        imull $9, 4(%esp), %eax

Currently the load folding imull has a higher complexity than the LEA32 pattern.

//===---------------------------------------------------------------------===//

Lower memcpy / memset to a series of SSE 128 bit move instructions when it's
feasible.

//===---------------------------------------------------------------------===//

Teach the coalescer to commute 2-addr instructions, allowing us to eliminate
the reg-reg copy in this example:

float foo(int *x, float *y, unsigned c) {
        float res = 0.0;
        unsigned i;
        for (i = 0; i < c; i++) {
                float xx = (float)x[i];
                xx = xx * y[i];
                xx += res;
                res = xx;
        }
        return res;
}

LBB_foo_3:      # no_exit
        cvtsi2ss %XMM0, DWORD PTR [%EDX + 4*%ESI]
        mulss %XMM0, DWORD PTR [%EAX + 4*%ESI]
        addss %XMM0, %XMM1
        inc %ESI
        cmp %ESI, %ECX
****    movaps %XMM1, %XMM0
        jb LBB_foo_3    # no_exit

//===---------------------------------------------------------------------===//

Codegen:
  if (copysign(1.0, x) == copysign(1.0, y))
into:
  if (x^y & mask)
when using SSE.

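A sketch of the full source pattern (the function name is made up):

#include <math.h>
int same_sign(double x, double y) {
        return copysign(1.0, x) == copysign(1.0, y);
}
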
//===---------------------------------------------------------------------===//

Optimize this into something reasonable:
 x * copysign(1.0, y) * copysign(1.0, z)

//===---------------------------------------------------------------------===//

Optimize copysign(x, *y) to use an integer load from y.

//===---------------------------------------------------------------------===//

%X = weak global int 0

void %foo(int %N) {
        %N = cast int %N to uint
        %tmp.24 = setgt int %N, 0
        br bool %tmp.24, label %no_exit, label %return

no_exit:
        %indvar = phi uint [ 0, %entry ], [ %indvar.next, %no_exit ]
        %i.0.0 = cast uint %indvar to int
        volatile store int %i.0.0, int* %X
        %indvar.next = add uint %indvar, 1
        %exitcond = seteq uint %indvar.next, %N
        br bool %exitcond, label %return, label %no_exit

return:
        ret void
}

compiles into:

        .text
        .align  4
        .globl  _foo
_foo:
        movl 4(%esp), %eax
        cmpl $1, %eax
        jl LBB_foo_4    # return
LBB_foo_1:      # no_exit.preheader
        xorl %ecx, %ecx
LBB_foo_2:      # no_exit
        movl L_X$non_lazy_ptr, %edx
        movl %ecx, (%edx)
        incl %ecx
        cmpl %eax, %ecx
        jne LBB_foo_2   # no_exit
LBB_foo_3:      # return.loopexit
LBB_foo_4:      # return
        ret

We should hoist "movl L_X$non_lazy_ptr, %edx" out of the loop after
rematerialization is implemented.  This can be accomplished with 1) a target
dependent LICM pass or 2) making SelectionDAG represent the whole function.

//===---------------------------------------------------------------------===//

The following tests perform worse with LSR:

lambda, siod, optimizer-eval, ackermann, hash2, nestedloop, strcat, and Treesor.

//===---------------------------------------------------------------------===//

Teach the coalescer to coalesce vregs of different register classes, e.g. FR32 /
FR64 to VR128.

//===---------------------------------------------------------------------===//

mov $reg, 48(%esp)
...
leal 48(%esp), %eax
mov %eax, (%esp)
call _foo

Obviously it would have been better for the first mov (or any op) to store
directly into %esp[0] if there are no other uses.

//===---------------------------------------------------------------------===//

Add more vector shuffle special cases using unpckhps and unpcklps.