//====--- SPU64InstrInfo.td - Cell SPU 64-bit operations -*- tablegen -*--====//
//
// Cell SPU 64-bit operations
//
// Primary author: Scott Michel (scottm@aero.org)
//===----------------------------------------------------------------------===//

//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
// 64-bit comparisons:
//
// 1. The instruction sequences for vector versus scalar differ by a
//    constant. In the scalar case, we're only interested in the
//    top two 32-bit slots, whereas we're interested in an exact
//    all-four-slot match in the vector case.
//
// 2. There are no "immediate" forms, since loading 64-bit constants
//    could be a constant pool load.
//
// 3. i64 setcc results are i32, which are subsequently converted to a FSM
//    mask when used in a select pattern.
//
// 4. v2i64 setcc results are v4i32, which can be converted to a FSM mask (TODO)
//    [Note: this may be moot, since gb produces v4i32 or r32.]
//
// M00$E B!tes Kan be Pretty N@sTi!!!!! (apologies to Monty!)
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
| 27 | |
// selb instruction definition for i64. Note that the selection mask is
// a vector, produced by various forms of FSM.
//
// No selection pattern is attached here: this instruction is only emitted
// via the explicit Pat<> expansions later in this file (the generic i64
// select pattern and I64SELECTNegCond instantiations).
def SELBr64_cond:
    SELBInst<(outs R64C:$rT), (ins R64C:$rA, R64C:$rB, VECREG:$rC),
             [/* no pattern */]>;
| 33 | |
// select the negative condition: lowers (select (negative-cond a, b), t, f)
// using the POSITIVE comparison `compare` to build the FSM selection mask.
// The negation is achieved by passing $rTrue/$rFalse to SELBr64_cond in the
// opposite order from the positive-condition select pattern below.
class I64SELECTNegCond<PatFrag cond, CodeFrag compare>:
  Pat<(select (i32 (cond R64C:$rA, R64C:$rB)), R64C:$rTrue, R64C:$rFalse),
      (SELBr64_cond R64C:$rTrue, R64C:$rFalse, (FSMr32 compare.Fragment))>;
Scott Michel | 06eabde | 2008-12-27 04:51:36 +0000 | [diff] [blame] | 38 | |
// setcc the negative condition: lowers (negative-cond a, b) by computing the
// POSITIVE comparison `compare` (an i32 result) and inverting it with an
// XOR against -1 (all ones).
class I64SETCCNegCond<PatFrag cond, CodeFrag compare>:
  Pat<(cond R64C:$rA, R64C:$rB),
      (XORIr32 compare.Fragment, -1)>;
Scott Michel | 06eabde | 2008-12-27 04:51:36 +0000 | [diff] [blame] | 43 | |
| 44 | // The i64 seteq fragment that does the scalar->vector conversion and |
| 45 | // comparison: |
| 46 | def CEQr64compare: |
| 47 | CodeFrag<(CGTIv4i32 (GBv4i32 (CEQv4i32 (ORv2i64_i64 R64C:$rA), |
| 48 | (ORv2i64_i64 R64C:$rB))), |
| 49 | 0x0000000c)>; |
| 50 | |
// The i64 seteq fragment that does the vector comparison. The 0x0000000f
// constant requires an exact all-four-slot match (note 1 in the header),
// versus the 0x0000000c used by the scalar fragment CEQr64compare.
def CEQv2i64compare:
    CodeFrag<(CGTIv4i32 (GBv4i32 (CEQv4i32 VECREG:$rA, VECREG:$rB)),
             0x0000000f)>;
| 56 | |
// i64 seteq (equality): the setcc result is i32, which is converted to a
// vector FSM mask when used in a select pattern.
//
// v2i64 seteq (equality): the setcc result is v4i32
//
// Instantiating this multiclass as `defm NAME` yields four code fragments:
// NAMEr64 / NAMEv2i64 (plain comparisons) and NAMEr64mask / NAMEv2i64mask
// (the same comparisons expanded through FSM into a SELB selection mask).
multiclass CompareEqual64 {
  // Plain old comparison, converts back to i32 scalar
  def r64: CodeFrag<(ORi32_v4i32 CEQr64compare.Fragment)>;
  def v2i64: CodeFrag<(ORi32_v4i32 CEQv2i64compare.Fragment)>;

  // SELB mask from FSM:
  def r64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CEQr64compare.Fragment))>;
  def v2i64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CEQv2i64compare.Fragment))>;
}
| 70 | |
// Instantiate the equality fragments: I64EQr64, I64EQv2i64, plus the
// corresponding FSM-mask variants.
defm I64EQ: CompareEqual64;

// seteq lowers directly to the comparison fragments, for both the scalar
// i64 and vector v2i64 forms:
def : Pat<(seteq R64C:$rA, R64C:$rB), I64EQr64.Fragment>;
def : Pat<(seteq (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)), I64EQv2i64.Fragment>;
Scott Michel | 06eabde | 2008-12-27 04:51:36 +0000 | [diff] [blame] | 75 | |
// Generic i64 select on an i32 condition: FSMr32 expands the scalar
// condition into the vector selection mask that SELBr64_cond expects.
def : Pat<(select R32C:$rC, R64C:$rB, R64C:$rA),
          (SELBr64_cond R64C:$rA, R64C:$rB, (FSMr32 R32C:$rC))>;
Scott Michel | 06eabde | 2008-12-27 04:51:36 +0000 | [diff] [blame] | 78 | |
// i64 setne: derived from the seteq fragment I64EQr64 — setcc inverts the
// i32 result (XOR -1); select swaps the true/false SELB operands.
def : I64SETCCNegCond<setne, I64EQr64>;
def : I64SELECTNegCond<setne, I64EQr64>;
Scott Michel | 06eabde | 2008-12-27 04:51:36 +0000 | [diff] [blame] | 82 | |
Scott Michel | 4d07fb7 | 2008-12-30 23:28:25 +0000 | [diff] [blame^] | 83 | // i64 setugt: |