More cleanups for CellSPU:
- Expand tabs (possible 80-column violations remain; those will be addressed later).
- Consolidate the logic for SelectDFormAddr and SelectDForm2Addr into a single
function, simplifying maintenance. Also reduce custom instruction
generation for SPUvecinsert/INSERT_MASK.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@46544 91177308-0d34-0410-b5e6-96231b3b80d8
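Editor's note: the consolidation mentioned above happens in SPUISelDAGToDAG.cpp
(not shown in the hunks below). A minimal sketch of the idea follows, assuming
the era's SelectionDAG ISel selector signatures; the shared helper name
(SelectDFormAddrCommon) and the offset range passed to it are illustrative
assumptions, not the actual committed code:

    // Hypothetical shared helper: the two D-form selectors differ mainly in
    // which immediate offsets they accept, so the common (reg + imm) matching
    // logic can live in one place.  Names and the offset range are illustrative.
    bool SPUDAGToDAGISel::SelectDFormAddrCommon(SDOperand Op, SDOperand N,
                                                SDOperand &Base, SDOperand &Index,
                                                int minOffset, int maxOffset) {
      if (N.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
          int offset = (int) CN->getValue();
          // Accept only offsets in the allowed signed range (the real code
          // also checks alignment and other node shapes).
          if (offset >= minOffset && offset <= maxOffset) {
            Base = CurDAG->getTargetConstant(offset, MVT::i32);
            Index = N.getOperand(0);
            return true;
          }
        }
      }
      return false;
    }

    // The public selectors then become thin wrappers over the helper,
    // differing only in the range they pass (values shown are placeholders):
    bool SPUDAGToDAGISel::SelectDFormAddr(SDOperand Op, SDOperand N,
                                          SDOperand &Base, SDOperand &Index) {
      return SelectDFormAddrCommon(Op, N, Base, Index, -512, 511);
    }

The design point is simply that one copy of the address-matching logic serves
both selectors, so future fixes to D-form matching need to be made only once.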
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index c7d7f97..407b8e6 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -39,8 +39,8 @@
//! MVT::ValueType mapping to useful data for Cell SPU
struct valtype_map_s {
- const MVT::ValueType valtype;
- const int prefslot_byte;
+ const MVT::ValueType valtype;
+ const int prefslot_byte;
};
const valtype_map_s valtype_map[] = {
@@ -61,16 +61,16 @@
for (size_t i = 0; i < n_valtype_map; ++i) {
if (valtype_map[i].valtype == VT) {
- retval = valtype_map + i;
- break;
+ retval = valtype_map + i;
+ break;
}
}
#ifndef NDEBUG
if (retval == 0) {
cerr << "getValueTypeMapEntry returns NULL for "
- << MVT::getValueTypeString(VT)
- << "\n";
+ << MVT::getValueTypeString(VT)
+ << "\n";
abort();
}
#endif
@@ -106,7 +106,7 @@
{
const unsigned Opc = Op.getOpcode();
return (Opc == ISD::Register
- || Opc == SPUISD::LDRESULT);
+ || Opc == SPUISD::LDRESULT);
}
}
@@ -508,7 +508,7 @@
if (!isMemoryOperand(basePtr) || (alignOffs & ~0xf) != 0) {
basePtr = DAG.getNode(ISD::ADD, PtrVT,
basePtr,
- DAG.getConstant((alignOffs & ~0xf), PtrVT));
+ DAG.getConstant((alignOffs & ~0xf), PtrVT));
}
// Emit the vector load:
@@ -526,7 +526,7 @@
// Add the offset
basePtr = DAG.getNode(ISD::ADD, PtrVT, basePtr,
- DAG.getConstant((alignOffs & ~0xf), PtrVT));
+ DAG.getConstant((alignOffs & ~0xf), PtrVT));
was16aligned = false;
return DAG.getLoad(MVT::v16i8, chain, basePtr,
LSN->getSrcValue(), LSN->getSrcValueOffset(),
@@ -570,10 +570,10 @@
if (was16aligned) {
Ops[2] = DAG.getConstant(rotamt, MVT::i16);
} else {
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
LoadSDNode *LN1 = cast<LoadSDNode>(result);
Ops[2] = DAG.getNode(ISD::ADD, PtrVT, LN1->getBasePtr(),
- DAG.getConstant(rotamt, PtrVT));
+ DAG.getConstant(rotamt, PtrVT));
}
result = DAG.getNode(SPUISD::ROTBYTES_LEFT_CHAINED, vecvts, Ops, 3);
@@ -616,10 +616,9 @@
}
SDVTList retvts = DAG.getVTList(OpVT, MVT::Other);
- SDOperand retops[3] = {
+ SDOperand retops[2] = {
result,
- the_chain,
- DAG.getConstant(alignment, MVT::i32)
+ the_chain
};
result = DAG.getNode(SPUISD::LDRESULT, retvts,
@@ -683,8 +682,8 @@
SDOperand result;
if (StVT != VT
- && (theValue.getOpcode() == ISD::AssertZext
- || theValue.getOpcode() == ISD::AssertSext)) {
+ && (theValue.getOpcode() == ISD::AssertZext
+ || theValue.getOpcode() == ISD::AssertSext)) {
// Drill down and get the value for zero- and sign-extended
// quantities
theValue = theValue.getOperand(0);
@@ -722,9 +721,9 @@
insertEltOp = DAG.getNode(SPUISD::INSERT_MASK, stVecVT, insertEltPtr);
result = DAG.getNode(SPUISD::SHUFB, vecVT,
- DAG.getNode(ISD::SCALAR_TO_VECTOR, vecVT, theValue),
- alignLoadVec,
- DAG.getNode(ISD::BIT_CONVERT, vecVT, insertEltOp));
+ DAG.getNode(ISD::SCALAR_TO_VECTOR, vecVT, theValue),
+ alignLoadVec,
+ DAG.getNode(ISD::BIT_CONVERT, vecVT, insertEltOp));
result = DAG.getStore(the_chain, result, basePtr,
LN->getSrcValue(), LN->getSrcValueOffset(),
@@ -818,7 +817,7 @@
}
} else {
cerr << "LowerGlobalAddress: Relocation model other than static not "
- << "supported.\n";
+ << "supported.\n";
abort();
/*NOTREACHED*/
}
@@ -839,12 +838,12 @@
if (VT == MVT::i64) {
SDOperand T = DAG.getConstant(CN->getValue(), MVT::i64);
return DAG.getNode(SPUISD::EXTRACT_ELT0, VT,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
} else {
cerr << "LowerConstant: unhandled constant type "
- << MVT::getValueTypeString(VT)
- << "\n";
+ << MVT::getValueTypeString(VT)
+ << "\n";
abort();
/*NOTREACHED*/
}
@@ -864,16 +863,16 @@
ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.Val);
assert((FP != 0) &&
- "LowerConstantFP: Node is not ConstantFPSDNode");
+ "LowerConstantFP: Node is not ConstantFPSDNode");
if (VT == MVT::f32) {
float targetConst = FP->getValueAPF().convertToFloat();
return DAG.getNode(SPUISD::SFPConstant, VT,
- DAG.getTargetConstantFP(targetConst, VT));
+ DAG.getTargetConstantFP(targetConst, VT));
} else if (VT == MVT::f64) {
uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
return DAG.getNode(ISD::BIT_CONVERT, VT,
- LowerConstant(DAG.getConstant(dbits, MVT::i64), DAG));
+ LowerConstant(DAG.getConstant(dbits, MVT::i64), DAG));
}
return SDOperand();
@@ -926,7 +925,7 @@
switch (ObjectVT) {
default: {
cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: "
- << MVT::getValueTypeString(ObjectVT)
+ << MVT::getValueTypeString(ObjectVT)
<< "\n";
abort();
}
@@ -1126,7 +1125,7 @@
RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
} else {
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += StackSlotSize;
+ ArgOffset += StackSlotSize;
}
break;
case MVT::f32:
@@ -1135,7 +1134,7 @@
RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
} else {
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += StackSlotSize;
+ ArgOffset += StackSlotSize;
}
break;
case MVT::v4f32:
@@ -1146,7 +1145,7 @@
RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
} else {
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += StackSlotSize;
+ ArgOffset += StackSlotSize;
}
break;
}
@@ -1378,19 +1377,19 @@
int SExtValue = ((Value & 0xffff) << 16) >> 16;
if (Value == SExtValue)
- return DAG.getConstant(Value, ValueType);
+ return DAG.getConstant(Value, ValueType);
} else if (ValueType == MVT::i16) {
short Value = (short) CN->getValue();
int SExtValue = ((int) Value << 16) >> 16;
if (Value == (short) SExtValue)
- return DAG.getConstant(Value, ValueType);
+ return DAG.getConstant(Value, ValueType);
} else if (ValueType == MVT::i64) {
int64_t Value = CN->getValue();
int64_t SExtValue = ((Value & 0xffff) << (64 - 16)) >> (64 - 16);
if (Value == SExtValue)
- return DAG.getConstant(Value, ValueType);
+ return DAG.getConstant(Value, ValueType);
}
}
@@ -1405,7 +1404,7 @@
if (ConstantSDNode *CN = getVecImm(N)) {
int Value = (int) CN->getValue();
if ((ValueType == MVT::i32 && isS10Constant(Value))
- || (ValueType == MVT::i16 && isS10Constant((short) Value)))
+ || (ValueType == MVT::i16 && isS10Constant((short) Value)))
return DAG.getConstant(Value, ValueType);
}
@@ -1424,11 +1423,11 @@
if (ConstantSDNode *CN = getVecImm(N)) {
int Value = (int) CN->getValue();
if (ValueType == MVT::i16
- && Value <= 0xffff /* truncated from uint64_t */
- && ((short) Value >> 8) == ((short) Value & 0xff))
+ && Value <= 0xffff /* truncated from uint64_t */
+ && ((short) Value >> 8) == ((short) Value & 0xff))
return DAG.getConstant(Value & 0xff, ValueType);
else if (ValueType == MVT::i8
- && (Value & 0xff) == Value)
+ && (Value & 0xff) == Value)
return DAG.getConstant(Value, ValueType);
}
@@ -1443,8 +1442,8 @@
if (ConstantSDNode *CN = getVecImm(N)) {
uint64_t Value = CN->getValue();
if ((ValueType == MVT::i32
- && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
- || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
+ && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
+ || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
return DAG.getConstant(Value >> 16, ValueType);
}
@@ -1496,8 +1495,8 @@
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
const APFloat &apf = CN->getValueAPF();
EltBits = (CN->getValueType(0) == MVT::f32
- ? FloatToBits(apf.convertToFloat())
- : DoubleToBits(apf.convertToDouble()));
+ ? FloatToBits(apf.convertToFloat())
+ : DoubleToBits(apf.convertToDouble()));
} else {
// Nonconstant element.
return true;
@@ -1517,7 +1516,7 @@
/// SplatSize = 1 byte.
static bool isConstantSplat(const uint64_t Bits128[2],
const uint64_t Undef128[2],
- int MinSplatBits,
+ int MinSplatBits,
uint64_t &SplatBits, uint64_t &SplatUndef,
int &SplatSize) {
// Don't let undefs prevent splats from matching. See if the top 64-bits are
@@ -1535,34 +1534,34 @@
// Check that the top 32-bits are the same as the lower 32-bits, ignoring
// undefs.
if ((Bits64 & (~Undef64 >> 32)) == ((Bits64 >> 32) & ~Undef64)) {
- if (MinSplatBits < 32) {
+ if (MinSplatBits < 32) {
- // If the top 16-bits are different than the lower 16-bits, ignoring
- // undefs, we have an i32 splat.
- if ((Bits32 & (~Undef32 >> 16)) == ((Bits32 >> 16) & ~Undef32)) {
- if (MinSplatBits < 16) {
- // If the top 8-bits are different than the lower 8-bits, ignoring
- // undefs, we have an i16 splat.
- if ((Bits16 & (uint16_t(~Undef16) >> 8)) == ((Bits16 >> 8) & ~Undef16)) {
- // Otherwise, we have an 8-bit splat.
- SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8);
- SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
- SplatSize = 1;
- return true;
- }
- } else {
- SplatBits = Bits16;
- SplatUndef = Undef16;
- SplatSize = 2;
- return true;
- }
- }
- } else {
- SplatBits = Bits32;
- SplatUndef = Undef32;
- SplatSize = 4;
- return true;
- }
+ // If the top 16-bits are different than the lower 16-bits, ignoring
+ // undefs, we have an i32 splat.
+ if ((Bits32 & (~Undef32 >> 16)) == ((Bits32 >> 16) & ~Undef32)) {
+ if (MinSplatBits < 16) {
+ // If the top 8-bits are different than the lower 8-bits, ignoring
+ // undefs, we have an i16 splat.
+ if ((Bits16 & (uint16_t(~Undef16) >> 8)) == ((Bits16 >> 8) & ~Undef16)) {
+ // Otherwise, we have an 8-bit splat.
+ SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8);
+ SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
+ SplatSize = 1;
+ return true;
+ }
+ } else {
+ SplatBits = Bits16;
+ SplatUndef = Undef16;
+ SplatSize = 2;
+ return true;
+ }
+ }
+ } else {
+ SplatBits = Bits32;
+ SplatUndef = Undef32;
+ SplatSize = 4;
+ return true;
+ }
}
} else {
SplatBits = Bits128[0];
@@ -1592,7 +1591,7 @@
int SplatSize;
if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)
|| !isConstantSplat(VectorBits, UndefBits,
- MVT::getSizeInBits(MVT::getVectorElementType(VT)),
+ MVT::getSizeInBits(MVT::getVectorElementType(VT)),
SplatBits, SplatUndef, SplatSize))
return SDOperand(); // Not a constant vector, not a splat.
@@ -1601,21 +1600,21 @@
case MVT::v4f32: {
uint32_t Value32 = SplatBits;
assert(SplatSize == 4
- && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
+ && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDOperand T = DAG.getConstant(Value32, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, T, T, T, T));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, T, T, T, T));
break;
}
case MVT::v2f64: {
uint64_t f64val = SplatBits;
assert(SplatSize == 8
- && "LowerBUILD_VECTOR: 64-bit float vector element: unexpected size.");
+ && "LowerBUILD_VECTOR: 64-bit float vector element: unexpected size.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDOperand T = DAG.getConstant(f64val, MVT::i64);
return DAG.getNode(ISD::BIT_CONVERT, MVT::v2f64,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
break;
}
case MVT::v16i8: {
@@ -1665,69 +1664,69 @@
// Create lower vector if not a special pattern
if (!lower_special) {
- SDOperand LO32C = DAG.getConstant(lower, MVT::i32);
- LO32 = DAG.getNode(ISD::BIT_CONVERT, VT,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- LO32C, LO32C, LO32C, LO32C));
+ SDOperand LO32C = DAG.getConstant(lower, MVT::i32);
+ LO32 = DAG.getNode(ISD::BIT_CONVERT, VT,
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ LO32C, LO32C, LO32C, LO32C));
}
// Create upper vector if not a special pattern
if (!upper_special) {
- SDOperand HI32C = DAG.getConstant(upper, MVT::i32);
- HI32 = DAG.getNode(ISD::BIT_CONVERT, VT,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- HI32C, HI32C, HI32C, HI32C));
+ SDOperand HI32C = DAG.getConstant(upper, MVT::i32);
+ HI32 = DAG.getNode(ISD::BIT_CONVERT, VT,
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ HI32C, HI32C, HI32C, HI32C));
}
// If either upper or lower are special, then the two input operands are
// the same (basically, one of them is a "don't care")
if (lower_special)
- LO32 = HI32;
+ LO32 = HI32;
if (upper_special)
- HI32 = LO32;
+ HI32 = LO32;
if (lower_special && upper_special) {
- // Unhappy situation... both upper and lower are special, so punt with
- // a target constant:
+ // Unhappy situation... both upper and lower are special, so punt with
+ // a target constant:
SDOperand Zero = DAG.getConstant(0, MVT::i32);
- HI32 = LO32 = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Zero, Zero,
+ HI32 = LO32 = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Zero, Zero,
Zero, Zero);
}
for (int i = 0; i < 4; ++i) {
- for (int j = 0; j < 4; ++j) {
- SDOperand V;
- bool process_upper, process_lower;
- uint64_t val = 0;
+ for (int j = 0; j < 4; ++j) {
+ SDOperand V;
+ bool process_upper, process_lower;
+ uint64_t val = 0;
- process_upper = (upper_special && (i & 1) == 0);
- process_lower = (lower_special && (i & 1) == 1);
+ process_upper = (upper_special && (i & 1) == 0);
+ process_lower = (lower_special && (i & 1) == 1);
- if (process_upper || process_lower) {
- if ((process_upper && upper == 0)
- || (process_lower && lower == 0))
- val = 0x80;
- else if ((process_upper && upper == 0xffffffff)
- || (process_lower && lower == 0xffffffff))
- val = 0xc0;
- else if ((process_upper && upper == 0x80000000)
- || (process_lower && lower == 0x80000000))
- val = (j == 0 ? 0xe0 : 0x80);
- } else
- val = i * 4 + j + ((i & 1) * 16);
+ if (process_upper || process_lower) {
+ if ((process_upper && upper == 0)
+ || (process_lower && lower == 0))
+ val = 0x80;
+ else if ((process_upper && upper == 0xffffffff)
+ || (process_lower && lower == 0xffffffff))
+ val = 0xc0;
+ else if ((process_upper && upper == 0x80000000)
+ || (process_lower && lower == 0x80000000))
+ val = (j == 0 ? 0xe0 : 0x80);
+ } else
+ val = i * 4 + j + ((i & 1) * 16);
- ShufBytes.push_back(DAG.getConstant(val, MVT::i8));
- }
+ ShufBytes.push_back(DAG.getConstant(val, MVT::i8));
+ }
}
return DAG.getNode(SPUISD::SHUFB, VT, HI32, LO32,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
- &ShufBytes[0], ShufBytes.size()));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
+ &ShufBytes[0], ShufBytes.size()));
} else {
// For zero, this can be lowered efficiently via v4i32 BUILD_VECTOR
SDOperand Zero = DAG.getConstant(0, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, VT,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- Zero, Zero, Zero, Zero));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ Zero, Zero, Zero, Zero));
}
}
}
@@ -1804,8 +1803,8 @@
// Copy register's contents as index in INSERT_MASK:
SDOperand ShufMaskOp =
DAG.getNode(SPUISD::INSERT_MASK, V1.getValueType(),
- DAG.getTargetConstant(V2Elt, MVT::i32),
- DAG.getCopyFromReg(InitTempReg, VReg, PtrVT));
+ DAG.getTargetConstant(V2Elt, MVT::i32),
+ DAG.getCopyFromReg(InitTempReg, VReg, PtrVT));
// Use shuffle mask in SHUFB synthetic instruction:
return DAG.getNode(SPUISD::SHUFB, V1.getValueType(), V2, V1, ShufMaskOp);
} else {
@@ -1816,24 +1815,24 @@
for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
unsigned SrcElt;
if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
- SrcElt = 0;
+ SrcElt = 0;
else
- SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();
+ SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();
for (unsigned j = 0; j != BytesPerElement; ++j) {
- ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
- MVT::i8));
+ ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
+ MVT::i8));
}
}
SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
- &ResultMask[0], ResultMask.size());
+ &ResultMask[0], ResultMask.size());
return DAG.getNode(SPUISD::SHUFB, V1.getValueType(), V1, V2, VPermMask);
}
}
static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
- SDOperand Op0 = Op.getOperand(0); // Op0 = the scalar
+ SDOperand Op0 = Op.getOperand(0); // Op0 = the scalar
if (Op0.Val->getOpcode() == ISD::Constant) {
// For a constant, build the appropriate constant vector, which will
@@ -1847,7 +1846,7 @@
// Create a constant vector:
switch (Op.getValueType()) {
default: assert(0 && "Unexpected constant value type in "
- "LowerSCALAR_TO_VECTOR");
+ "LowerSCALAR_TO_VECTOR");
case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
@@ -1861,7 +1860,7 @@
ConstVecValues.push_back(CValue);
return DAG.getNode(ISD::BUILD_VECTOR, Op.getValueType(),
- &ConstVecValues[0], ConstVecValues.size());
+ &ConstVecValues[0], ConstVecValues.size());
} else {
// Otherwise, copy the value from one register to another:
switch (Op0.getValueType()) {
@@ -1912,24 +1911,24 @@
SDOperand FSMBOp =
DAG.getCopyToReg(Chain, FSMBIreg,
- DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
- DAG.getConstant(0xcccc, MVT::i32)));
+ DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
+ DAG.getConstant(0xcccc, MVT::i32)));
SDOperand HHProd =
DAG.getCopyToReg(FSMBOp, HiProdReg,
- DAG.getNode(SPUISD::MPYHH, MVT::v8i16, rA, rB));
+ DAG.getNode(SPUISD::MPYHH, MVT::v8i16, rA, rB));
SDOperand HHProd_v4i32 =
DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32,
- DAG.getCopyFromReg(HHProd, HiProdReg, MVT::v4i32));
+ DAG.getCopyFromReg(HHProd, HiProdReg, MVT::v4i32));
return DAG.getNode(SPUISD::SELB, MVT::v8i16,
- DAG.getNode(SPUISD::MPY, MVT::v8i16, rA, rB),
- DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(),
- DAG.getNode(SPUISD::VEC_SHL, MVT::v4i32,
- HHProd_v4i32,
- DAG.getConstant(16, MVT::i16))),
- DAG.getCopyFromReg(FSMBOp, FSMBIreg, MVT::v4i32));
+ DAG.getNode(SPUISD::MPY, MVT::v8i16, rA, rB),
+ DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(),
+ DAG.getNode(SPUISD::VEC_SHL, MVT::v4i32,
+ HHProd_v4i32,
+ DAG.getConstant(16, MVT::i16))),
+ DAG.getCopyFromReg(FSMBOp, FSMBIreg, MVT::v4i32));
}
// This M00sE is N@stI! (apologies to Monty Python)
@@ -1952,8 +1951,8 @@
SDOperand LLProd =
DAG.getNode(SPUISD::MPY, MVT::v8i16,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rA),
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rB));
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rA),
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rB));
SDOperand rALH = DAG.getNode(SPUISD::VEC_SRA, MVT::v8i16, rA, c8);
@@ -1961,70 +1960,70 @@
SDOperand LHProd =
DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16,
- DAG.getNode(SPUISD::MPY, MVT::v8i16, rALH, rBLH), c8);
+ DAG.getNode(SPUISD::MPY, MVT::v8i16, rALH, rBLH), c8);
SDOperand FSMBdef_2222 =
DAG.getCopyToReg(Chain, FSMBreg_2222,
- DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
- DAG.getConstant(0x2222, MVT::i32)));
+ DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
+ DAG.getConstant(0x2222, MVT::i32)));
SDOperand FSMBuse_2222 =
DAG.getCopyFromReg(FSMBdef_2222, FSMBreg_2222, MVT::v4i32);
SDOperand LoProd_1 =
DAG.getCopyToReg(Chain, LoProd_reg,
- DAG.getNode(SPUISD::SELB, MVT::v8i16, LLProd, LHProd,
- FSMBuse_2222));
+ DAG.getNode(SPUISD::SELB, MVT::v8i16, LLProd, LHProd,
+ FSMBuse_2222));
SDOperand LoProdMask = DAG.getConstant(0xffff, MVT::i32);
SDOperand LoProd =
DAG.getNode(ISD::AND, MVT::v4i32,
- DAG.getCopyFromReg(LoProd_1, LoProd_reg, MVT::v4i32),
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- LoProdMask, LoProdMask,
- LoProdMask, LoProdMask));
+ DAG.getCopyFromReg(LoProd_1, LoProd_reg, MVT::v4i32),
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ LoProdMask, LoProdMask,
+ LoProdMask, LoProdMask));
SDOperand rAH =
DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rA), c16);
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rA), c16);
SDOperand rBH =
DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rB), c16);
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rB), c16);
SDOperand HLProd =
DAG.getNode(SPUISD::MPY, MVT::v8i16,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rAH),
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rBH));
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rAH),
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rBH));
SDOperand HHProd_1 =
DAG.getNode(SPUISD::MPY, MVT::v8i16,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16,
- DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rAH, c8)),
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16,
- DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rBH, c8)));
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16,
+ DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rAH, c8)),
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16,
+ DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rBH, c8)));
SDOperand HHProd =
DAG.getCopyToReg(Chain, HiProd_reg,
- DAG.getNode(SPUISD::SELB, MVT::v8i16,
- HLProd,
- DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16, HHProd_1, c8),
- FSMBuse_2222));
+ DAG.getNode(SPUISD::SELB, MVT::v8i16,
+ HLProd,
+ DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16, HHProd_1, c8),
+ FSMBuse_2222));
SDOperand HiProd =
DAG.getNode(SPUISD::VEC_SHL, MVT::v4i32,
- DAG.getCopyFromReg(HHProd, HiProd_reg, MVT::v4i32), c16);
+ DAG.getCopyFromReg(HHProd, HiProd_reg, MVT::v4i32), c16);
return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8,
- DAG.getNode(ISD::OR, MVT::v4i32,
- LoProd, HiProd));
+ DAG.getNode(ISD::OR, MVT::v4i32,
+ LoProd, HiProd));
}
default:
cerr << "CellSPU: Unknown vector multiplication, got "
<< MVT::getValueTypeString(Op.getValueType())
- << "\n";
+ << "\n";
abort();
/*NOTREACHED*/
}
@@ -2056,24 +2055,24 @@
// (Floating Interpolate (FP Reciprocal Estimate B))
SDOperand BRcpl =
DAG.getCopyToReg(DAG.getEntryNode(), VRegBR,
- DAG.getNode(SPUISD::FPInterp, VT, B,
- DAG.getNode(SPUISD::FPRecipEst, VT, B)));
+ DAG.getNode(SPUISD::FPInterp, VT, B,
+ DAG.getNode(SPUISD::FPRecipEst, VT, B)));
// Computes A * BRcpl and stores in a temporary register
SDOperand AxBRcpl =
DAG.getCopyToReg(BRcpl, VRegC,
- DAG.getNode(ISD::FMUL, VT, A,
- DAG.getCopyFromReg(BRcpl, VRegBR, VT)));
+ DAG.getNode(ISD::FMUL, VT, A,
+ DAG.getCopyFromReg(BRcpl, VRegBR, VT)));
// What's the Chain variable do? It's magic!
// TODO: set Chain = Op(0).getEntryNode()
return DAG.getNode(ISD::FADD, VT,
- DAG.getCopyFromReg(AxBRcpl, VRegC, VT),
- DAG.getNode(ISD::FMUL, VT,
- DAG.getCopyFromReg(AxBRcpl, VRegBR, VT),
- DAG.getNode(ISD::FSUB, VT, A,
- DAG.getNode(ISD::FMUL, VT, B,
- DAG.getCopyFromReg(AxBRcpl, VRegC, VT)))));
+ DAG.getCopyFromReg(AxBRcpl, VRegC, VT),
+ DAG.getNode(ISD::FMUL, VT,
+ DAG.getCopyFromReg(AxBRcpl, VRegBR, VT),
+ DAG.getNode(ISD::FSUB, VT, A,
+ DAG.getNode(ISD::FMUL, VT, B,
+ DAG.getCopyFromReg(AxBRcpl, VRegC, VT)))));
}
static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
@@ -2126,7 +2125,7 @@
}
assert(prefslot_begin != -1 && prefslot_end != -1 &&
- "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
+ "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
for (int i = 0; i < 16; ++i) {
// zero fill uppper part of preferred slot, don't care about the
@@ -2135,9 +2134,9 @@
if (i <= prefslot_end) {
mask_val =
- ((i < prefslot_begin)
- ? 0x80
- : elt_byte + (i - prefslot_begin));
+ ((i < prefslot_begin)
+ ? 0x80
+ : elt_byte + (i - prefslot_begin));
ShufMask[i] = DAG.getConstant(mask_val, MVT::i8);
} else
@@ -2146,13 +2145,13 @@
SDOperand ShufMaskVec =
DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
- &ShufMask[0],
- sizeof(ShufMask) / sizeof(ShufMask[0]));
+ &ShufMask[0],
+ sizeof(ShufMask) / sizeof(ShufMask[0]));
return DAG.getNode(SPUISD::EXTRACT_ELT0, VT,
- DAG.getNode(SPUISD::SHUFB, N.getValueType(),
- N, N, ShufMaskVec));
-
+ DAG.getNode(SPUISD::SHUFB, N.getValueType(),
+ N, N, ShufMaskVec));
+
}
static SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
@@ -2176,7 +2175,7 @@
DAG.getNode(ISD::ADD, PtrVT,
PtrBase,
DAG.getConstant(CN->getValue(),
- PtrVT))));
+ PtrVT))));
return result;
}
@@ -2283,7 +2282,7 @@
ConstVec = Op.getOperand(1);
Arg = Op.getOperand(0);
if (ConstVec.Val->getOpcode() == ISD::BIT_CONVERT) {
- ConstVec = ConstVec.getOperand(0);
+ ConstVec = ConstVec.getOperand(0);
}
}
}
@@ -2295,19 +2294,19 @@
int SplatSize;
if (!GetConstantBuildVectorBits(ConstVec.Val, VectorBits, UndefBits)
- && isConstantSplat(VectorBits, UndefBits,
- MVT::getSizeInBits(MVT::getVectorElementType(VT)),
- SplatBits, SplatUndef, SplatSize)) {
+ && isConstantSplat(VectorBits, UndefBits,
+ MVT::getSizeInBits(MVT::getVectorElementType(VT)),
+ SplatBits, SplatUndef, SplatSize)) {
SDOperand tcVec[16];
SDOperand tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
const size_t tcVecSize = sizeof(tcVec) / sizeof(tcVec[0]);
// Turn the BUILD_VECTOR into a set of target constants:
for (size_t i = 0; i < tcVecSize; ++i)
- tcVec[i] = tc;
+ tcVec[i] = tc;
return DAG.getNode(Op.Val->getOpcode(), VT, Arg,
- DAG.getNode(ISD::BUILD_VECTOR, VT, tcVec, tcVecSize));
+ DAG.getNode(ISD::BUILD_VECTOR, VT, tcVec, tcVecSize));
}
}
@@ -2321,7 +2320,7 @@
default:
cerr << "CellSPU: Unknown LowerMUL value type, got "
<< MVT::getValueTypeString(Op.getValueType())
- << "\n";
+ << "\n";
abort();
/*NOTREACHED*/
@@ -2330,10 +2329,10 @@
SDOperand rB = Op.getOperand(1);
return DAG.getNode(ISD::ADD, MVT::i32,
- DAG.getNode(ISD::ADD, MVT::i32,
- DAG.getNode(SPUISD::MPYH, MVT::i32, rA, rB),
- DAG.getNode(SPUISD::MPYH, MVT::i32, rB, rA)),
- DAG.getNode(SPUISD::MPYU, MVT::i32, rA, rB));
+ DAG.getNode(ISD::ADD, MVT::i32,
+ DAG.getNode(SPUISD::MPYH, MVT::i32, rA, rB),
+ DAG.getNode(SPUISD::MPYH, MVT::i32, rB, rA)),
+ DAG.getNode(SPUISD::MPYU, MVT::i32, rA, rB));
}
}
@@ -2379,18 +2378,18 @@
// CNTB_reg, SUM1_reg become associated:
SDOperand CNTB_result =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, CNTB, Elt0);
-
+
SDOperand CNTB_rescopy =
DAG.getCopyToReg(CNTB_result, CNTB_reg, CNTB_result);
SDOperand Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i16);
return DAG.getNode(ISD::AND, MVT::i16,
- DAG.getNode(ISD::ADD, MVT::i16,
- DAG.getNode(ISD::SRL, MVT::i16,
- Tmp1, Shift1),
- Tmp1),
- Mask0);
+ DAG.getNode(ISD::ADD, MVT::i16,
+ DAG.getNode(ISD::SRL, MVT::i16,
+ Tmp1, Shift1),
+ Tmp1),
+ Mask0);
}
case MVT::i32: {
@@ -2413,28 +2412,28 @@
// CNTB_reg, SUM1_reg become associated:
SDOperand CNTB_result =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, CNTB, Elt0);
-
+
SDOperand CNTB_rescopy =
DAG.getCopyToReg(CNTB_result, CNTB_reg, CNTB_result);
SDOperand Comp1 =
DAG.getNode(ISD::SRL, MVT::i32,
- DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32), Shift1);
+ DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32), Shift1);
SDOperand Sum1 =
DAG.getNode(ISD::ADD, MVT::i32,
- Comp1, DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32));
+ Comp1, DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32));
SDOperand Sum1_rescopy =
DAG.getCopyToReg(CNTB_result, SUM1_reg, Sum1);
SDOperand Comp2 =
DAG.getNode(ISD::SRL, MVT::i32,
- DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32),
- Shift2);
+ DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32),
+ Shift2);
SDOperand Sum2 =
DAG.getNode(ISD::ADD, MVT::i32, Comp2,
- DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32));
+ DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32));
return DAG.getNode(ISD::AND, MVT::i32, Sum2, Mask0);
}
@@ -2558,7 +2557,7 @@
#endif
const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
SelectionDAG &DAG = DCI.DAG;
- SDOperand N0 = N->getOperand(0); // everything has at least one operand
+ SDOperand N0 = N->getOperand(0); // everything has at least one operand
switch (N->getOpcode()) {
default: break;
@@ -2683,11 +2682,11 @@
void
SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
- uint64_t Mask,
- uint64_t &KnownZero,
- uint64_t &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth ) const {
+ uint64_t Mask,
+ uint64_t &KnownZero,
+ uint64_t &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth ) const {
KnownZero = 0;
KnownOne = 0;
}