Fix support for encodings up to 64 bits in length. TableGen was silently truncating them to 32 bits prior to this.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@152148 91177308-0d34-0410-b5e6-96231b3b80d8
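
For context, the failure mode is the usual C++ integer-width trap: the emitter accumulated encodings in `unsigned` (32 bits on common targets), so any fixed bit at position 32 or above was lost, and the shift that placed it there was undefined behavior. A minimal standalone sketch of the problem, not TableGen code:

    // Illustration only (hypothetical values): accumulating a 64-bit
    // encoding into a 32-bit integer silently drops the high bits, and
    // shifting a 32-bit value by 32 or more is undefined behavior.
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t Wide = 0;               // post-patch: 64-bit accumulator
      Wide |= (uint64_t)1 << 40;       // OK: bit 40 is preserved

      unsigned Narrow = 0;             // pre-patch: 32-bit accumulator
      // Narrow |= 1 << 40;            // UB: shift count >= width of int
      (void)Narrow;

      printf("%llu\n", (unsigned long long)Wide); // 1099511627776
      return 0;
    }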
diff --git a/utils/TableGen/CodeEmitterGen.cpp b/utils/TableGen/CodeEmitterGen.cpp
index f6701af..91d8446 100644
--- a/utils/TableGen/CodeEmitterGen.cpp
+++ b/utils/TableGen/CodeEmitterGen.cpp
@@ -163,7 +163,7 @@
--bit;
}
- unsigned opMask = ~0U >> (32-N);
+ uint64_t opMask = ~(uint64_t)0 >> (64-N);
int opShift = beginVarBit - N + 1;
opMask <<= opShift;
opShift = beginInstBit - beginVarBit;
@@ -228,7 +228,7 @@
o << "CodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) const {\n";
// Emit instruction base values
- o << " static const unsigned InstBits[] = {\n";
+ o << " static const uint64_t InstBits[] = {\n";
for (std::vector<const CodeGenInstruction*>::const_iterator
IN = NumberedInstructions.begin(),
EN = NumberedInstructions.end();
@@ -245,10 +245,10 @@
BitsInit *BI = R->getValueAsBitsInit("Inst");
// Start by filling in fixed values.
- unsigned Value = 0;
+ uint64_t Value = 0;
for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i) {
if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(e-i-1)))
- Value |= B->getValue() << (e-i-1);
+ Value |= (uint64_t)B->getValue() << (e-i-1);
}
o << " UINT64_C(" << Value << ")," << '\t' << "// " << R->getName() << "\n";
}
@@ -273,8 +273,8 @@
// Emit initial function code
o << " const unsigned opcode = MI.getOpcode();\n"
- << " unsigned Value = InstBits[opcode];\n"
- << " unsigned op = 0;\n"
+ << " uint64_t Value = InstBits[opcode];\n"
+ << " uint64_t op = 0;\n"
<< " (void)op; // suppress warning\n"
<< " switch (opcode) {\n";