[X86] Make the instructions that use AdSize16/32/64 coexist without using mode predicates.
This is necessary to allow the disassembler to handle AdSize32 instructions in 64-bit mode when the address-size prefix is used.
Eventually we should probably also support 'addr32' and 'addr16' in the assembler to override the address size on some of these instructions. For now, we just use special operand types that look up the current mode size to select the right instruction.
llvm-svn: 225075
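
For context, here is a minimal, self-contained sketch of the idea (simplified stand-ins, not the actual LLVM MC classes; MemOffsOperand and Mode are illustrative names only): the parser records the current address size ("mode size") on each memory operand, and the MemOffs<ModeSize>_<Size> predicates match on both the mode size and the access size, so the AdSize16/32/64 variants can coexist without Requires<[...]> mode predicates.

// Standalone sketch only: simplified stand-ins for X86Operand / X86AsmParser.
#include <cassert>
#include <cstdio>

// A memory-offset operand records the access size (if known) and the address
// size ("mode size") that was in effect when it was parsed.
struct MemOffsOperand {
  unsigned Size;     // access size in bits, 0 if unspecified
  unsigned ModeSize; // 16, 32 or 64, taken from the parser's current mode
};

// Structural check for a moffs operand (no base or index register); the
// simplified struct cannot carry registers, so it is trivially true here.
static bool isMemOffs(const MemOffsOperand &) { return true; }

// Predicates in the style of X86Operand::isMemOffs32_8() etc.: they key on
// both the mode size and the access size, so the matcher can pick the
// AdSize16/32/64 variant of the moffs MOV forms without a mode predicate.
static bool isMemOffs32_8(const MemOffsOperand &Op) {
  return isMemOffs(Op) && Op.ModeSize == 32 && (!Op.Size || Op.Size == 8);
}
static bool isMemOffs64_8(const MemOffsOperand &Op) {
  return isMemOffs(Op) && Op.ModeSize == 64 && (!Op.Size || Op.Size == 8);
}

// Stand-in for getPointerWidth(): derive the address size from the current
// assembler mode.
enum class Mode { Bits16, Bits32, Bits64 };
static unsigned getPointerWidth(Mode M) {
  switch (M) {
  case Mode::Bits16: return 16;
  case Mode::Bits32: return 32;
  case Mode::Bits64: return 64;
  }
  assert(false && "invalid mode");
  return 0;
}

int main() {
  // The same "movb <moffs>, %al" source parsed in 32-bit vs. 64-bit mode
  // yields operands that match different instruction variants.
  MemOffsOperand In32 = {8, getPointerWidth(Mode::Bits32)};
  MemOffsOperand In64 = {8, getPointerWidth(Mode::Bits64)};
  std::printf("32-bit mode matches the AdSize32 form: %d\n", isMemOffs32_8(In32));
  std::printf("64-bit mode matches the AdSize64 form: %d\n", isMemOffs64_8(In64));
  return 0;
}

In the patch below, the real predicates live in X86Operand.h (isMemOffs16_8() through isMemOffs64_64()), and the mode size is supplied through getPointerWidth() in X86AsmParser.cpp and X86AsmInstrumentation.cpp when memory operands are created.
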
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
index 12e900e..29c698d 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
@@ -261,6 +261,23 @@
int64_t Displacement,
MCContext &Ctx, int64_t *Residue);
+ bool is64BitMode() const {
+ return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
+ }
+ bool is32BitMode() const {
+ return (STI.getFeatureBits() & X86::Mode32Bit) != 0;
+ }
+ bool is16BitMode() const {
+ return (STI.getFeatureBits() & X86::Mode16Bit) != 0;
+ }
+
+ unsigned getPointerWidth() {
+ if (is16BitMode()) return 16;
+ if (is32BitMode()) return 32;
+ if (is64BitMode()) return 64;
+ llvm_unreachable("invalid mode");
+ }
+
// True when previous instruction was actually REP prefix.
bool RepPrefix;
@@ -301,7 +318,7 @@
{
const MCExpr *Disp = MCConstantExpr::Create(0, Ctx);
std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
- 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
+ getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
Out);
}
@@ -310,7 +327,8 @@
{
const MCExpr *Disp = MCConstantExpr::Create(-1, Ctx);
std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
- 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(), SMLoc()));
+ getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
+ SMLoc()));
InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
Out);
}
@@ -319,7 +337,7 @@
{
const MCExpr *Disp = MCConstantExpr::Create(0, Ctx);
std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
- 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
+ getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
}
@@ -327,7 +345,8 @@
{
const MCExpr *Disp = MCConstantExpr::Create(-1, Ctx);
std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
- 0, Disp, DstReg, CntReg, AccessSize, SMLoc(), SMLoc()));
+ getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
+ SMLoc()));
InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
}
@@ -445,7 +464,8 @@
const MCConstantExpr *Disp =
MCConstantExpr::Create(ApplyDisplacementBounds(Residue), Ctx);
std::unique_ptr<X86Operand> DispOp =
- X86Operand::CreateMem(0, Disp, Reg, 0, 1, SMLoc(), SMLoc());
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
+ SMLoc());
EmitLEA(*DispOp, VT, Reg, Out);
Residue -= Disp->getValue();
}
@@ -459,9 +479,10 @@
if (Displacement == 0 ||
(Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
*Residue = Displacement;
- return X86Operand::CreateMem(Op.getMemSegReg(), Op.getMemDisp(),
- Op.getMemBaseReg(), Op.getMemIndexReg(),
- Op.getMemScale(), SMLoc(), SMLoc());
+ return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
+ Op.getMemDisp(), Op.getMemBaseReg(),
+ Op.getMemIndexReg(), Op.getMemScale(),
+ SMLoc(), SMLoc());
}
int64_t OrigDisplacement =
@@ -474,9 +495,9 @@
*Residue = Displacement - NewDisplacement;
const MCExpr *Disp = MCConstantExpr::Create(NewDisplacement, Ctx);
- return X86Operand::CreateMem(Op.getMemSegReg(), Disp, Op.getMemBaseReg(),
- Op.getMemIndexReg(), Op.getMemScale(), SMLoc(),
- SMLoc());
+ return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
+ Op.getMemBaseReg(), Op.getMemIndexReg(),
+ Op.getMemScale(), SMLoc(), SMLoc());
}
class X86AddressSanitizer32 : public X86AddressSanitizer {
@@ -625,7 +646,8 @@
Inst.addOperand(MCOperand::CreateReg(ShadowRegI8));
const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ShadowRegI32, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
+ SMLoc(), SMLoc()));
Op->addMemOperands(Inst, 5);
EmitInstruction(Out, Inst);
}
@@ -649,7 +671,8 @@
case 2: {
const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ScratchRegI32, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
+ SMLoc(), SMLoc()));
EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
break;
}
@@ -704,7 +727,8 @@
}
const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ShadowRegI32, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
+ SMLoc(), SMLoc()));
Op->addMemOperands(Inst, 5);
Inst.addOperand(MCOperand::CreateImm(0));
EmitInstruction(Out, Inst);
@@ -843,7 +867,8 @@
void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
const MCExpr *Disp = MCConstantExpr::Create(Offset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, X86::RSP, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
+ SMLoc(), SMLoc()));
EmitLEA(*Op, MVT::i64, X86::RSP, Out);
OrigSPOffset += Offset;
}
@@ -896,7 +921,8 @@
Inst.addOperand(MCOperand::CreateReg(ShadowRegI8));
const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ShadowRegI64, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
+ SMLoc(), SMLoc()));
Op->addMemOperands(Inst, 5);
EmitInstruction(Out, Inst);
}
@@ -920,7 +946,8 @@
case 2: {
const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ScratchRegI32, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
+ SMLoc(), SMLoc()));
EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
break;
}
@@ -975,7 +1002,8 @@
}
const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
std::unique_ptr<X86Operand> Op(
- X86Operand::CreateMem(0, Disp, ShadowRegI64, 0, 1, SMLoc(), SMLoc()));
+ X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
+ SMLoc(), SMLoc()));
Op->addMemOperands(Inst, 5);
Inst.addOperand(MCOperand::CreateImm(0));
EmitInstruction(Out, Inst);
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index b2754e6..74b9c5a 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -711,13 +711,6 @@
uint64_t &ErrorInfo,
bool MatchingInlineAsm);
- unsigned getPointerSize() {
- if (is16BitMode()) return 16;
- if (is32BitMode()) return 32;
- if (is64BitMode()) return 64;
- llvm_unreachable("invalid mode");
- }
-
bool OmitRegisterFromClobberLists(unsigned RegNo) override;
/// doSrcDstMatch - Returns true if operands are matching in their
@@ -977,16 +970,18 @@
unsigned basereg =
is64BitMode() ? X86::RSI : (is32BitMode() ? X86::ESI : X86::SI);
const MCExpr *Disp = MCConstantExpr::Create(0, getContext());
- return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg,
- /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0);
+ return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
+ /*BaseReg=*/basereg, /*IndexReg=*/0, /*Scale=*/1,
+ Loc, Loc, 0);
}
std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
unsigned basereg =
is64BitMode() ? X86::RDI : (is32BitMode() ? X86::EDI : X86::DI);
const MCExpr *Disp = MCConstantExpr::Create(0, getContext());
- return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg,
- /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0);
+ return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
+ /*BaseReg=*/basereg, /*IndexReg=*/0, /*Scale=*/1,
+ Loc, Loc, 0);
}
std::unique_ptr<X86Operand> X86AsmParser::ParseOperand() {
@@ -1027,8 +1022,8 @@
// Create an absolute memory reference in order to match against
// instructions taking a PC relative operand.
- return X86Operand::CreateMem(Disp, Start, End, Size, Identifier,
- Info.OpDecl);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size,
+ Identifier, Info.OpDecl);
}
// We either have a direct symbol reference, or an offset from a symbol. The
@@ -1050,8 +1045,9 @@
// if we don't know the actual value at this time. This is necessary to
// get the matching correct in some cases.
BaseReg = BaseReg ? BaseReg : 1;
- return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start,
- End, Size, Identifier, Info.OpDecl);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
+ IndexReg, Scale, Start, End, Size, Identifier,
+ Info.OpDecl);
}
static void
@@ -1292,17 +1288,17 @@
// handle [-42]
if (!BaseReg && !IndexReg) {
if (!SegReg)
- return X86Operand::CreateMem(Disp, Start, End, Size);
- else
- return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
+ Start, End, Size);
}
StringRef ErrMsg;
if (CheckBaseRegAndIndexReg(BaseReg, IndexReg, ErrMsg)) {
Error(StartInBrac, ErrMsg);
return nullptr;
}
- return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start,
- End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
+ IndexReg, Scale, Start, End, Size);
}
InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo();
@@ -1383,9 +1379,9 @@
// be followed by a bracketed expression. If it isn't we know we have our
// final segment override.
const MCExpr *Disp = MCConstantExpr::Create(ImmDisp, getContext());
- return X86Operand::CreateMem(SegReg, Disp, /*BaseReg=*/0, /*IndexReg=*/0,
- /*Scale=*/1, Start, ImmDispToken.getEndLoc(),
- Size);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp,
+ /*BaseReg=*/0, /*IndexReg=*/0, /*Scale=*/1,
+ Start, ImmDispToken.getEndLoc(), Size);
}
}
@@ -1398,7 +1394,7 @@
if (getParser().parsePrimaryExpr(Val, End))
return ErrorOperand(Tok.getLoc(), "unknown token in expression");
- return X86Operand::CreateMem(Val, Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), Val, Start, End, Size);
}
InlineAsmIdentifierInfo Info;
@@ -1428,7 +1424,7 @@
if (getParser().parsePrimaryExpr(Val, End))
return ErrorOperand(Tok.getLoc(), "unknown token in expression");
- return X86Operand::CreateMem(Val, Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), Val, Start, End, Size);
}
InlineAsmIdentifierInfo Info;
@@ -1466,9 +1462,9 @@
// BaseReg is non-zero to avoid assertions. In the context of inline asm,
// we're pointing to a local variable in memory, so the base register is
// really the frame or stack pointer.
- return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/1, /*IndexReg=*/0,
- /*Scale=*/1, Start, End, Size, Identifier,
- Info.OpDecl);
+ return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
+ /*BaseReg=*/1, /*IndexReg=*/0, /*Scale=*/1,
+ Start, End, Size, Identifier, Info.OpDecl);
}
/// Parse the '.' operator.
@@ -1643,7 +1639,8 @@
// to the MCExpr with the directional local symbol and this is a
// memory operand not an immediate operand.
if (SM.getSym())
- return X86Operand::CreateMem(SM.getSym(), Start, End, Size);
+ return X86Operand::CreateMem(getPointerWidth(), SM.getSym(), Start, End,
+ Size);
const MCExpr *ImmExpr = MCConstantExpr::Create(Imm, getContext());
return X86Operand::CreateImm(ImmExpr, Start, End);
@@ -1802,8 +1799,9 @@
if (getLexer().isNot(AsmToken::LParen)) {
// Unless we have a segment register, treat this as an immediate.
if (SegReg == 0)
- return X86Operand::CreateMem(Disp, MemStart, ExprEnd);
- return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, MemStart, ExprEnd);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
+ MemStart, ExprEnd);
}
// Eat the '('.
@@ -1829,8 +1827,10 @@
if (getLexer().isNot(AsmToken::LParen)) {
// Unless we have a segment register, treat this as an immediate.
if (SegReg == 0)
- return X86Operand::CreateMem(Disp, LParenLoc, ExprEnd);
- return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, LParenLoc,
+ ExprEnd);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
+ MemStart, ExprEnd);
}
// Eat the '('.
@@ -1946,9 +1946,9 @@
}
if (SegReg || BaseReg || IndexReg)
- return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
- MemStart, MemEnd);
- return X86Operand::CreateMem(Disp, MemStart, MemEnd);
+ return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
+ IndexReg, Scale, MemStart, MemEnd);
+ return X86Operand::CreateMem(getPointerWidth(), Disp, MemStart, MemEnd);
}
bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
@@ -2614,7 +2614,7 @@
static const char *const PtrSizedInstrs[] = {"call", "jmp", "push"};
for (const char *Instr : PtrSizedInstrs) {
if (Mnemonic == Instr) {
- UnsizedMemOp->Mem.Size = getPointerSize();
+ UnsizedMemOp->Mem.Size = getPointerWidth();
break;
}
}
diff --git a/llvm/lib/Target/X86/AsmParser/X86Operand.h b/llvm/lib/Target/X86/AsmParser/X86Operand.h
index e0fab8d..0461433 100644
--- a/llvm/lib/Target/X86/AsmParser/X86Operand.h
+++ b/llvm/lib/Target/X86/AsmParser/X86Operand.h
@@ -53,6 +53,7 @@
unsigned IndexReg;
unsigned Scale;
unsigned Size;
+ unsigned ModeSize;
};
union {
@@ -120,6 +121,10 @@
assert(Kind == Memory && "Invalid access!");
return Mem.Scale;
}
+ unsigned getMemModeSize() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.ModeSize;
+ }
bool isToken() const override {return Kind == Token; }
@@ -288,21 +293,40 @@
return isMem64() && isDstIdx();
}
- bool isMemOffs8() const {
- return Kind == Memory && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 8);
+ bool isMemOffs() const {
+ return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
+ getMemScale() == 1;
}
- bool isMemOffs16() const {
- return Kind == Memory && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 16);
+
+ bool isMemOffs16_8() const {
+ return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
}
- bool isMemOffs32() const {
- return Kind == Memory && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 32);
+ bool isMemOffs16_16() const {
+ return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
}
- bool isMemOffs64() const {
- return Kind == Memory && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 64);
+ bool isMemOffs16_32() const {
+ return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
+ }
+ bool isMemOffs32_8() const {
+ return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
+ }
+ bool isMemOffs32_16() const {
+ return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
+ }
+ bool isMemOffs32_32() const {
+ return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
+ }
+ bool isMemOffs64_8() const {
+ return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
+ }
+ bool isMemOffs64_16() const {
+ return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
+ }
+ bool isMemOffs64_32() const {
+ return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
+ }
+ bool isMemOffs64_64() const {
+ return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
}
bool isReg() const override { return Kind == Register; }
@@ -430,8 +454,9 @@
/// Create an absolute memory operand.
static std::unique_ptr<X86Operand>
- CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, unsigned Size = 0,
- StringRef SymName = StringRef(), void *OpDecl = nullptr) {
+ CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
+ unsigned Size = 0, StringRef SymName = StringRef(),
+ void *OpDecl = nullptr) {
auto Res = llvm::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
Res->Mem.SegReg = 0;
Res->Mem.Disp = Disp;
@@ -439,6 +464,7 @@
Res->Mem.IndexReg = 0;
Res->Mem.Scale = 1;
Res->Mem.Size = Size;
+ Res->Mem.ModeSize = ModeSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
@@ -447,9 +473,9 @@
/// Create a generalized memory operand.
static std::unique_ptr<X86Operand>
- CreateMem(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg,
- unsigned IndexReg, unsigned Scale, SMLoc StartLoc, SMLoc EndLoc,
- unsigned Size = 0, StringRef SymName = StringRef(),
+ CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
+ unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
+ SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
void *OpDecl = nullptr) {
// We should never just have a displacement, that should be parsed as an
// absolute memory operand.
@@ -465,6 +491,7 @@
Res->Mem.IndexReg = IndexReg;
Res->Mem.Scale = Scale;
Res->Mem.Size = Size;
+ Res->Mem.ModeSize = ModeSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
diff --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td
index 28de158..80ff5e9 100644
--- a/llvm/lib/Target/X86/X86InstrControl.td
+++ b/llvm/lib/Target/X86/X86InstrControl.td
@@ -106,20 +106,14 @@
// jecxz.
let Uses = [CX] in
def JCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jcxz\t$dst", [], IIC_JCXZ>, AdSize16, Requires<[Not64BitMode]>;
+ "jcxz\t$dst", [], IIC_JCXZ>, AdSize16;
let Uses = [ECX] in
- def JECXZ_32 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jecxz\t$dst", [], IIC_JCXZ>, AdSize32, Requires<[Not64BitMode]>;
+ def JECXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
+ "jecxz\t$dst", [], IIC_JCXZ>, AdSize32;
- // J*CXZ instruction: 64-bit versions of this instruction for the asmparser.
- // In 64-bit mode, the address size prefix is jecxz and the unprefixed version
- // is jrcxz.
- let Uses = [ECX] in
- def JECXZ_64 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jecxz\t$dst", [], IIC_JCXZ>, AdSize32, Requires<[In64BitMode]>;
let Uses = [RCX] in
def JRCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jrcxz\t$dst", [], IIC_JCXZ>, AdSize64, Requires<[In64BitMode]>;
+ "jrcxz\t$dst", [], IIC_JCXZ>, AdSize64;
}
// Indirect branches
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 8dc4311..567c3ce 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -464,23 +464,53 @@
let RenderMethod = "addDstIdxOperands";
let SuperClasses = [X86Mem64AsmOperand];
}
-def X86MemOffs8AsmOperand : AsmOperandClass {
- let Name = "MemOffs8";
+def X86MemOffs16_8AsmOperand : AsmOperandClass {
+ let Name = "MemOffs16_8";
let RenderMethod = "addMemOffsOperands";
let SuperClasses = [X86Mem8AsmOperand];
}
-def X86MemOffs16AsmOperand : AsmOperandClass {
- let Name = "MemOffs16";
+def X86MemOffs16_16AsmOperand : AsmOperandClass {
+ let Name = "MemOffs16_16";
let RenderMethod = "addMemOffsOperands";
let SuperClasses = [X86Mem16AsmOperand];
}
-def X86MemOffs32AsmOperand : AsmOperandClass {
- let Name = "MemOffs32";
+def X86MemOffs16_32AsmOperand : AsmOperandClass {
+ let Name = "MemOffs16_32";
let RenderMethod = "addMemOffsOperands";
let SuperClasses = [X86Mem32AsmOperand];
}
-def X86MemOffs64AsmOperand : AsmOperandClass {
- let Name = "MemOffs64";
+def X86MemOffs32_8AsmOperand : AsmOperandClass {
+ let Name = "MemOffs32_8";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem8AsmOperand];
+}
+def X86MemOffs32_16AsmOperand : AsmOperandClass {
+ let Name = "MemOffs32_16";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem16AsmOperand];
+}
+def X86MemOffs32_32AsmOperand : AsmOperandClass {
+ let Name = "MemOffs32_32";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem32AsmOperand];
+}
+def X86MemOffs64_8AsmOperand : AsmOperandClass {
+ let Name = "MemOffs64_8";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem8AsmOperand];
+}
+def X86MemOffs64_16AsmOperand : AsmOperandClass {
+ let Name = "MemOffs64_16";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem16AsmOperand];
+}
+def X86MemOffs64_32AsmOperand : AsmOperandClass {
+ let Name = "MemOffs64_32";
+ let RenderMethod = "addMemOffsOperands";
+ let SuperClasses = [X86Mem32AsmOperand];
+}
+def X86MemOffs64_64AsmOperand : AsmOperandClass {
+ let Name = "MemOffs64_64";
let RenderMethod = "addMemOffsOperands";
let SuperClasses = [X86Mem64AsmOperand];
}
@@ -517,20 +547,44 @@
let ParserMatchClass = X86DstIdx64Operand;
let MIOperandInfo = (ops ptr_rc);
let PrintMethod = "printDstIdx64"; }
-def offset8 : Operand<iPTR> {
- let ParserMatchClass = X86MemOffs8AsmOperand;
+def offset16_8 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs16_8AsmOperand;
let MIOperandInfo = (ops i64imm, i8imm);
let PrintMethod = "printMemOffs8"; }
-def offset16 : Operand<iPTR> {
- let ParserMatchClass = X86MemOffs16AsmOperand;
+def offset16_16 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs16_16AsmOperand;
let MIOperandInfo = (ops i64imm, i8imm);
let PrintMethod = "printMemOffs16"; }
-def offset32 : Operand<iPTR> {
- let ParserMatchClass = X86MemOffs32AsmOperand;
+def offset16_32 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs16_32AsmOperand;
let MIOperandInfo = (ops i64imm, i8imm);
let PrintMethod = "printMemOffs32"; }
-def offset64 : Operand<iPTR> {
- let ParserMatchClass = X86MemOffs64AsmOperand;
+def offset32_8 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs32_8AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
+ let PrintMethod = "printMemOffs8"; }
+def offset32_16 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs32_16AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
+ let PrintMethod = "printMemOffs16"; }
+def offset32_32 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs32_32AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
+ let PrintMethod = "printMemOffs32"; }
+def offset64_8 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs64_8AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
+ let PrintMethod = "printMemOffs8"; }
+def offset64_16 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs64_16AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
+ let PrintMethod = "printMemOffs16"; }
+def offset64_32 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs64_32AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
+ let PrintMethod = "printMemOffs32"; }
+def offset64_64 : Operand<iPTR> {
+ let ParserMatchClass = X86MemOffs64_64AsmOperand;
let MIOperandInfo = (ops i64imm, i8imm);
let PrintMethod = "printMemOffs64"; }
}
@@ -1253,57 +1307,57 @@
let SchedRW = [WriteALU] in {
let mayLoad = 1 in {
let Defs = [AL] in
-def MOV8o8a : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset8:$src),
+def MOV8o8a : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
"mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>,
- AdSize32, Requires<[In32BitMode]>;
+ AdSize32;
let Defs = [AX] in
-def MOV16o16a : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset16:$src),
+def MOV16o16a : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
"mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
- OpSize16, AdSize32, Requires<[In32BitMode]>;
+ OpSize16, AdSize32;
let Defs = [EAX] in
-def MOV32o32a : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32:$src),
+def MOV32o32a : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
"mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
- OpSize32, AdSize32, Requires<[In32BitMode]>;
+ OpSize32, AdSize32;
let Defs = [AL] in
-def MOV8o8a_16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset8:$src),
+def MOV8o8a_16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
"mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>,
- AdSize16, Requires<[In16BitMode]>;
+ AdSize16;
let Defs = [AX] in
-def MOV16o16a_16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16:$src),
+def MOV16o16a_16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
"mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
- OpSize16, AdSize16, Requires<[In16BitMode]>;
+ OpSize16, AdSize16;
let Defs = [EAX] in
-def MOV32o32a_16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset32:$src),
+def MOV32o32a_16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
"mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
- AdSize16, OpSize32, Requires<[In16BitMode]>;
+ AdSize16, OpSize32;
}
let mayStore = 1 in {
let Uses = [AL] in
-def MOV8ao8 : Ii32<0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins),
+def MOV8ao8 : Ii32<0xA2, RawFrmMemOffs, (outs offset32_8:$dst), (ins),
"mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>,
- AdSize32, Requires<[In32BitMode]>;
+ AdSize32;
let Uses = [AX] in
-def MOV16ao16 : Ii32<0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins),
+def MOV16ao16 : Ii32<0xA3, RawFrmMemOffs, (outs offset32_16:$dst), (ins),
"mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
- OpSize16, AdSize32, Requires<[In32BitMode]>;
+ OpSize16, AdSize32;
let Uses = [EAX] in
-def MOV32ao32 : Ii32<0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
+def MOV32ao32 : Ii32<0xA3, RawFrmMemOffs, (outs offset32_32:$dst), (ins),
"mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
- OpSize32, AdSize32, Requires<[In32BitMode]>;
+ OpSize32, AdSize32;
let Uses = [AL] in
-def MOV8ao8_16 : Ii16<0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins),
+def MOV8ao8_16 : Ii16<0xA2, RawFrmMemOffs, (outs offset16_8:$dst), (ins),
"mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>,
- AdSize16, Requires<[In16BitMode]>;
+ AdSize16;
let Uses = [AX] in
-def MOV16ao16_16 : Ii16<0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins),
+def MOV16ao16_16 : Ii16<0xA3, RawFrmMemOffs, (outs offset16_16:$dst), (ins),
"mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
- OpSize16, AdSize16, Requires<[In16BitMode]>;
+ OpSize16, AdSize16;
let Uses = [EAX] in
-def MOV32ao32_16 : Ii16<0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
+def MOV32ao32_16 : Ii16<0xA3, RawFrmMemOffs, (outs offset16_32:$dst), (ins),
"mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
- OpSize32, AdSize16, Requires<[In16BitMode]>;
+ OpSize32, AdSize16;
}
}
@@ -1311,40 +1365,34 @@
// and use the movabs mnemonic to indicate this specific form.
let mayLoad = 1 in {
let Defs = [AL] in
-def MOV64o8a : RIi64_NOREX<0xA0, RawFrmMemOffs, (outs), (ins offset8:$src),
- "movabs{b}\t{$src, %al|al, $src}", []>,
- AdSize64, Requires<[In64BitMode]>;
+def MOV64o8a : RIi64_NOREX<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
+ "movabs{b}\t{$src, %al|al, $src}", []>, AdSize64;
let Defs = [AX] in
-def MOV64o16a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset16:$src),
- "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize16,
- AdSize64, Requires<[In64BitMode]>;
+def MOV64o16a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
+ "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize16, AdSize64;
let Defs = [EAX] in
-def MOV64o32a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset32:$src),
+def MOV64o32a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
"movabs{l}\t{$src, %eax|eax, $src}", []>, OpSize32,
- AdSize64, Requires<[In64BitMode]>;
+ AdSize64;
let Defs = [RAX] in
-def MOV64o64a : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64:$src),
- "movabs{q}\t{$src, %rax|rax, $src}", []>,
- AdSize64, Requires<[In64BitMode]>;
+def MOV64o64a : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
+ "movabs{q}\t{$src, %rax|rax, $src}", []>, AdSize64;
}
let mayStore = 1 in {
let Uses = [AL] in
-def MOV64ao8 : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins),
- "movabs{b}\t{%al, $dst|$dst, al}", []>,
- AdSize64, Requires<[In64BitMode]>;
+def MOV64ao8 : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs offset64_8:$dst), (ins),
+ "movabs{b}\t{%al, $dst|$dst, al}", []>, AdSize64;
let Uses = [AX] in
-def MOV64ao16 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins),
- "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize16,
- AdSize64, Requires<[In64BitMode]>;
+def MOV64ao16 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_16:$dst), (ins),
+ "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize16, AdSize64;
let Uses = [EAX] in
-def MOV64ao32 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
+def MOV64ao32 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_32:$dst), (ins),
"movabs{l}\t{%eax, $dst|$dst, eax}", []>, OpSize32,
- AdSize64, Requires<[In64BitMode]>;
+ AdSize64;
let Uses = [RAX] in
-def MOV64ao64 : RIi64<0xA3, RawFrmMemOffs, (outs offset64:$dst), (ins),
- "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
- AdSize64, Requires<[In64BitMode]>;
+def MOV64ao64 : RIi64<0xA3, RawFrmMemOffs, (outs offset64_64:$dst), (ins),
+ "movabs{q}\t{%rax, $dst|$dst, rax}", []>, AdSize64;
}
} // hasSideEffects = 0
diff --git a/llvm/test/MC/Disassembler/X86/moffs.txt b/llvm/test/MC/Disassembler/X86/moffs.txt
index d9b7f18..ac18859 100644
--- a/llvm/test/MC/Disassembler/X86/moffs.txt
+++ b/llvm/test/MC/Disassembler/X86/moffs.txt
@@ -4,83 +4,83 @@
# 16: movb 0x5a5a, %al # encoding: [0xa0,0x5a,0x5a]
# 32: movb 0x5a5a5a5a, %al # encoding: [0xa0,0x5a,0x5a,0x5a,0x5a]
-# 64: movabsb 0x5a5a5a5a5a5a5a5a, %al
+# 64: movabsb 0x5a5a5a5a5a5a5a5a, %al # encoding: [0xa0,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a]
0xa0 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movb 0x5a5a5a5a, %al # encoding: [0x67,0xa0,0x5a,0x5a,0x5a,0x5a]
# 32: movb 0x5a5a, %al # encoding: [0x67,0xa0,0x5a,0x5a]
-# 64: movabsb 0x5a5a5a5a, %al
+# 64: movb 0x5a5a5a5a, %al # encoding: [0x67,0xa0,0x5a,0x5a,0x5a,0x5a]
0x67 0xa0 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movw 0x5a5a, %ax # encoding: [0xa1,0x5a,0x5a]
# 32: movl 0x5a5a5a5a, %eax # encoding: [0xa1,0x5a,0x5a,0x5a,0x5a]
-# 64: movabsl 0x5a5a5a5a5a5a5a5a, %eax
+# 64: movabsl 0x5a5a5a5a5a5a5a5a, %eax # encoding: [0xa1,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a]
0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movw 0x5a5a5a5a, %ax # encoding: [0x67,0xa1,0x5a,0x5a,0x5a,0x5a]
# 32: movl 0x5a5a, %eax # encoding: [0x67,0xa1,0x5a,0x5a]
-# 64: movabsl 0x5a5a5a5a, %eax
+# 64: movl 0x5a5a5a5a, %eax # encoding: [0x67,0xa1,0x5a,0x5a,0x5a,0x5a]
0x67 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movl 0x5a5a, %eax # encoding: [0x66,0xa1,0x5a,0x5a]
# 32: movw 0x5a5a5a5a, %ax # encoding: [0x66,0xa1,0x5a,0x5a,0x5a,0x5a]
-# 64: movabsw 0x5a5a5a5a5a5a5a5a, %ax
+# 64: movabsw 0x5a5a5a5a5a5a5a5a, %ax # encoding: [0x66,0xa1,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a]
0x66 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movl 0x5a5a5a5a, %eax # encoding: [0x67,0x66,0xa1,0x5a,0x5a,0x5a,0x5a]
# 32: movw 0x5a5a, %ax # encoding: [0x67,0x66,0xa1,0x5a,0x5a]
-# 64: movabsw 0x5a5a5a5a, %ax
+# 64: movw 0x5a5a5a5a, %ax # encoding: [0x67,0x66,0xa1,0x5a,0x5a,0x5a,0x5a]
0x66 0x67 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movl 0x5a5a5a5a, %eax # encoding: [0x67,0x66,0xa1,0x5a,0x5a,0x5a,0x5a]
# 32: movw 0x5a5a, %ax # encoding: [0x67,0x66,0xa1,0x5a,0x5a]
-# 64: movabsw 0x5a5a5a5a, %ax
+# 64: movw 0x5a5a5a5a, %ax # encoding: [0x67,0x66,0xa1,0x5a,0x5a,0x5a,0x5a]
0x67 0x66 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movl %es:0x5a5a5a5a, %eax # encoding: [0x67,0x66,0x26,0xa1,0x5a,0x5a,0x5a,0x5a]
# 32: movw %es:0x5a5a, %ax # encoding: [0x67,0x66,0x26,0xa1,0x5a,0x5a]
-# 64: movabsw %es:0x5a5a5a5a, %ax
+# 64: movw %es:0x5a5a5a5a, %ax # encoding: [0x67,0x66,0x26,0xa1,0x5a,0x5a,0x5a,0x5a]
0x67 0x26 0x66 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a # encoding: [0xa0,0x5a,0x5a]
# 16: movb %al, 0x5a5a # encoding: [0xa2,0x5a,0x5a]
# 32: movb %al, 0x5a5a5a5a # encoding: [0xa2,0x5a,0x5a,0x5a,0x5a]
-# 64: movabsb %al, 0x5a5a5a5a5a5a5a5a
+# 64: movabsb %al, 0x5a5a5a5a5a5a5a5a # encoding: [0xa2,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a]
0xa2 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a # encoding: [0xa0,0x5a,0x5a]
# 16: movb %al, 0x5a5a5a5a # encoding: [0x67,0xa2,0x5a,0x5a,0x5a,0x5a]
# 32: movb %al, 0x5a5a # encoding: [0x67,0xa2,0x5a,0x5a]
-# 64: movabsb %al, 0x5a5a5a5a
+# 64: movb %al, 0x5a5a5a5a # encoding: [0x67,0xa2,0x5a,0x5a,0x5a,0x5a]
0x67 0xa2 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movw %ax, 0x5a5a # encoding: [0xa3,0x5a,0x5a]
# 32: movl %eax, 0x5a5a5a5a # encoding: [0xa3,0x5a,0x5a,0x5a,0x5a]
-# 64: movabsl %eax, 0x5a5a5a5a5a5a5a5a
+# 64: movabsl %eax, 0x5a5a5a5a5a5a5a5a # encoding: [0xa3,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a]
0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movw %ax, %gs:0x5a5a5a5a # encoding: [0x67,0x65,0xa3,0x5a,0x5a,0x5a,0x5a]
# 32: movl %eax, %gs:0x5a5a # encoding: [0x67,0x65,0xa3,0x5a,0x5a]
-# 64: movabsl %eax, %gs:0x5a5a5a5a
+# 64: movl %eax, %gs:0x5a5a5a5a # encoding: [0x67,0x65,0xa3,0x5a,0x5a,0x5a,0x5a]
0x65 0x67 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movl %eax, 0x5a5a # encoding: [0x66,0xa3,0x5a,0x5a]
# 32: movw %ax, 0x5a5a5a5a # encoding: [0x66,0xa3,0x5a,0x5a,0x5a,0x5a]
-# 64: movabsw %ax, 0x5a5a5a5a5a5a5a5a
+# 64: movabsw %ax, 0x5a5a5a5a5a5a5a5a # encoding: [0x66,0xa3,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a,0x5a]
0x66 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movl %eax, 0x5a5a5a5a # encoding: [0x67,0x66,0xa3,0x5a,0x5a,0x5a,0x5a]
# 32: movw %ax, 0x5a5a # encoding: [0x67,0x66,0xa3,0x5a,0x5a]
-# 64: movabsw %ax, 0x5a5a5a5a
+# 64: movw %ax, 0x5a5a5a5a # encoding: [0x67,0x66,0xa3,0x5a,0x5a,0x5a,0x5a]
0x66 0x67 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movl %eax, 0x5a5a5a5a # encoding: [0x67,0x66,0xa3,0x5a,0x5a,0x5a,0x5a]
# 32: movw %ax, 0x5a5a # encoding: [0x67,0x66,0xa3,0x5a,0x5a]
-# 64: movabsw %ax, 0x5a5a5a5a
+# 64: movw %ax, 0x5a5a5a5a # encoding: [0x67,0x66,0xa3,0x5a,0x5a,0x5a,0x5a]
0x67 0x66 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
# 16: movl %eax, %es:0x5a5a5a5a # encoding: [0x67,0x66,0x26,0xa3,0x5a,0x5a,0x5a,0x5a]
# 32: movw %ax, %es:0x5a5a # encoding: [0x67,0x66,0x26,0xa3,0x5a,0x5a]
-# 64: movabsw %ax, %es:0x5a5a5a5a
+# 64: movw %ax, %es:0x5a5a5a5a # encoding: [0x67,0x66,0x26,0xa3,0x5a,0x5a,0x5a,0x5a]
0x67 0x26 0x66 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
diff --git a/llvm/utils/TableGen/X86DisassemblerTables.cpp b/llvm/utils/TableGen/X86DisassemblerTables.cpp
index 5c3931f..138a34b 100644
--- a/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -75,13 +75,13 @@
/// @return - True if child is a subset of parent, false otherwise.
static inline bool inheritsFrom(InstructionContext child,
InstructionContext parent,
- bool VEX_LIG = false) {
+ bool VEX_LIG = false, bool AdSize64 = false) {
if (child == parent)
return true;
switch (parent) {
case IC:
- return(inheritsFrom(child, IC_64BIT) ||
+ return(inheritsFrom(child, IC_64BIT, AdSize64) ||
inheritsFrom(child, IC_OPSIZE) ||
inheritsFrom(child, IC_ADSIZE) ||
inheritsFrom(child, IC_XD) ||
@@ -89,7 +89,7 @@
case IC_64BIT:
return(inheritsFrom(child, IC_64BIT_REXW) ||
inheritsFrom(child, IC_64BIT_OPSIZE) ||
- inheritsFrom(child, IC_64BIT_ADSIZE) ||
+ (!AdSize64 && inheritsFrom(child, IC_64BIT_ADSIZE)) ||
inheritsFrom(child, IC_64BIT_XD) ||
inheritsFrom(child, IC_64BIT_XS));
case IC_OPSIZE:
@@ -117,7 +117,7 @@
inheritsFrom(child, IC_64BIT_REXW_OPSIZE));
case IC_64BIT_OPSIZE:
return inheritsFrom(child, IC_64BIT_REXW_OPSIZE) ||
- inheritsFrom(child, IC_64BIT_OPSIZE_ADSIZE);
+ (!AdSize64 && inheritsFrom(child, IC_64BIT_OPSIZE_ADSIZE));
case IC_64BIT_XD:
return(inheritsFrom(child, IC_64BIT_REXW_XD));
case IC_64BIT_XS:
@@ -865,15 +865,19 @@
const ModRMFilter &filter,
InstrUID uid,
bool is32bit,
- bool ignoresVEX_L) {
+ bool ignoresVEX_L,
+ unsigned addressSize) {
ContextDecision &decision = *Tables[type];
for (unsigned index = 0; index < IC_max; ++index) {
- if (is32bit && inheritsFrom((InstructionContext)index, IC_64BIT))
+ if ((is32bit || addressSize == 16) &&
+ inheritsFrom((InstructionContext)index, IC_64BIT))
continue;
+ bool adSize64 = addressSize == 64;
if (inheritsFrom((InstructionContext)index,
- InstructionSpecifiers[uid].insnContext, ignoresVEX_L))
+ InstructionSpecifiers[uid].insnContext, ignoresVEX_L,
+ adSize64))
setTableFields(decision.opcodeDecisions[index].modRMDecisions[opcode],
filter,
uid,
diff --git a/llvm/utils/TableGen/X86DisassemblerTables.h b/llvm/utils/TableGen/X86DisassemblerTables.h
index d86b926..5a8688b 100644
--- a/llvm/utils/TableGen/X86DisassemblerTables.h
+++ b/llvm/utils/TableGen/X86DisassemblerTables.h
@@ -245,13 +245,15 @@
/// @param uid - The unique ID of the instruction.
/// @param is32bit - Instructon is only 32-bit
/// @param ignoresVEX_L - Instruction ignores VEX.L
+ /// @param AddrSize - The instruction's address size (16/32/64); 0 means unspecified
void setTableFields(OpcodeType type,
InstructionContext insnContext,
uint8_t opcode,
const ModRMFilter &filter,
InstrUID uid,
bool is32bit,
- bool ignoresVEX_L);
+ bool ignoresVEX_L,
+ unsigned AddrSize);
/// specForUID - Returns the instruction specifier for a given unique
/// instruction ID. Used when resolving collisions.
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.cpp b/llvm/utils/TableGen/X86RecognizableInstr.cpp
index 288d3d6..91c64aa 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -405,7 +405,7 @@
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
- } else if (Is64Bit || HasREX_WPrefix) {
+ } else if (Is64Bit || HasREX_WPrefix || AdSize == X86Local::AdSize64) {
if (HasREX_WPrefix && (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD))
insnContext = IC_64BIT_REXW_OPSIZE;
else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XD)
@@ -858,6 +858,13 @@
break;
} // switch (OpMap)
+ unsigned AddressSize = 0;
+ switch (AdSize) {
+ case X86Local::AdSize16: AddressSize = 16; break;
+ case X86Local::AdSize32: AddressSize = 32; break;
+ case X86Local::AdSize64: AddressSize = 64; break;
+ }
+
assert(opcodeType != (OpcodeType)-1 &&
"Opcode type not set");
assert(filter && "Filter not set");
@@ -875,13 +882,13 @@
insnContext(),
currentOpcode,
*filter,
- UID, Is32Bit, IgnoresVEX_L);
+ UID, Is32Bit, IgnoresVEX_L, AddressSize);
} else {
tables.setTableFields(opcodeType,
insnContext(),
opcodeToSet,
*filter,
- UID, Is32Bit, IgnoresVEX_L);
+ UID, Is32Bit, IgnoresVEX_L, AddressSize);
}
delete filter;
@@ -971,10 +978,16 @@
TYPE("dstidx16", TYPE_DSTIDX16)
TYPE("dstidx32", TYPE_DSTIDX32)
TYPE("dstidx64", TYPE_DSTIDX64)
- TYPE("offset8", TYPE_MOFFS8)
- TYPE("offset16", TYPE_MOFFS16)
- TYPE("offset32", TYPE_MOFFS32)
- TYPE("offset64", TYPE_MOFFS64)
+ TYPE("offset16_8", TYPE_MOFFS8)
+ TYPE("offset16_16", TYPE_MOFFS16)
+ TYPE("offset16_32", TYPE_MOFFS32)
+ TYPE("offset32_8", TYPE_MOFFS8)
+ TYPE("offset32_16", TYPE_MOFFS16)
+ TYPE("offset32_32", TYPE_MOFFS32)
+ TYPE("offset64_8", TYPE_MOFFS8)
+ TYPE("offset64_16", TYPE_MOFFS16)
+ TYPE("offset64_32", TYPE_MOFFS32)
+ TYPE("offset64_64", TYPE_MOFFS64)
TYPE("VR256", TYPE_XMM256)
TYPE("VR256X", TYPE_XMM256)
TYPE("VR512", TYPE_XMM512)
@@ -1200,10 +1213,16 @@
ENCODING("brtarget", ENCODING_Iv)
ENCODING("brtarget8", ENCODING_IB)
ENCODING("i64imm", ENCODING_IO)
- ENCODING("offset8", ENCODING_Ia)
- ENCODING("offset16", ENCODING_Ia)
- ENCODING("offset32", ENCODING_Ia)
- ENCODING("offset64", ENCODING_Ia)
+ ENCODING("offset16_8", ENCODING_Ia)
+ ENCODING("offset16_16", ENCODING_Ia)
+ ENCODING("offset16_32", ENCODING_Ia)
+ ENCODING("offset32_8", ENCODING_Ia)
+ ENCODING("offset32_16", ENCODING_Ia)
+ ENCODING("offset32_32", ENCODING_Ia)
+ ENCODING("offset64_8", ENCODING_Ia)
+ ENCODING("offset64_16", ENCODING_Ia)
+ ENCODING("offset64_32", ENCODING_Ia)
+ ENCODING("offset64_64", ENCODING_Ia)
ENCODING("srcidx8", ENCODING_SI)
ENCODING("srcidx16", ENCODING_SI)
ENCODING("srcidx32", ENCODING_SI)