[AArch64] Add Tiny Code Model for AArch64
This adds the plumbing for the Tiny code model to the AArch64 backend. Instead
of materializing addresses through the usual ADRP;ADD pair used in the Small
model, it uses a single ADR. The 21-bit range of an ADR means that the code
and its statically defined symbols must lie within 1MB of each other.
This makes the model mostly interesting for embedded applications that need to
fit as much as possible into as small a space as possible.
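
As a rough illustration (not part of this patch; exact output depends on the
target, optimization level, and driver support for -mcmodel=tiny), taking the
address of a global needs one fewer instruction under the tiny model:

  /* example.c -- build with something like:
       clang --target=aarch64-linux-gnu -O2 -S -mcmodel=tiny example.c
     or pass -code-model=tiny to llc on the corresponding IR. */
  int g;                 /* a statically defined global */
  int *addr_of_g(void) {
    return &g;           /* small model: adrp x0, g
                                         add  x0, x0, :lo12:g
                            tiny model:  adr  x0, g              */
  }
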
Differential Revision: https://reviews.llvm.org/D49673
llvm-svn: 340397
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 9226a9d..f7190d5 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -835,36 +835,55 @@
}
case AArch64::LOADgot: {
- // Expand into ADRP + LDR.
+ MachineFunction *MF = MBB.getParent();
unsigned DstReg = MI.getOperand(0).getReg();
const MachineOperand &MO1 = MI.getOperand(1);
unsigned Flags = MO1.getTargetFlags();
- MachineInstrBuilder MIB1 =
- BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
- MachineInstrBuilder MIB2 =
- BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
- .add(MI.getOperand(0))
- .addReg(DstReg);
- if (MO1.isGlobal()) {
- MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
- MIB2.addGlobalAddress(MO1.getGlobal(), 0,
- Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
- } else if (MO1.isSymbol()) {
- MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
- MIB2.addExternalSymbol(MO1.getSymbolName(),
- Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ if (MF->getTarget().getCodeModel() == CodeModel::Tiny) {
+ // Tiny code model: expand into an LDR (literal) from the GOT.
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(AArch64::LDRXl), DstReg);
+
+ if (MO1.isGlobal()) {
+ MIB.addGlobalAddress(MO1.getGlobal(), 0, Flags);
+ } else if (MO1.isSymbol()) {
+ MIB.addExternalSymbol(MO1.getSymbolName(), Flags);
+ } else {
+ assert(MO1.isCPI() &&
+ "Only expect globals, externalsymbols, or constant pools");
+ MIB.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), Flags);
+ }
} else {
- assert(MO1.isCPI() &&
- "Only expect globals, externalsymbols, or constant pools");
- MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
- Flags | AArch64II::MO_PAGE);
- MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
- Flags | AArch64II::MO_PAGEOFF |
- AArch64II::MO_NC);
- }
+ // Small code model: expand into ADRP + LDR.
+ MachineInstrBuilder MIB1 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
+ MachineInstrBuilder MIB2 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
+ .add(MI.getOperand(0))
+ .addReg(DstReg);
- transferImpOps(MI, MIB1, MIB2);
+ if (MO1.isGlobal()) {
+ MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
+ MIB2.addGlobalAddress(MO1.getGlobal(), 0,
+ Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ } else if (MO1.isSymbol()) {
+ MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
+ MIB2.addExternalSymbol(MO1.getSymbolName(), Flags |
+ AArch64II::MO_PAGEOFF |
+ AArch64II::MO_NC);
+ } else {
+ assert(MO1.isCPI() &&
+ "Only expect globals, externalsymbols, or constant pools");
+ MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
+ Flags | AArch64II::MO_PAGE);
+ MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
+ Flags | AArch64II::MO_PAGEOFF |
+ AArch64II::MO_NC);
+ }
+
+ transferImpOps(MI, MIB1, MIB2);
+ }
MI.eraseFromParent();
return true;
}
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index deb70e8..54914c0 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -686,6 +686,7 @@
.setMIFlags(MachineInstr::FrameSetup);
switch (MF.getTarget().getCodeModel()) {
+ case CodeModel::Tiny:
case CodeModel::Small:
case CodeModel::Medium:
case CodeModel::Kernel:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b665deb..f998d0f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1086,6 +1086,7 @@
case AArch64ISD::FIRST_NUMBER: break;
case AArch64ISD::CALL: return "AArch64ISD::CALL";
case AArch64ISD::ADRP: return "AArch64ISD::ADRP";
+ case AArch64ISD::ADR: return "AArch64ISD::ADR";
case AArch64ISD::ADDlow: return "AArch64ISD::ADDlow";
case AArch64ISD::LOADgot: return "AArch64ISD::LOADgot";
case AArch64ISD::RET_FLAG: return "AArch64ISD::RET_FLAG";
@@ -3912,6 +3913,17 @@
return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
}
+// (adr sym)
+template <class NodeTy>
+SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
+ unsigned Flags) const {
+ LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n");
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+ SDValue Sym = getTargetNode(N, Ty, DAG, Flags);
+ return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym);
+}
+
SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
@@ -3926,7 +3938,8 @@
assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
"unexpected offset in global node");
- // This also catches the large code model case for Darwin.
+ // This also catches the large code model case for Darwin, and the tiny
+ // code model with GOT relocations.
if ((OpFlags & AArch64II::MO_GOT) != 0) {
return getGOT(GN, DAG, TargetFlags);
}
@@ -3934,6 +3947,8 @@
SDValue Result;
if (getTargetMachine().getCodeModel() == CodeModel::Large) {
Result = getAddrLarge(GN, DAG, TargetFlags);
+ } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
+ Result = getAddrTiny(GN, DAG, TargetFlags);
} else {
Result = getAddr(GN, DAG, TargetFlags);
}
@@ -4055,13 +4070,15 @@
AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
assert(Subtarget->isTargetELF() && "This function expects an ELF target");
- assert(Subtarget->useSmallAddressing() &&
- "ELF TLS only supported in small memory model");
+ if (getTargetMachine().getCodeModel() == CodeModel::Large)
+ report_fatal_error("ELF TLS only supported in small memory model");
// Different choices can be made for the maximum size of the TLS area for a
// module. For the small address model, the default TLS size is 16MiB and the
// maximum TLS size is 4GiB.
// FIXME: add -mtls-size command line option and make it control the 16MiB
// vs. 4GiB code sequence generation.
+ // FIXME: add tiny code model support. We currently generate the same code
+ // as small, which may be larger than needed.
const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
@@ -4779,6 +4796,8 @@
if (getTargetMachine().getCodeModel() == CodeModel::Large &&
!Subtarget->isTargetMachO()) {
return getAddrLarge(JT, DAG);
+ } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
+ return getAddrTiny(JT, DAG);
}
return getAddr(JT, DAG);
}
@@ -4793,6 +4812,8 @@
return getGOT(CP, DAG);
}
return getAddrLarge(CP, DAG);
+ } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
+ return getAddrTiny(CP, DAG);
} else {
return getAddr(CP, DAG);
}
@@ -4804,9 +4825,10 @@
if (getTargetMachine().getCodeModel() == CodeModel::Large &&
!Subtarget->isTargetMachO()) {
return getAddrLarge(BA, DAG);
- } else {
- return getAddr(BA, DAG);
+ } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
+ return getAddrTiny(BA, DAG);
}
+ return getAddr(BA, DAG);
}
SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index a6d66ae..4470658 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -35,6 +35,7 @@
// offset of a variable into X0, using the TLSDesc model.
TLSDESC_CALLSEQ,
ADRP, // Page address of a TargetGlobalAddress operand.
+ ADR, // Pc-relative address of a TargetGlobalAddress operand (+/- 1MB range).
ADDlow, // Add the low 12 bits of a TargetGlobalAddress operand.
LOADgot, // Load from automatically generated descriptor (e.g. Global
// Offset Table, TLS record).
@@ -587,6 +588,8 @@
SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
template <class NodeTy>
SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
+ template <class NodeTy>
+ SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 032d53d..e17f7c9 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1632,6 +1632,9 @@
.addReg(Reg, RegState::Kill)
.addImm(0)
.addMemOperand(*MI.memoperands_begin());
+ } else if (TM.getCodeModel() == CodeModel::Tiny) {
+ BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg)
+ .addGlobalAddress(GV, 0, OpFlags);
} else {
BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
.addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index d89ff41..f1ec76b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -176,6 +176,7 @@
// Node definitions.
def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
+def AArch64adr : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
@@ -1385,7 +1386,8 @@
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
-def ADR : ADRI<0, "adr", adrlabel, []>;
+def ADR : ADRI<0, "adr", adrlabel,
+ [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
} // hasSideEffects = 0
def ADRP : ADRI<1, "adrp", adrplabel,
@@ -1393,6 +1395,10 @@
} // isReMaterializable = 1
// page address of a constant pool entry, block address
+def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
+def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
+def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
+def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index b2b5003..46b8b4d 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -983,6 +983,9 @@
materializeLargeCMVal(I, GV, OpFlags);
I.eraseFromParent();
return true;
+ } else if (TM.getCodeModel() == CodeModel::Tiny) {
+ I.setDesc(TII.get(AArch64::ADR));
+ I.getOperand(1).setTargetFlags(OpFlags);
} else {
I.setDesc(TII.get(AArch64::MOVaddr));
I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index 04bb90d..be655e3 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -204,7 +204,9 @@
// The small code model's direct accesses use ADRP, which cannot
// necessarily produce the value 0 (if the code is above 4GB).
- if (useSmallAddressing() && GV->hasExternalWeakLinkage())
+ // Same for the tiny code model, where we have a pc-relative LDR.
+ if ((useSmallAddressing() || TM.getCodeModel() == CodeModel::Tiny) &&
+ GV->hasExternalWeakLinkage())
return AArch64II::MO_GOT | Flags;
return Flags;
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index 120d713..c4b9b45 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -210,14 +210,16 @@
Optional<CodeModel::Model> CM,
bool JIT) {
if (CM) {
- if (*CM != CodeModel::Small && *CM != CodeModel::Large) {
+ if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
+ *CM != CodeModel::Large) {
if (!TT.isOSFuchsia())
report_fatal_error(
- "Only small and large code models are allowed on AArch64");
- else if (CM != CodeModel::Kernel)
- report_fatal_error(
- "Only small, kernel, and large code models are allowed on AArch64");
- }
+ "Only small, tiny and large code models are allowed on AArch64");
+ else if (*CM != CodeModel::Kernel)
+ report_fatal_error("Only small, tiny, kernel, and large code models "
+ "are allowed on AArch64");
+ } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
+ report_fatal_error("tiny code model is only supported on ELF");
return *CM;
}
// The default MCJIT memory managers make no guarantees about where they can
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 30a9a08..e18498c 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -2453,17 +2453,34 @@
SMLoc S = getLoc();
const MCExpr *Expr;
- const AsmToken &Tok = getParser().getTok();
- if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
- if (getParser().parseExpression(Expr))
+ // Leave anything with a bracket to the default for SVE
+ if (getParser().getTok().is(AsmToken::LBrac))
+ return MatchOperand_NoMatch;
+
+ if (getParser().getTok().is(AsmToken::Hash))
+ getParser().Lex(); // Eat hash token.
+
+ if (parseSymbolicImmVal(Expr))
+ return MatchOperand_ParseFail;
+
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
+ if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
+ ELFRefKind == AArch64MCExpr::VK_INVALID) {
+ // No modifier was specified at all; this is the syntax for an ELF basic
+ // ADR relocation (unfortunately).
+ Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
+ } else {
+ Error(S, "unexpected adr label");
return MatchOperand_ParseFail;
-
- SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
- Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
-
- return MatchOperand_Success;
+ }
}
- return MatchOperand_NoMatch;
+
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
+ return MatchOperand_Success;
}
/// tryParseFPImm - A floating point immediate expression operand.
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 8569465..37b92d9 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -10,6 +10,7 @@
#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCExpr.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
@@ -376,6 +377,14 @@
// to the linker -- a relocation!
if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
return true;
+
+ AArch64MCExpr::VariantKind RefKind =
+ static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+ AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
+ // LDR fixups against the GOT also always need a relocation.
+ if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
+ SymLoc == AArch64MCExpr::VK_GOT)
+ return true;
return false;
}
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
index 07a35de..2ccd7ce 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -138,7 +138,9 @@
} else
return ELF::R_AARCH64_PREL64;
case AArch64::fixup_aarch64_pcrel_adr_imm21:
- assert(SymLoc == AArch64MCExpr::VK_NONE && "unexpected ADR relocation");
+ if (SymLoc != AArch64MCExpr::VK_ABS)
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid symbol kind for ADR relocation");
return R_CLS(ADR_PREL_LO21);
case AArch64::fixup_aarch64_pcrel_adrp_imm21:
if (SymLoc == AArch64MCExpr::VK_ABS && !IsNC)
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
index cd93793..2d9ccd5 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
@@ -62,8 +62,10 @@
case VK_TLSDESC_LO12: return ":tlsdesc_lo12:";
case VK_ABS_PAGE: return "";
case VK_ABS_PAGE_NC: return ":pg_hi21_nc:";
+ case VK_GOT: return ":got:";
case VK_GOT_PAGE: return ":got:";
case VK_GOT_LO12: return ":got_lo12:";
+ case VK_GOTTPREL: return ":gottprel:";
case VK_GOTTPREL_PAGE: return ":gottprel:";
case VK_GOTTPREL_LO12_NC: return ":gottprel_lo12:";
case VK_GOTTPREL_G1: return ":gottprel_g1:";
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 40de21e..2f57019 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -526,6 +526,8 @@
.setMIFlags(MachineInstr::FrameSetup);
switch (TM.getCodeModel()) {
+ case CodeModel::Tiny:
+ llvm_unreachable("Tiny code model not available on ARM.");
case CodeModel::Small:
case CodeModel::Medium:
case CodeModel::Kernel:
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index d0b3fb9..a0b612f 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -9207,6 +9207,8 @@
// IP.
switch (TM.getCodeModel()) {
+ case CodeModel::Tiny:
+ llvm_unreachable("Tiny code model not available on ARM.");
case CodeModel::Small:
case CodeModel::Medium:
case CodeModel::Kernel:
diff --git a/llvm/lib/Target/X86/X86Subtarget.cpp b/llvm/lib/Target/X86/X86Subtarget.cpp
index 7e84323..59128a1 100644
--- a/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -77,6 +77,8 @@
if (isTargetELF()) {
switch (TM.getCodeModel()) {
// 64-bit small code model is simple: All rip-relative.
+ case CodeModel::Tiny:
+ llvm_unreachable("Tiny code model not supported on X86");
case CodeModel::Small:
case CodeModel::Kernel:
return X86II::MO_NO_FLAG;