Simplify references to command line flags.
This CL removes all indirect pointer chasing to get the values of
command line flags. Since we are only using 1 copy of ClFlags, this CL
introduces a static field Flags to hold the defined command line flags
(it was previously a static field of GlobalContext).
For the few contexts where one must change CL flags due to context
(such as testing and running in the browser), use ClFlags::Flags.
In the remainder of the cases, the code uses getFlags() which returns
a constant reference to ClFlags::Flags, allowing access to the get
accessors.
BUG=None
R=stichnot@chromium.org
Review URL: https://codereview.chromium.org/1848303003 .
diff --git a/src/IceAssemblerARM32.cpp b/src/IceAssemblerARM32.cpp
index 746a39f..bba8e16 100644
--- a/src/IceAssemblerARM32.cpp
+++ b/src/IceAssemblerARM32.cpp
@@ -688,8 +688,7 @@
}
void AssemblerARM32::bindCfgNodeLabel(const CfgNode *Node) {
- GlobalContext *Ctx = Node->getCfg()->getContext();
- if (BuildDefs::dump() && !Ctx->getFlags().getDisableHybridAssembly()) {
+ if (BuildDefs::dump() && !getFlags().getDisableHybridAssembly()) {
// Generate label name so that branches can find it.
constexpr SizeT InstSize = 0;
emitTextInst(Node->getAsmName() + ":", InstSize);
diff --git a/src/IceAssemblerARM32.h b/src/IceAssemblerARM32.h
index 0bfcac6..e3f9b4c 100644
--- a/src/IceAssemblerARM32.h
+++ b/src/IceAssemblerARM32.h
@@ -147,10 +147,8 @@
return getOrCreateLabel(Number, LocalLabels);
}
- void bindLocalLabel(const Cfg *Func, const InstARM32Label *InstL,
- SizeT Number) {
- if (BuildDefs::dump() &&
- !Func->getContext()->getFlags().getDisableHybridAssembly()) {
+ void bindLocalLabel(const InstARM32Label *InstL, SizeT Number) {
+ if (BuildDefs::dump() && !getFlags().getDisableHybridAssembly()) {
constexpr SizeT InstSize = 0;
emitTextInst(InstL->getLabelName() + ":", InstSize);
}
diff --git a/src/IceBrowserCompileServer.cpp b/src/IceBrowserCompileServer.cpp
index 6baf62d..3b9dc3f 100644
--- a/src/IceBrowserCompileServer.cpp
+++ b/src/IceBrowserCompileServer.cpp
@@ -165,14 +165,13 @@
void BrowserCompileServer::getParsedFlags(uint32_t NumThreads, int argc,
char **argv) {
ClFlags::parseFlags(argc, argv);
- ClFlags::getParsedClFlags(*Flags);
+ ClFlags::getParsedClFlags(ClFlags::Flags);
// Set some defaults which aren't specified via the argv string.
- Flags->setNumTranslationThreads(NumThreads);
- Flags->setUseSandboxing(true);
- Flags->setOutFileType(FT_Elf);
- Flags->setTargetArch(getTargetArch());
- Flags->setBuildOnRead(true);
- Flags->setInputFileFormat(llvm::PNaClFormat);
+ ClFlags::Flags.setNumTranslationThreads(NumThreads);
+ ClFlags::Flags.setUseSandboxing(true);
+ ClFlags::Flags.setOutFileType(FT_Elf);
+ ClFlags::Flags.setTargetArch(getTargetArch());
+ ClFlags::Flags.setInputFileFormat(llvm::PNaClFormat);
}
bool BrowserCompileServer::pushInputBytes(const void *Data, size_t NumBytes) {
@@ -219,7 +218,7 @@
CompileThread = std::thread([this]() {
llvm::install_fatal_error_handler(fatalErrorHandler, this);
Ctx->initParserThread();
- this->getCompiler().run(*Flags, *Ctx.get(),
+ this->getCompiler().run(ClFlags::Flags, *Ctx.get(),
// Retain original reference, but the compiler
// (LLVM's MemoryObject) wants to handle deletion.
std::unique_ptr<llvm::DataStreamer>(InputStream));
diff --git a/src/IceBrowserCompileServer.h b/src/IceBrowserCompileServer.h
index ecacdec..79ab4c0 100644
--- a/src/IceBrowserCompileServer.h
+++ b/src/IceBrowserCompileServer.h
@@ -41,7 +41,7 @@
class StringStream;
public:
- BrowserCompileServer() : Flags(&GlobalContext::Flags), HadError(false) {}
+ BrowserCompileServer() : HadError(false) {}
~BrowserCompileServer() final;
@@ -101,7 +101,6 @@
std::unique_ptr<llvm::raw_fd_ostream> EmitStream;
std::unique_ptr<StringStream> ErrorStream;
std::unique_ptr<ELFStreamer> ELFStream;
- ClFlags *Flags;
std::thread CompileThread;
std::atomic<bool> HadError;
};
diff --git a/src/IceCfg.cpp b/src/IceCfg.cpp
index 1965fe2..e5a71aa 100644
--- a/src/IceCfg.cpp
+++ b/src/IceCfg.cpp
@@ -34,23 +34,21 @@
namespace Ice {
Cfg::Cfg(GlobalContext *Ctx, uint32_t SequenceNumber)
- : Ctx(Ctx), SequenceNumber(SequenceNumber),
- VMask(Ctx->getFlags().getVerbose()), FunctionName(),
- NextInstNumber(Inst::NumberInitial), Live(nullptr) {
+ : Ctx(Ctx), SequenceNumber(SequenceNumber), VMask(getFlags().getVerbose()),
+ FunctionName(), NextInstNumber(Inst::NumberInitial), Live(nullptr) {
Allocator.reset(new ArenaAllocator());
NodeStrings.reset(new StringPool);
VarStrings.reset(new StringPool);
CfgLocalAllocatorScope _(this);
- Target =
- TargetLowering::createLowering(Ctx->getFlags().getTargetArch(), this);
+ Target = TargetLowering::createLowering(getFlags().getTargetArch(), this);
VMetadata.reset(new VariablesMetadata(this));
TargetAssembler = Target->createAssembler();
- if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_Randomize) {
+ if (getFlags().getRandomizeAndPoolImmediatesOption() == RPI_Randomize) {
// If -randomize-pool-immediates=randomize, create a random number
// generator to generate a cookie for constant blinding.
- RandomNumberGenerator RNG(Ctx->getFlags().getRandomSeed(),
- RPE_ConstantBlinding, this->SequenceNumber);
+ RandomNumberGenerator RNG(getFlags().getRandomSeed(), RPE_ConstantBlinding,
+ this->SequenceNumber);
ConstantBlindingCookie =
(uint32_t)RNG.next((uint64_t)std::numeric_limits<uint32_t>::max() + 1);
}
@@ -58,7 +56,7 @@
Cfg::~Cfg() {
assert(CfgAllocatorTraits::current() == nullptr);
- if (GlobalContext::getFlags().getDumpStrings()) {
+ if (getFlags().getDumpStrings()) {
OstreamLocker _(Ctx);
Ostream &Str = Ctx->getStrDump();
getNodeStrings()->dump(Str);
@@ -200,8 +198,7 @@
if (hasError())
return;
if (BuildDefs::dump()) {
- const std::string TimingFocusOn =
- getContext()->getFlags().getTimingFocusOn();
+ const std::string TimingFocusOn = getFlags().getTimingFocusOn();
const std::string Name = getFunctionName().toString();
if (TimingFocusOn == "*" || TimingFocusOn == Name) {
setFocusedTiming();
@@ -218,7 +215,7 @@
dump("Initial CFG");
- if (getContext()->getFlags().getEnableBlockProfile()) {
+ if (getFlags().getEnableBlockProfile()) {
profileBlocks();
// TODO(jpp): this is fragile, at best. Figure out a better way of
// detecting exit functions.
@@ -456,14 +453,14 @@
} // end of anonymous namespace
void Cfg::shuffleNodes() {
- if (!Ctx->getFlags().getReorderBasicBlocks())
+ if (!getFlags().getReorderBasicBlocks())
return;
NodeList ReversedReachable;
NodeList Unreachable;
BitVector ToVisit(Nodes.size(), true);
// Create Random number generator for function reordering
- RandomNumberGenerator RNG(Ctx->getFlags().getRandomSeed(),
+ RandomNumberGenerator RNG(getFlags().getRandomSeed(),
RPE_BasicBlockReordering, SequenceNumber);
// Traverse from entry node.
getRandomPostOrder(getEntryNode(), ToVisit, ReversedReachable, &RNG);
@@ -779,10 +776,10 @@
}
void Cfg::doNopInsertion() {
- if (!Ctx->getFlags().getShouldDoNopInsertion())
+ if (!getFlags().getShouldDoNopInsertion())
return;
TimerMarker T(TimerStack::TT_doNopInsertion, this);
- RandomNumberGenerator RNG(Ctx->getFlags().getRandomSeed(), RPE_NopInsertion,
+ RandomNumberGenerator RNG(getFlags().getRandomSeed(), RPE_NopInsertion,
SequenceNumber);
for (CfgNode *Node : Nodes)
Node->doNopInsertion(RNG);
@@ -960,7 +957,7 @@
// contain only redundant assignments. As such, we disable this pass when
// DecorateAsm is specified. This may make the resulting code look more
// branchy, but it should have no effect on the register assignments.
- if (Ctx->getFlags().getDecorateAsm())
+ if (getFlags().getDecorateAsm())
return;
for (CfgNode *Node : Nodes) {
Node->contractIfEmpty();
@@ -992,9 +989,9 @@
return;
Ostream &Str = Ctx->getStrEmit();
Str << "\t.text\n";
- if (Ctx->getFlags().getFunctionSections())
+ if (getFlags().getFunctionSections())
Str << "\t.section\t.text." << Name << ",\"ax\",%progbits\n";
- if (!Asm->getInternal() || Ctx->getFlags().getDisableInternal()) {
+ if (!Asm->getInternal() || getFlags().getDisableInternal()) {
Str << "\t.globl\t" << Name << "\n";
Str << "\t.type\t" << Name << ",%function\n";
}
@@ -1012,7 +1009,7 @@
}
void Cfg::emitJumpTables() {
- switch (Ctx->getFlags().getOutFileType()) {
+ switch (getFlags().getOutFileType()) {
case FT_Elf:
case FT_Iasm: {
// The emission needs to be delayed until the after the text section so
@@ -1040,7 +1037,7 @@
if (!BuildDefs::dump())
return;
TimerMarker T(TimerStack::TT_emitAsm, this);
- if (Ctx->getFlags().getDecorateAsm()) {
+ if (getFlags().getDecorateAsm()) {
renumberInstructions();
getVMetadata()->init(VMK_Uses);
liveness(Liveness_Basic);
@@ -1049,11 +1046,11 @@
OstreamLocker L(Ctx);
Ostream &Str = Ctx->getStrEmit();
const Assembler *Asm = getAssembler<>();
- const bool NeedSandboxing = Ctx->getFlags().getUseSandboxing();
+ const bool NeedSandboxing = getFlags().getUseSandboxing();
emitTextHeader(FunctionName, Ctx, Asm);
deleteJumpTableInsts();
- if (Ctx->getFlags().getDecorateAsm()) {
+ if (getFlags().getDecorateAsm()) {
for (Variable *Var : getVariables()) {
if (Var->getStackOffset() && !Var->isRematerializable()) {
Str << "\t" << Var->getSymbolicStackOffset(this) << " = "
@@ -1077,7 +1074,7 @@
// The emitIAS() routines emit into the internal assembler buffer, so there's
// no need to lock the streams.
deleteJumpTableInsts();
- const bool NeedSandboxing = Ctx->getFlags().getUseSandboxing();
+ const bool NeedSandboxing = getFlags().getUseSandboxing();
for (CfgNode *Node : Nodes) {
if (NeedSandboxing && Node->needsAlignment())
getAssembler()->alignCfgNode();
@@ -1118,7 +1115,7 @@
// Print function name+args
if (isVerbose(IceV_Instructions)) {
Str << "define ";
- if (getInternal() && !Ctx->getFlags().getDisableInternal())
+ if (getInternal() && !getFlags().getDisableInternal())
Str << "internal ";
Str << ReturnType << " @" << getFunctionName() << "(";
for (SizeT i = 0; i < Args.size(); ++i) {
diff --git a/src/IceCfgNode.cpp b/src/IceCfgNode.cpp
index a87642d..9fc6634 100644
--- a/src/IceCfgNode.cpp
+++ b/src/IceCfgNode.cpp
@@ -53,7 +53,7 @@
namespace {
template <typename List> void removeDeletedAndRenumber(List *L, Cfg *Func) {
const bool DoDelete =
- BuildDefs::minimal() || !GlobalContext::getFlags().getKeepDeletedInsts();
+ BuildDefs::minimal() || !getFlags().getKeepDeletedInsts();
auto I = L->begin(), E = L->end(), Next = I;
for (++Next; I != E; I = Next++) {
if (DoDelete && I->isDeleted()) {
@@ -1053,8 +1053,7 @@
Func->setCurrentNode(this);
Ostream &Str = Func->getContext()->getStrEmit();
Liveness *Liveness = Func->getLiveness();
- const bool DecorateAsm =
- Liveness && Func->getContext()->getFlags().getDecorateAsm();
+ const bool DecorateAsm = Liveness && getFlags().getDecorateAsm();
Str << getAsmName() << ":\n";
// LiveRegCount keeps track of the number of currently live variables that
// each register is assigned to. Normally that would be only 0 or 1, but the
@@ -1259,7 +1258,7 @@
}
// Do the simple emission if not sandboxed.
- if (!Func->getContext()->getFlags().getUseSandboxing()) {
+ if (!getFlags().getUseSandboxing()) {
for (const Inst &I : Insts) {
if (!I.isDeleted() && !I.isRedundantAssign()) {
I.emitIAS(Func);
diff --git a/src/IceClFlags.cpp b/src/IceClFlags.cpp
index 5befeff..042fc36 100644
--- a/src/IceClFlags.cpp
+++ b/src/IceClFlags.cpp
@@ -104,6 +104,8 @@
namespace Ice {
+ClFlags ClFlags::Flags;
+
void ClFlags::parseFlags(int argc, char **argv) {
cl::ParseCommandLineOptions(argc, argv);
AppNameObj = argv[0];
diff --git a/src/IceClFlags.h b/src/IceClFlags.h
index bf51999..0a2fc56 100644
--- a/src/IceClFlags.h
+++ b/src/IceClFlags.h
@@ -74,6 +74,9 @@
/// User defined constructor.
ClFlags() { resetClFlags(); }
+ /// The command line flags.
+ static ClFlags Flags;
+
/// \brief Parse commmand line options for Subzero.
///
/// This is done use cl::ParseCommandLineOptions() and the static variables of
@@ -155,6 +158,8 @@
bool GenerateUnitTestMessages;
};
+inline const ClFlags &getFlags() { return ClFlags::Flags; }
+
} // end of namespace Ice
#endif // SUBZERO_SRC_ICECLFLAGS_H
diff --git a/src/IceCompileServer.cpp b/src/IceCompileServer.cpp
index e62f268..0ed98ba 100644
--- a/src/IceCompileServer.cpp
+++ b/src/IceCompileServer.cpp
@@ -96,7 +96,7 @@
}
ErrorCodes getReturnValue(ErrorCodes Val) {
- if (GlobalContext::Flags.getAlwaysExitSuccess())
+ if (getFlags().getAlwaysExitSuccess())
return EC_None;
return Val;
}
@@ -159,7 +159,7 @@
llvm::sys::PrintStackTraceOnErrorSignal();
}
ClFlags::parseFlags(argc, argv);
- ClFlags &Flags = GlobalContext::Flags;
+ ClFlags &Flags = ClFlags::Flags;
ClFlags::getParsedClFlags(Flags);
// Override report_fatal_error if we want to exit with 0 status.
@@ -229,7 +229,7 @@
}
Ctx.reset(new GlobalContext(Ls.get(), Os.get(), Ls.get(), ELFStr.get()));
- if (Ctx->getFlags().getNumTranslationThreads() != 0) {
+ if (getFlags().getNumTranslationThreads() != 0) {
std::thread CompileThread([this, &Flags, &InputStream]() {
Ctx->initParserThread();
getCompiler().run(Flags, *Ctx.get(), std::move(InputStream));
diff --git a/src/IceCompiler.cpp b/src/IceCompiler.cpp
index 7a426fc..3120bea 100644
--- a/src/IceCompiler.cpp
+++ b/src/IceCompiler.cpp
@@ -61,7 +61,7 @@
// The Minimal build (specifically, when dump()/emit() are not implemented)
// allows only --filetype=obj. Check here to avoid cryptic error messages
// downstream.
- if (!BuildDefs::dump() && Ctx.getFlags().getOutFileType() != FT_Elf) {
+ if (!BuildDefs::dump() && getFlags().getOutFileType() != FT_Elf) {
Ctx.getStrError()
<< "Error: only --filetype=obj is supported in this build.\n";
Ctx.getErrorStatus()->assign(EC_Args);
@@ -130,17 +130,17 @@
Ctx.lowerConstants();
Ctx.lowerJumpTables();
- if (Ctx.getFlags().getOutFileType() == FT_Elf) {
+ if (getFlags().getOutFileType() == FT_Elf) {
TimerMarker T1(Ice::TimerStack::TT_emitAsm, &Ctx);
Ctx.getObjectWriter()->setUndefinedSyms(Ctx.getConstantExternSyms());
Ctx.getObjectWriter()->writeNonUserSections();
}
}
- if (Ctx.getFlags().getSubzeroTimingEnabled())
+ if (getFlags().getSubzeroTimingEnabled())
Ctx.dumpTimers();
- if (Ctx.getFlags().getTimeEachFunction()) {
+ if (getFlags().getTimeEachFunction()) {
constexpr bool NoDumpCumulative = false;
Ctx.dumpTimers(GlobalContext::TSK_Funcs, NoDumpCumulative);
}
diff --git a/src/IceConverter.cpp b/src/IceConverter.cpp
index e1900c3..0729b73 100644
--- a/src/IceConverter.cpp
+++ b/src/IceConverter.cpp
@@ -722,7 +722,7 @@
}
if (!GV->hasInitializer()) {
- if (Ctx->getFlags().getAllowUninitializedGlobals())
+ if (Ice::getFlags().getAllowUninitializedGlobals())
continue;
else {
std::string Buffer;
@@ -807,7 +807,7 @@
namespace Ice {
void Converter::nameUnnamedGlobalVariables(Module *Mod) {
- const std::string GlobalPrefix = Ctx->getFlags().getDefaultGlobalPrefix();
+ const std::string GlobalPrefix = getFlags().getDefaultGlobalPrefix();
if (GlobalPrefix.empty())
return;
uint32_t NameIndex = 0;
@@ -822,7 +822,7 @@
}
void Converter::nameUnnamedFunctions(Module *Mod) {
- const std::string FunctionPrefix = Ctx->getFlags().getDefaultFunctionPrefix();
+ const std::string FunctionPrefix = getFlags().getDefaultFunctionPrefix();
if (FunctionPrefix.empty())
return;
uint32_t NameIndex = 0;
@@ -895,7 +895,7 @@
Var->setAlignment(GV->getAlignment());
Var->setIsConstant(GV->isConstant());
Var->setName(Ctx, GV->getName());
- if (!Var->verifyLinkageCorrect(Ctx)) {
+ if (!Var->verifyLinkageCorrect()) {
std::string Buffer;
raw_string_ostream StrBuf(Buffer);
StrBuf << "Global " << Var->getName()
diff --git a/src/IceELFObjectWriter.cpp b/src/IceELFObjectWriter.cpp
index c3ba9b0..e960d9c 100644
--- a/src/IceELFObjectWriter.cpp
+++ b/src/IceELFObjectWriter.cpp
@@ -74,7 +74,7 @@
} // end of anonymous namespace
ELFObjectWriter::ELFObjectWriter(GlobalContext &Ctx, ELFStreamer &Out)
- : Ctx(Ctx), Str(Out), ELF64(isELF64(Ctx.getFlags())) {
+ : Ctx(Ctx), Str(Out), ELF64(isELF64(getFlags())) {
// Create the special bookkeeping sections now.
constexpr char NullSectionName[] = "";
NullSection = new (Ctx.allocate<ELFSection>())
@@ -223,7 +223,7 @@
TimerMarker Timer(TimerStack::TT_writeELF, &Ctx);
ELFTextSection *Section = nullptr;
ELFRelocationSection *RelSection = nullptr;
- const bool FunctionSections = Ctx.getFlags().getFunctionSections();
+ const bool FunctionSections = getFlags().getFunctionSections();
if (TextSections.empty() || FunctionSections) {
std::string SectionName = ".text";
if (FunctionSections)
@@ -247,7 +247,7 @@
constexpr SizeT SymbolSize = 0;
uint8_t SymbolType;
uint8_t SymbolBinding;
- if (IsInternal && !Ctx.getFlags().getDisableInternal()) {
+ if (IsInternal && !getFlags().getDisableInternal()) {
SymbolType = STT_NOTYPE;
SymbolBinding = STB_LOCAL;
} else {
@@ -310,8 +310,7 @@
VariableDeclarationPartition VarsBySection[ELFObjectWriter::NumSectionTypes];
for (auto &SectionList : VarsBySection)
SectionList.reserve(Vars.size());
- partitionGlobalsBySection(Vars, VarsBySection,
- Ctx.getFlags().getTranslateOnly());
+ partitionGlobalsBySection(Vars, VarsBySection, getFlags().getTranslateOnly());
size_t I = 0;
for (auto &SectionList : VarsBySection) {
writeDataOfType(static_cast<SectionType>(I++), SectionList, RelocationKind,
@@ -392,7 +391,7 @@
const auto Align = std::max<Elf64_Xword>(MinAlign, Var->getAlignment());
Section->padToAlignment(Str, Align);
SizeT SymbolSize = Var->getNumBytes();
- bool IsExternal = Var->isExternal() || Ctx.getFlags().getDisableInternal();
+ bool IsExternal = Var->isExternal() || getFlags().getDisableInternal();
const uint8_t SymbolBinding = IsExternal ? STB_GLOBAL : STB_LOCAL;
GlobalString Name = Var->getName();
SymTab->createDefinedSym(Name, SymbolType, SymbolBinding, Section,
@@ -476,12 +475,12 @@
assert(NumSections < SHN_LORESERVE);
assert(SectHeaderStrIndex < SHN_LORESERVE);
- const TargetArch Arch = Ctx.getFlags().getTargetArch();
+ const TargetArch Arch = getFlags().getTargetArch();
// Write the rest of the file header, which does depend on byte order and ELF
// class.
- Str.writeLE16(ET_REL); // e_type
- Str.writeLE16(getELFMachine(Ctx.getFlags().getTargetArch())); // e_machine
- Str.writeELFWord<IsELF64>(1); // e_version
+ Str.writeLE16(ET_REL); // e_type
+ Str.writeLE16(getELFMachine(getFlags().getTargetArch())); // e_machine
+ Str.writeELFWord<IsELF64>(1); // e_version
// Since this is for a relocatable object, there is no entry point, and no
// program headers.
Str.writeAddrOrOffset<IsELF64>(0); // e_entry
@@ -531,11 +530,11 @@
// If the -reorder-pooled-constant option is set to true, we should shuffle
// the constants before we emit them.
- if (Ctx.getFlags().getReorderPooledConstants() && !Pool.empty()) {
+ if (getFlags().getReorderPooledConstants() && !Pool.empty()) {
// Use the constant's kind value as the salt for creating random number
// generator.
Operand::OperandKind K = (*Pool.begin())->getKind();
- RandomNumberGenerator RNG(Ctx.getFlags().getRandomSeed(),
+ RandomNumberGenerator RNG(getFlags().getRandomSeed(),
RPE_PooledConstantReordering, K);
RandomShuffle(Pool.begin(), Pool.end(),
[&RNG](uint64_t N) { return (uint32_t)RNG.next(N); });
@@ -597,7 +596,7 @@
constexpr uint8_t SymbolType = STT_OBJECT;
Section->padToAlignment(Str, PointerSize);
- const bool IsExternal = Ctx.getFlags().getDisableInternal();
+ const bool IsExternal = getFlags().getDisableInternal();
const uint8_t SymbolBinding = IsExternal ? STB_GLOBAL : STB_LOCAL;
GlobalString JumpTableName = Ctx.getGlobalString(
InstJumpTable::makeName(JT.getFunctionName(), JT.getId()));
diff --git a/src/IceFixups.cpp b/src/IceFixups.cpp
index 3d3b1b4..b7e8031 100644
--- a/src/IceFixups.cpp
+++ b/src/IceFixups.cpp
@@ -58,8 +58,7 @@
Str << Symbol;
assert(!ValueIsSymbol);
if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(ConstValue)) {
- if (!Asm.fixupIsPCRel(kind()) &&
- GlobalContext::getFlags().getUseNonsfi() &&
+ if (!Asm.fixupIsPCRel(kind()) && getFlags().getUseNonsfi() &&
CR->getName().toString() != GlobalOffsetTable) {
Str << "@GOTOFF";
}
diff --git a/src/IceFixups.h b/src/IceFixups.h
index b6ba62a..191625a 100644
--- a/src/IceFixups.h
+++ b/src/IceFixups.h
@@ -15,6 +15,7 @@
#ifndef SUBZERO_SRC_ICEFIXUPS_H
#define SUBZERO_SRC_ICEFIXUPS_H
+#include "IceClFlags.h"
#include "IceDefs.h"
#include "IceStringPool.h"
diff --git a/src/IceGlobalContext.cpp b/src/IceGlobalContext.cpp
index bacaec3..5689a3e 100644
--- a/src/IceGlobalContext.cpp
+++ b/src/IceGlobalContext.cpp
@@ -293,13 +293,13 @@
ELFStreamer *ELFStr)
: Strings(new StringPool()), ConstPool(new ConstantPool()), ErrorStatus(),
StrDump(OsDump), StrEmit(OsEmit), StrError(OsError), IntrinsicsInfo(this),
- ObjectWriter(),
- OptQ(/*Sequential=*/Flags.isSequential(),
- /*MaxSize=*/
- Flags.isParseParallel() ? MaxOptQSize
- : Flags.getNumTranslationThreads()),
+ ObjectWriter(), OptQ(/*Sequential=*/getFlags().isSequential(),
+ /*MaxSize=*/
+ getFlags().isParseParallel()
+ ? MaxOptQSize
+ : getFlags().getNumTranslationThreads()),
// EmitQ is allowed unlimited size.
- EmitQ(/*Sequential=*/Flags.isSequential()),
+ EmitQ(/*Sequential=*/getFlags().isSequential()),
DataLowering(TargetDataLowering::createLowering(this)) {
assert(OsDump && "OsDump is not defined for GlobalContext");
assert(OsEmit && "OsEmit is not defined for GlobalContext");
@@ -325,7 +325,7 @@
newTimerStackID("Per-function summary");
}
Timers.initInto(MyTLS->Timers);
- switch (Flags.getOutFileType()) {
+ switch (getFlags().getOutFileType()) {
case FT_Elf:
ObjectWriter.reset(new ELFObjectWriter(*this, *ELFStr));
break;
@@ -455,9 +455,9 @@
void GlobalContext::lowerGlobals(const std::string &SectionSuffix) {
TimerMarker T(TimerStack::TT_emitGlobalInitializers, this);
- const bool DumpGlobalVariables = BuildDefs::dump() &&
- (Flags.getVerbose() & IceV_GlobalInit) &&
- Flags.getVerboseFocusOn().empty();
+ const bool DumpGlobalVariables =
+ BuildDefs::dump() && (getFlags().getVerbose() & IceV_GlobalInit) &&
+ getFlags().getVerboseFocusOn().empty();
if (DumpGlobalVariables) {
OstreamLocker L(this);
Ostream &Stream = getStrDump();
@@ -465,7 +465,7 @@
Global->dump(Stream);
}
}
- if (Flags.getDisableTranslation())
+ if (getFlags().getDisableTranslation())
return;
saveBlockInfoPtrs();
@@ -679,8 +679,8 @@
void GlobalContext::dumpConstantLookupCounts() {
if (!BuildDefs::dump())
return;
- const bool DumpCounts = (Flags.getVerbose() & IceV_ConstPoolStats) &&
- Flags.getVerboseFocusOn().empty();
+ const bool DumpCounts = (getFlags().getVerbose() & IceV_ConstPoolStats) &&
+ getFlags().getVerboseFocusOn().empty();
if (!DumpCounts)
return;
@@ -980,13 +980,11 @@
return PoolOwner->getStrings();
}
-ClFlags GlobalContext::Flags;
-
TimerIdT TimerMarker::getTimerIdFromFuncName(GlobalContext *Ctx,
const std::string &FuncName) {
if (!BuildDefs::timers())
return 0;
- if (!Ctx->getFlags().getTimeEachFunction())
+ if (!getFlags().getTimeEachFunction())
return 0;
return Ctx->getTimerID(GlobalContext::TSK_Funcs, FuncName);
}
@@ -994,10 +992,10 @@
void TimerMarker::push() {
switch (StackID) {
case GlobalContext::TSK_Default:
- Active = Ctx->getFlags().getSubzeroTimingEnabled();
+ Active = getFlags().getSubzeroTimingEnabled();
break;
case GlobalContext::TSK_Funcs:
- Active = Ctx->getFlags().getTimeEachFunction();
+ Active = getFlags().getTimeEachFunction();
break;
default:
break;
@@ -1008,8 +1006,7 @@
void TimerMarker::pushCfg(const Cfg *Func) {
Ctx = Func->getContext();
- Active =
- Func->getFocusedTiming() || Ctx->getFlags().getSubzeroTimingEnabled();
+ Active = Func->getFocusedTiming() || getFlags().getSubzeroTimingEnabled();
if (Active)
Ctx->pushTimer(ID, StackID);
}
diff --git a/src/IceGlobalContext.h b/src/IceGlobalContext.h
index 5d27f5d..cce9492 100644
--- a/src/IceGlobalContext.h
+++ b/src/IceGlobalContext.h
@@ -41,7 +41,6 @@
namespace Ice {
-class ClFlags;
class ConstantPool;
class EmitterWorkItem;
class FuncSigType;
@@ -273,8 +272,6 @@
JumpTableData &addJumpTable(GlobalString FuncName, SizeT Id,
const JumpTableData::TargetList &TargetList);
- static const ClFlags &getFlags() { return Flags; }
-
/// Allocate data of type T using the global allocator. We allow entities
/// allocated from this global allocator to be either trivially or
/// non-trivially destructible. We optimize the case when T is trivially
@@ -460,8 +457,6 @@
static bool matchSymbolName(const GlobalString &SymbolName,
const std::string &Match);
- static ClFlags Flags;
-
/// DisposeGlobalVariablesAfterLowering controls whether the memory used by
/// GlobaleVariables can be reclaimed right after they have been lowered.
/// @{
diff --git a/src/IceGlobalInits.h b/src/IceGlobalInits.h
index 862cc1d..378d5d7 100644
--- a/src/IceGlobalInits.h
+++ b/src/IceGlobalInits.h
@@ -109,14 +109,14 @@
/// Returns true if linkage is defined correctly for the global declaration,
/// based on default rules.
- bool verifyLinkageDefault(const GlobalContext *Ctx) const {
+ bool verifyLinkageDefault() const {
switch (Linkage) {
default:
return false;
case llvm::GlobalValue::InternalLinkage:
return true;
case llvm::GlobalValue::ExternalLinkage:
- return Ctx->getFlags().getAllowExternDefinedSymbols();
+ return getFlags().getAllowExternDefinedSymbols();
}
}
@@ -160,7 +160,7 @@
return Linkage == llvm::GlobalValue::ExternalLinkage;
}
}
- return verifyLinkageDefault(Ctx);
+ return verifyLinkageDefault();
}
/// Validates that the type signature of the function is correct. Returns true
@@ -461,13 +461,13 @@
virtual void dump(Ostream &Stream) const override;
/// Returns true if linkage is correct for the variable declaration.
- bool verifyLinkageCorrect(const GlobalContext *Ctx) const {
+ bool verifyLinkageCorrect() const {
if (getName().hasStdString()) {
if (isPNaClABIExternalName(getName().toString())) {
return Linkage == llvm::GlobalValue::ExternalLinkage;
}
}
- return verifyLinkageDefault(Ctx);
+ return verifyLinkageDefault();
}
static bool classof(const GlobalDeclaration *Addr) {
diff --git a/src/IceInstARM32.cpp b/src/IceInstARM32.cpp
index a025da5..92a57e0 100644
--- a/src/IceInstARM32.cpp
+++ b/src/IceInstARM32.cpp
@@ -103,8 +103,8 @@
return;
GlobalContext *Ctx = Func->getContext();
auto *Asm = Func->getAssembler<ARM32::AssemblerARM32>();
- if (Ctx->getFlags().getDisableHybridAssembly() &&
- Ctx->getFlags().getSkipUnimplemented()) {
+ if (getFlags().getDisableHybridAssembly() &&
+ getFlags().getSkipUnimplemented()) {
Asm->trap();
Asm->resetNeedsTextFixup();
return;
@@ -120,12 +120,12 @@
Asm->incEmitTextSize(InstSize);
emit(Func);
Ctx->setStrEmit(OldStr);
- if (Ctx->getFlags().getDisableHybridAssembly()) {
- if (Ctx->getFlags().getSkipUnimplemented()) {
+ if (getFlags().getDisableHybridAssembly()) {
+ if (getFlags().getSkipUnimplemented()) {
Asm->trap();
} else {
llvm::errs() << "Can't assemble: " << StrBuf.str() << "\n";
- UnimplementedError(Ctx->getFlags());
+ UnimplementedError(getFlags());
}
Asm->resetNeedsTextFixup();
return;
@@ -1741,7 +1741,7 @@
void InstARM32Label::emitIAS(const Cfg *Func) const {
auto *Asm = Func->getAssembler<ARM32::AssemblerARM32>();
- Asm->bindLocalLabel(Func, this, Number);
+ Asm->bindLocalLabel(this, Number);
if (OffsetReloc != nullptr) {
Asm->bindRelocOffset(OffsetReloc);
}
@@ -1887,7 +1887,7 @@
if (auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src0)) {
Str << "#:lower16:";
CR->emitWithoutPrefix(Func->getTarget());
- if (Func->getContext()->getFlags().getUseNonsfi()) {
+ if (getFlags().getUseNonsfi()) {
Str << " - .";
}
} else {
@@ -1916,7 +1916,7 @@
if (auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src1)) {
Str << "#:upper16:";
CR->emitWithoutPrefix(Func->getTarget());
- if (Func->getContext()->getFlags().getUseNonsfi()) {
+ if (getFlags().getUseNonsfi()) {
Str << " - .";
}
} else {
diff --git a/src/IceInstX8632.cpp b/src/IceInstX8632.cpp
index 5d081e0..006d781 100644
--- a/src/IceInstX8632.cpp
+++ b/src/IceInstX8632.cpp
@@ -131,7 +131,7 @@
void TargetX8632Traits::X86OperandMem::emit(const Cfg *Func) const {
if (!BuildDefs::dump())
return;
- const bool UseNonsfi = Func->getContext()->getFlags().getUseNonsfi();
+ const bool UseNonsfi = getFlags().getUseNonsfi();
validateMemOperandPIC(this, UseNonsfi);
const auto *Target =
static_cast<const ::Ice::X8632::TargetX8632 *>(Func->getTarget());
@@ -263,7 +263,7 @@
const Ice::TargetLowering *TargetLowering, bool /*IsLeaAddr*/) const {
const auto *Target =
static_cast<const ::Ice::X8632::TargetX8632 *>(TargetLowering);
- const bool UseNonsfi = Target->getGlobalContext()->getFlags().getUseNonsfi();
+ const bool UseNonsfi = getFlags().getUseNonsfi();
validateMemOperandPIC(this, UseNonsfi);
int32_t Disp = 0;
if (getBase() && getBase()->isRematerializable()) {
diff --git a/src/IceInstX8664.cpp b/src/IceInstX8664.cpp
index ec2d140..0915bc8 100644
--- a/src/IceInstX8664.cpp
+++ b/src/IceInstX8664.cpp
@@ -134,7 +134,7 @@
// TODO(sehr): ConstantRelocatable still needs updating for
// rematerializable base/index and Disp.
assert(Disp == 0);
- const bool UseNonsfi = Func->getContext()->getFlags().getUseNonsfi();
+ const bool UseNonsfi = getFlags().getUseNonsfi();
CR->emitWithoutPrefix(Target, UseNonsfi ? "@GOTOFF" : "");
assert(!UseNonsfi);
if (Base == nullptr && Index == nullptr) {
diff --git a/src/IceInstX86BaseImpl.h b/src/IceInstX86BaseImpl.h
index 3de3296..b9ebf51 100644
--- a/src/IceInstX86BaseImpl.h
+++ b/src/IceInstX86BaseImpl.h
@@ -368,13 +368,13 @@
template <typename TraitsType>
InstImpl<TraitsType>::InstX86IacaStart::InstX86IacaStart(Cfg *Func)
: InstX86Base(Func, InstX86Base::IacaStart, 0, nullptr) {
- assert(Func->getContext()->getFlags().getAllowIacaMarks());
+ assert(getFlags().getAllowIacaMarks());
}
template <typename TraitsType>
InstImpl<TraitsType>::InstX86IacaEnd::InstX86IacaEnd(Cfg *Func)
: InstX86Base(Func, InstX86Base::IacaEnd, 0, nullptr) {
- assert(Func->getContext()->getFlags().getAllowIacaMarks());
+ assert(getFlags().getAllowIacaMarks());
}
// ======================== Dump routines ======================== //
diff --git a/src/IceMangling.cpp b/src/IceMangling.cpp
index 28275c1..fe46bb1 100644
--- a/src/IceMangling.cpp
+++ b/src/IceMangling.cpp
@@ -126,10 +126,10 @@
// _Z3barxyz ==> ZN6Prefix3barExyz
// An unmangled, extern "C" style name, gets a simple prefix:
// bar ==> Prefixbar
- if (!BuildDefs::dump() || GlobalContext::getFlags().getTestPrefix().empty())
+ if (!BuildDefs::dump() || getFlags().getTestPrefix().empty())
return Name;
- const std::string TestPrefix = GlobalContext::getFlags().getTestPrefix();
+ const std::string TestPrefix = getFlags().getTestPrefix();
unsigned PrefixLength = TestPrefix.length();
ManglerVector NameBase(1 + Name.length());
const size_t BufLen = 30 + Name.length() + PrefixLength;
diff --git a/src/IceOperand.cpp b/src/IceOperand.cpp
index 13f96b9..462ba9a 100644
--- a/src/IceOperand.cpp
+++ b/src/IceOperand.cpp
@@ -643,10 +643,8 @@
// Specialization of the template member function for ConstantInteger32
// TODO(stichnot): try to move this specialization into a target-specific file.
template <> bool ConstantInteger32::shouldBeRandomizedOrPooled() const {
- uint32_t Threshold =
- GlobalContext::getFlags().getRandomizeAndPoolImmediatesThreshold();
- if (GlobalContext::getFlags().getRandomizeAndPoolImmediatesOption() ==
- RPI_None)
+ uint32_t Threshold = getFlags().getRandomizeAndPoolImmediatesThreshold();
+ if (getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None)
return false;
if (getType() != IceType_i32 && getType() != IceType_i16 &&
getType() != IceType_i8)
diff --git a/src/IceOperand.h b/src/IceOperand.h
index d80bede..04895f6 100644
--- a/src/IceOperand.h
+++ b/src/IceOperand.h
@@ -224,7 +224,7 @@
// human-readable sprintf form, changing '+' to 'p' and '-' to 'm' to
// maintain valid asm labels.
if (std::is_floating_point<PrimType>::value && !BuildDefs::minimal() &&
- GlobalContext::getFlags().getDecorateAsm()) {
+ getFlags().getDecorateAsm()) {
char Buf[30];
snprintf(Buf, llvm::array_lengthof(Buf), "$%g", (double)Value);
for (unsigned i = 0; i < llvm::array_lengthof(Buf) && Buf[i]; ++i) {
diff --git a/src/IceRegAlloc.cpp b/src/IceRegAlloc.cpp
index 774815e..2eb3a84 100644
--- a/src/IceRegAlloc.cpp
+++ b/src/IceRegAlloc.cpp
@@ -95,7 +95,7 @@
LinearScan::LinearScan(Cfg *Func)
: Func(Func), Ctx(Func->getContext()), Target(Func->getTarget()),
Verbose(BuildDefs::dump() && Func->isVerbose(IceV_LinearScan)),
- UseReserve(Ctx->getFlags().getRegAllocReserve()) {}
+ UseReserve(getFlags().getRegAllocReserve()) {}
// Prepare for full register allocation of all variables. We depend on liveness
// analysis to have calculated live ranges.
diff --git a/src/IceTargetLowering.cpp b/src/IceTargetLowering.cpp
index 94c8ecc..29fbc7b 100644
--- a/src/IceTargetLowering.cpp
+++ b/src/IceTargetLowering.cpp
@@ -212,8 +212,8 @@
}
};
- processRegList(Ctx->getFlags().getUseRestrictedRegisters(), UseSet);
- processRegList(Ctx->getFlags().getExcludedRegisters(), ExcludeSet);
+ processRegList(getFlags().getUseRestrictedRegisters(), UseSet);
+ processRegList(getFlags().getExcludedRegisters(), ExcludeSet);
if (!BadRegNames.empty()) {
std::string Buffer;
@@ -236,7 +236,7 @@
// Display filtered register sets, if requested.
if (BuildDefs::dump() && NumRegs &&
- (Ctx->getFlags().getVerbose() & IceV_AvailableRegs)) {
+ (getFlags().getVerbose() & IceV_AvailableRegs)) {
Ostream &Str = Ctx->getStrDump();
const std::string Indent = " ";
const std::string IndentTwice = Indent + Indent;
@@ -265,7 +265,7 @@
}
void TargetLowering::staticInit(GlobalContext *Ctx) {
- const TargetArch Target = Ctx->getFlags().getTargetArch();
+ const TargetArch Target = getFlags().getTargetArch();
// Call the specified target's static initializer.
switch (Target) {
default:
@@ -285,7 +285,7 @@
}
bool TargetLowering::shouldBePooled(const Constant *C) {
- const TargetArch Target = GlobalContext::getFlags().getTargetArch();
+ const TargetArch Target = getFlags().getTargetArch();
switch (Target) {
default:
return false;
@@ -311,12 +311,11 @@
TargetLowering::TargetLowering(Cfg *Func)
: Func(Func), Ctx(Func->getContext()),
- SandboxingType(determineSandboxTypeFromFlags(Ctx->getFlags())) {}
+ SandboxingType(determineSandboxTypeFromFlags(getFlags())) {}
TargetLowering::AutoBundle::AutoBundle(TargetLowering *Target,
InstBundleLock::Option Option)
- : Target(Target),
- NeedSandboxing(Target->Ctx->getFlags().getUseSandboxing()) {
+ : Target(Target), NeedSandboxing(getFlags().getUseSandboxing()) {
assert(!Target->AutoBundling);
Target->AutoBundling = true;
if (NeedSandboxing) {
@@ -359,8 +358,8 @@
llvm::isa<InstFakeKill>(I) || I->isRedundantAssign() ||
I->isDeleted();
if (!ShouldSkip) {
- int Probability = Ctx->getFlags().getNopProbabilityAsPercentage();
- for (int I = 0; I < Ctx->getFlags().getMaxNopsPerInstruction(); ++I) {
+ int Probability = getFlags().getNopProbabilityAsPercentage();
+ for (int I = 0; I < getFlags().getMaxNopsPerInstruction(); ++I) {
randomlyInsertNop(Probability / 100.0, RNG);
}
}
@@ -492,10 +491,10 @@
if (hasFramePointer())
RegExclude |= RegSet_FramePointer;
SmallBitVector RegMask = getRegisterSet(RegInclude, RegExclude);
- bool Repeat = (Kind == RAK_Global && Ctx->getFlags().getRepeatRegAlloc());
+ bool Repeat = (Kind == RAK_Global && getFlags().getRepeatRegAlloc());
do {
LinearScan.init(Kind);
- LinearScan.scan(RegMask, Ctx->getFlags().getRandomizeRegisterAllocation());
+ LinearScan.scan(RegMask, getFlags().getRandomizeRegisterAllocation());
if (!LinearScan.hasEvictions())
Repeat = false;
Kind = RAK_SecondChance;
@@ -645,7 +644,7 @@
}
// For testing legalization of large stack offsets on targets with limited
// offset bits in instruction encodings, add some padding.
- *SpillAreaSizeBytes += Ctx->getFlags().getTestStackExtra();
+ *SpillAreaSizeBytes += getFlags().getTestStackExtra();
}
void TargetLowering::alignStackSpillAreas(uint32_t SpillAreaStartOffset,
@@ -684,7 +683,7 @@
// SpillAreaSizeBytes. On the other hand, when UseFramePointer is false, the
// offsets depend on the gap between SpillAreaSizeBytes and
// SpillAreaPaddingBytes, so we don't increment that.
- size_t TestPadding = Ctx->getFlags().getTestStackExtra();
+ size_t TestPadding = getFlags().getTestStackExtra();
if (UsesFramePointer)
SpillAreaPaddingBytes += TestPadding;
size_t GlobalsSpaceUsed = SpillAreaPaddingBytes;
@@ -725,8 +724,7 @@
}
bool TargetLowering::shouldOptimizeMemIntrins() {
- return Ctx->getFlags().getOptLevel() >= Opt_1 ||
- Ctx->getFlags().getForceMemIntrinOpt();
+ return getFlags().getOptLevel() >= Opt_1 || getFlags().getForceMemIntrinOpt();
}
void TargetLowering::scalarizeArithmetic(InstArithmetic::OpKind Kind,
@@ -761,7 +759,7 @@
std::unique_ptr<TargetDataLowering>
TargetDataLowering::createLowering(GlobalContext *Ctx) {
- TargetArch Target = Ctx->getFlags().getTargetArch();
+ TargetArch Target = getFlags().getTargetArch();
switch (Target) {
default:
badTargetFatalError(Target);
@@ -805,8 +803,7 @@
// If external and not initialized, this must be a cross test. Don't generate
// a declaration for such cases.
- const bool IsExternal =
- Var.isExternal() || Ctx->getFlags().getDisableInternal();
+ const bool IsExternal = Var.isExternal() || getFlags().getDisableInternal();
if (IsExternal && !Var.hasInitializer())
return;
@@ -818,8 +815,8 @@
Str << "\t.type\t" << Name << ",%object\n";
- const bool UseDataSections = Ctx->getFlags().getDataSections();
- const bool UseNonsfi = Ctx->getFlags().getUseNonsfi();
+ const bool UseDataSections = getFlags().getDataSections();
+ const bool UseNonsfi = getFlags().getUseNonsfi();
const std::string Suffix =
dataSectionSuffix(SectionSuffix, Name, UseDataSections);
if (IsConstant && UseNonsfi)
@@ -892,7 +889,7 @@
std::unique_ptr<TargetHeaderLowering>
TargetHeaderLowering::createLowering(GlobalContext *Ctx) {
- TargetArch Target = Ctx->getFlags().getTargetArch();
+ TargetArch Target = getFlags().getTargetArch();
switch (Target) {
default:
badTargetFatalError(Target);
diff --git a/src/IceTargetLowering.h b/src/IceTargetLowering.h
index 03a8cb2..37bf127 100644
--- a/src/IceTargetLowering.h
+++ b/src/IceTargetLowering.h
@@ -51,7 +51,7 @@
// FakeDef and FakeUse instructions to try maintain liveness consistency.
#define UnimplementedLoweringError(Target, Instr) \
do { \
- if ((Target)->Ctx->getFlags().getSkipUnimplemented()) { \
+ if (getFlags().getSkipUnimplemented()) { \
(Target)->addFakeDefUses(Instr); \
} else { \
/* Use llvm_unreachable instead of report_fatal_error, which gives \
@@ -181,7 +181,7 @@
virtual std::unique_ptr<Assembler> createAssembler() const = 0;
void translate() {
- switch (Ctx->getFlags().getOptLevel()) {
+ switch (getFlags().getOptLevel()) {
case Opt_m1:
translateOm1();
break;
diff --git a/src/IceTargetLoweringARM32.cpp b/src/IceTargetLoweringARM32.cpp
index d3313e5..0f626e6 100644
--- a/src/IceTargetLoweringARM32.cpp
+++ b/src/IceTargetLoweringARM32.cpp
@@ -52,7 +52,7 @@
void staticInit(::Ice::GlobalContext *Ctx) {
::Ice::ARM32::TargetARM32::staticInit(Ctx);
- if (Ctx->getFlags().getUseNonsfi()) {
+ if (Ice::getFlags().getUseNonsfi()) {
// In nonsfi, we need to reference the _GLOBAL_OFFSET_TABLE_ for accessing
// globals. The GOT is an external symbol (i.e., it is not defined in the
// pexe) so we need to register it as such so that ELF emission won't barf
@@ -300,7 +300,7 @@
TargetARM32::TargetARM32(Cfg *Func)
: TargetLowering(Func), NeedSandboxing(SandboxingType == ST_NaCl),
- CPUFeatures(Func->getContext()->getFlags()) {}
+ CPUFeatures(getFlags()) {}
void TargetARM32::staticInit(GlobalContext *Ctx) {
RegNumT::setLimit(RegARM32::Reg_NUM);
@@ -1041,7 +1041,7 @@
Func->processAllocas(SortAndCombineAllocas);
Func->dump("After Alloca processing");
- if (!Ctx->getFlags().getEnablePhiEdgeSplit()) {
+ if (!getFlags().getEnablePhiEdgeSplit()) {
// Lower Phi instructions.
Func->placePhiLoads();
if (Func->hasError())
@@ -1109,7 +1109,7 @@
copyRegAllocFromInfWeightVariable64On32(Func->getVariables());
Func->dump("After linear scan regalloc");
- if (Ctx->getFlags().getEnablePhiEdgeSplit()) {
+ if (getFlags().getEnablePhiEdgeSplit()) {
Func->advancedPhiLowering();
Func->dump("After advanced Phi lowering");
}
@@ -1138,7 +1138,7 @@
Func->dump("After branch optimization");
// Nop insertion
- if (Ctx->getFlags().getShouldDoNopInsertion()) {
+ if (getFlags().getShouldDoNopInsertion()) {
Func->doNopInsertion();
}
}
@@ -1200,7 +1200,7 @@
Func->dump("After postLowerLegalization");
// Nop insertion
- if (Ctx->getFlags().getShouldDoNopInsertion()) {
+ if (getFlags().getShouldDoNopInsertion()) {
Func->doNopInsertion();
}
}
@@ -1256,8 +1256,9 @@
void TargetARM32::emitJumpTable(const Cfg *Func,
const InstJumpTable *JumpTable) const {
+ (void)Func;
(void)JumpTable;
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
}
void TargetARM32::emitVariable(const Variable *Var) const {
@@ -1767,7 +1768,7 @@
if (!PreservedSRegs.empty())
_pop(PreservedSRegs);
- if (!Ctx->getFlags().getUseSandboxing())
+ if (!getFlags().getUseSandboxing())
return;
// Change the original ret instruction into a sandboxed return sequence.
@@ -2255,7 +2256,7 @@
const uint32_t Alignment =
std::max(AlignmentParam, ARM32_STACK_ALIGNMENT_BYTES);
const bool OverAligned = Alignment > ARM32_STACK_ALIGNMENT_BYTES;
- const bool OptM1 = Ctx->getFlags().getOptLevel() == Opt_m1;
+ const bool OptM1 = getFlags().getOptLevel() == Opt_m1;
const bool AllocaWithKnownOffset = Instr->getKnownFrameOffset();
const bool UseFramePointer =
hasFramePointer() || OverAligned || !AllocaWithKnownOffset || OptM1;
@@ -3356,7 +3357,7 @@
return;
}
case InstArithmetic::Mul: {
- const bool OptM1 = Ctx->getFlags().getOptLevel() == Opt_m1;
+ const bool OptM1 = getFlags().getOptLevel() == Opt_m1;
if (!OptM1 && Srcs.hasConstOperand()) {
constexpr std::size_t MaxShifts = 4;
std::array<StrengthReduction::AggregationElement, MaxShifts> Shifts;
@@ -6135,7 +6136,7 @@
}
void TargetARM32::postLower() {
- if (Ctx->getFlags().getOptLevel() == Opt_m1)
+ if (getFlags().getOptLevel() == Opt_m1)
return;
markRedefinitions();
Context.availabilityUpdate();
@@ -6147,7 +6148,7 @@
(void)Permutation;
(void)ExcludeRegisters;
(void)Salt;
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
}
void TargetARM32::emit(const ConstantInteger32 *C) const {
@@ -6163,12 +6164,12 @@
void TargetARM32::emit(const ConstantFloat *C) const {
(void)C;
- UnimplementedError(Ctx->getFlags());
+ UnimplementedError(getFlags());
}
void TargetARM32::emit(const ConstantDouble *C) const {
(void)C;
- UnimplementedError(Ctx->getFlags());
+ UnimplementedError(getFlags());
}
void TargetARM32::emit(const ConstantUndef *) const {
@@ -6721,8 +6722,8 @@
void TargetDataARM32::lowerGlobals(const VariableDeclarationList &Vars,
const std::string &SectionSuffix) {
- const bool IsPIC = Ctx->getFlags().getUseNonsfi();
- switch (Ctx->getFlags().getOutFileType()) {
+ const bool IsPIC = getFlags().getUseNonsfi();
+ switch (getFlags().getOutFileType()) {
case FT_Elf: {
ELFObjectWriter *Writer = Ctx->getObjectWriter();
Writer->writeDataSection(Vars, llvm::ELF::R_ARM_ABS32, SectionSuffix,
@@ -6730,7 +6731,7 @@
} break;
case FT_Asm:
case FT_Iasm: {
- const std::string TranslateOnly = Ctx->getFlags().getTranslateOnly();
+ const std::string TranslateOnly = getFlags().getTranslateOnly();
OstreamLocker _(Ctx);
for (const VariableDeclaration *Var : Vars) {
if (GlobalContext::matchSymbolName(Var->getName(), TranslateOnly)) {
@@ -6810,9 +6811,9 @@
<< "\n"
<< "\t.align\t" << Align << "\n";
- if (Ctx->getFlags().getReorderPooledConstants()) {
+ if (getFlags().getReorderPooledConstants()) {
// TODO(jpp): add constant pooling.
- UnimplementedError(Ctx->getFlags());
+ UnimplementedError(getFlags());
}
for (Constant *C : Pool) {
@@ -6826,9 +6827,9 @@
} // end of anonymous namespace
void TargetDataARM32::lowerConstants() {
- if (Ctx->getFlags().getDisableTranslation())
+ if (getFlags().getDisableTranslation())
return;
- switch (Ctx->getFlags().getOutFileType()) {
+ switch (getFlags().getOutFileType()) {
case FT_Elf: {
ELFObjectWriter *Writer = Ctx->getObjectWriter();
Writer->writeConstantPool<ConstantFloat>(IceType_f32);
@@ -6845,9 +6846,9 @@
}
void TargetDataARM32::lowerJumpTables() {
- if (Ctx->getFlags().getDisableTranslation())
+ if (getFlags().getDisableTranslation())
return;
- switch (Ctx->getFlags().getOutFileType()) {
+ switch (getFlags().getOutFileType()) {
case FT_Elf:
if (!Ctx->getJumpTables().empty()) {
llvm::report_fatal_error("ARM32 does not support jump tables yet.");
@@ -6864,7 +6865,7 @@
}
TargetHeaderARM32::TargetHeaderARM32(GlobalContext *Ctx)
- : TargetHeaderLowering(Ctx), CPUFeatures(Ctx->getFlags()) {}
+ : TargetHeaderLowering(Ctx), CPUFeatures(getFlags()) {}
void TargetHeaderARM32::lower() {
OstreamLocker _(Ctx);
diff --git a/src/IceTargetLoweringMIPS32.cpp b/src/IceTargetLoweringMIPS32.cpp
index 5f1f8e6..de8afbf 100644
--- a/src/IceTargetLoweringMIPS32.cpp
+++ b/src/IceTargetLoweringMIPS32.cpp
@@ -141,7 +141,7 @@
Func->processAllocas(SortAndCombineAllocas);
Func->dump("After Alloca processing");
- if (!Ctx->getFlags().getEnablePhiEdgeSplit()) {
+ if (!getFlags().getEnablePhiEdgeSplit()) {
// Lower Phi instructions.
Func->placePhiLoads();
if (Func->hasError())
@@ -204,7 +204,7 @@
return;
Func->dump("After linear scan regalloc");
- if (Ctx->getFlags().getEnablePhiEdgeSplit()) {
+ if (getFlags().getEnablePhiEdgeSplit()) {
Func->advancedPhiLowering();
Func->dump("After advanced Phi lowering");
}
@@ -226,7 +226,7 @@
Func->dump("After branch optimization");
// Nop insertion
- if (Ctx->getFlags().getShouldDoNopInsertion()) {
+ if (getFlags().getShouldDoNopInsertion()) {
Func->doNopInsertion();
}
}
@@ -271,7 +271,7 @@
Func->dump("After stack frame mapping");
// Nop insertion
- if (Ctx->getFlags().getShouldDoNopInsertion()) {
+ if (getFlags().getShouldDoNopInsertion()) {
Func->doNopInsertion();
}
}
@@ -279,7 +279,7 @@
bool TargetMIPS32::doBranchOpt(Inst *Instr, const CfgNode *NextNode) {
(void)Instr;
(void)NextNode;
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
return false;
}
@@ -328,8 +328,9 @@
void TargetMIPS32::emitJumpTable(const Cfg *Func,
const InstJumpTable *JumpTable) const {
+ (void)Func;
(void)JumpTable;
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
}
/// Provide a trivial wrapper to legalize() for this common usage.
@@ -354,7 +355,7 @@
// then the result should be split and the lo and hi components will
// need to go in uninitialized registers.
if (isVectorType(Ty))
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
return Ctx->getConstantZero(Ty);
}
return From;
@@ -385,7 +386,7 @@
Str << "($" << getRegName(getFrameOrStackReg(), FrameSPTy);
Str << ")";
}
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
}
void TargetMIPS32::lowerArguments() {
@@ -405,11 +406,11 @@
Type Ty = Arg->getType();
// TODO(rkotler): handle float/vector types.
if (isVectorType(Ty)) {
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
continue;
}
if (isFloatingType(Ty)) {
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
continue;
}
if (Ty == IceType_i64) {
@@ -466,13 +467,13 @@
void TargetMIPS32::addProlog(CfgNode *Node) {
(void)Node;
return;
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
}
void TargetMIPS32::addEpilog(CfgNode *Node) {
(void)Node;
return;
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
}
Operand *TargetMIPS32::loOperand(Operand *Operand) {
@@ -1053,7 +1054,7 @@
return;
}
case Intrinsics::NaClReadTP: {
- if (Ctx->getFlags().getUseSandboxing()) {
+ if (getFlags().getUseSandboxing()) {
UnimplementedLoweringError(this, Instr);
} else {
InstCall *Call =
@@ -1095,15 +1096,13 @@
UnimplementedLoweringError(this, Instr);
}
-void TargetMIPS32::doAddressOptLoad() {
- UnimplementedError(Func->getContext()->getFlags());
-}
+void TargetMIPS32::doAddressOptLoad() { UnimplementedError(getFlags()); }
void TargetMIPS32::randomlyInsertNop(float Probability,
RandomNumberGenerator &RNG) {
RandomNumberGeneratorWrapper RNGW(RNG);
if (RNGW.getTrueWithProbability(Probability)) {
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
}
}
@@ -1150,9 +1149,7 @@
UnimplementedLoweringError(this, Instr);
}
-void TargetMIPS32::doAddressOptStore() {
- UnimplementedError(Func->getContext()->getFlags());
-}
+void TargetMIPS32::doAddressOptStore() { UnimplementedError(getFlags()); }
void TargetMIPS32::lowerSwitch(const InstSwitch *Instr) {
UnimplementedLoweringError(this, Instr);
@@ -1170,11 +1167,11 @@
}
void TargetMIPS32::postLower() {
- if (Ctx->getFlags().getOptLevel() == Opt_m1)
+ if (getFlags().getOptLevel() == Opt_m1)
return;
// TODO(rkotler): Find two-address non-SSA instructions where Dest==Src0,
// and set the IsDestRedefined flag to keep liveness analysis consistent.
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
}
void TargetMIPS32::makeRandomRegisterPermutation(
@@ -1183,7 +1180,7 @@
(void)Permutation;
(void)ExcludeRegisters;
(void)Salt;
- UnimplementedError(Func->getContext()->getFlags());
+ UnimplementedError(getFlags());
}
/* TODO(jvoung): avoid duplicate symbols with multiple targets.
@@ -1201,8 +1198,8 @@
void TargetDataMIPS32::lowerGlobals(const VariableDeclarationList &Vars,
const std::string &SectionSuffix) {
- const bool IsPIC = Ctx->getFlags().getUseNonsfi();
- switch (Ctx->getFlags().getOutFileType()) {
+ const bool IsPIC = getFlags().getUseNonsfi();
+ switch (getFlags().getOutFileType()) {
case FT_Elf: {
ELFObjectWriter *Writer = Ctx->getObjectWriter();
Writer->writeDataSection(Vars, llvm::ELF::R_MIPS_GLOB_DAT, SectionSuffix,
@@ -1210,7 +1207,7 @@
} break;
case FT_Asm:
case FT_Iasm: {
- const std::string TranslateOnly = Ctx->getFlags().getTranslateOnly();
+ const std::string TranslateOnly = getFlags().getTranslateOnly();
OstreamLocker L(Ctx);
for (const VariableDeclaration *Var : Vars) {
if (GlobalContext::matchSymbolName(Var->getName(), TranslateOnly)) {
@@ -1222,15 +1219,15 @@
}
void TargetDataMIPS32::lowerConstants() {
- if (Ctx->getFlags().getDisableTranslation())
+ if (getFlags().getDisableTranslation())
return;
- UnimplementedError(Ctx->getFlags());
+ UnimplementedError(getFlags());
}
void TargetDataMIPS32::lowerJumpTables() {
- if (Ctx->getFlags().getDisableTranslation())
+ if (getFlags().getDisableTranslation())
return;
- UnimplementedError(Ctx->getFlags());
+ UnimplementedError(getFlags());
}
// Helper for legalize() to emit the right code to lower an operand to a
@@ -1239,7 +1236,7 @@
Type Ty = Src->getType();
Variable *Reg = makeReg(Ty, RegNum);
if (isVectorType(Ty) || isFloatingType(Ty)) {
- UnimplementedError(Ctx->getFlags());
+ UnimplementedError(getFlags());
} else {
// Mov's Src operand can really only be the flexible second operand type
// or a register. Users should guarantee that.
diff --git a/src/IceTargetLoweringX8632.cpp b/src/IceTargetLoweringX8632.cpp
index 0721168..f69d19a 100644
--- a/src/IceTargetLoweringX8632.cpp
+++ b/src/IceTargetLoweringX8632.cpp
@@ -35,7 +35,7 @@
void staticInit(::Ice::GlobalContext *Ctx) {
::Ice::X8632::TargetX8632::staticInit(Ctx);
- if (Ctx->getFlags().getUseNonsfi()) {
+ if (Ice::getFlags().getUseNonsfi()) {
// In nonsfi, we need to reference the _GLOBAL_OFFSET_TABLE_ for accessing
// globals. The GOT is an external symbol (i.e., it is not defined in the
// pexe) so we need to register it as such so that ELF emission won't barf
@@ -227,7 +227,7 @@
// instructions are inserted, but it's more clear to do the whole
// transformation in a single place.
Traits::Insts::GetIP *GetIPInst = nullptr;
- if (Ctx->getFlags().getUseNonsfi()) {
+ if (getFlags().getUseNonsfi()) {
for (Inst &Instr : Node->getInsts()) {
if (auto *GetIP = llvm::dyn_cast<Traits::Insts::GetIP>(&Instr)) {
if (!Instr.isDeleted())
diff --git a/src/IceTargetLoweringX8632Traits.h b/src/IceTargetLoweringX8632Traits.h
index 12129c3..554b916 100644
--- a/src/IceTargetLoweringX8632Traits.h
+++ b/src/IceTargetLoweringX8632Traits.h
@@ -571,11 +571,9 @@
return Registers;
}
- static void
- makeRandomRegisterPermutation(GlobalContext *Ctx, Cfg *Func,
- llvm::SmallVectorImpl<RegNumT> &Permutation,
- const SmallBitVector &ExcludeRegisters,
- uint64_t Salt) {
+ static void makeRandomRegisterPermutation(
+ Cfg *Func, llvm::SmallVectorImpl<RegNumT> &Permutation,
+ const SmallBitVector &ExcludeRegisters, uint64_t Salt) {
// TODO(stichnot): Declaring Permutation this way loses type/size
// information. Fix this in conjunction with the caller-side TODO.
assert(Permutation.size() >= RegisterSet::Reg_NUM);
@@ -620,7 +618,7 @@
#undef X
// Create a random number generator for regalloc randomization.
- RandomNumberGenerator RNG(Ctx->getFlags().getRandomSeed(),
+ RandomNumberGenerator RNG(getFlags().getRandomSeed(),
RPE_RegAllocRandomization, Salt);
RandomNumberGeneratorWrapper RNGW(RNG);
diff --git a/src/IceTargetLoweringX8664Traits.h b/src/IceTargetLoweringX8664Traits.h
index 0feaa20..0fc4800 100644
--- a/src/IceTargetLoweringX8664Traits.h
+++ b/src/IceTargetLoweringX8664Traits.h
@@ -615,11 +615,9 @@
return Registers;
}
- static void
- makeRandomRegisterPermutation(GlobalContext *Ctx, Cfg *Func,
- llvm::SmallVectorImpl<RegNumT> &Permutation,
- const SmallBitVector &ExcludeRegisters,
- uint64_t Salt) {
+ static void makeRandomRegisterPermutation(
+ Cfg *Func, llvm::SmallVectorImpl<RegNumT> &Permutation,
+ const SmallBitVector &ExcludeRegisters, uint64_t Salt) {
// TODO(stichnot): Declaring Permutation this way loses type/size
// information. Fix this in conjunction with the caller-side TODO.
assert(Permutation.size() >= RegisterSet::Reg_NUM);
@@ -664,7 +662,7 @@
#undef X
// Create a random number generator for regalloc randomization.
- RandomNumberGenerator RNG(Ctx->getFlags().getRandomSeed(),
+ RandomNumberGenerator RNG(getFlags().getRandomSeed(),
RPE_RegAllocRandomization, Salt);
RandomNumberGeneratorWrapper RNGW(RNG);
diff --git a/src/IceTargetLoweringX86BaseImpl.h b/src/IceTargetLoweringX86BaseImpl.h
index 7394ab1..a344d57 100644
--- a/src/IceTargetLoweringX86BaseImpl.h
+++ b/src/IceTargetLoweringX86BaseImpl.h
@@ -352,10 +352,10 @@
(TargetInstructionSet::X86InstructionSet_End -
TargetInstructionSet::X86InstructionSet_Begin),
"Traits::InstructionSet range different from TargetInstructionSet");
- if (Func->getContext()->getFlags().getTargetInstructionSet() !=
+ if (getFlags().getTargetInstructionSet() !=
TargetInstructionSet::BaseInstructionSet) {
InstructionSet = static_cast<InstructionSetEnum>(
- (Func->getContext()->getFlags().getTargetInstructionSet() -
+ (getFlags().getTargetInstructionSet() -
TargetInstructionSet::X86InstructionSet_Begin) +
Traits::InstructionSet::Begin);
}
@@ -364,16 +364,14 @@
template <typename TraitsType>
void TargetX86Base<TraitsType>::staticInit(GlobalContext *Ctx) {
RegNumT::setLimit(Traits::RegisterSet::Reg_NUM);
- Traits::initRegisterSet(Ctx->getFlags(), &TypeToRegisterSet,
- &RegisterAliases);
+ Traits::initRegisterSet(getFlags(), &TypeToRegisterSet, &RegisterAliases);
for (size_t i = 0; i < TypeToRegisterSet.size(); ++i)
TypeToRegisterSetUnfiltered[i] = TypeToRegisterSet[i];
filterTypeToRegisterSet(Ctx, Traits::RegisterSet::Reg_NUM,
TypeToRegisterSet.data(), TypeToRegisterSet.size(),
Traits::getRegName, getRegClassName);
PcRelFixup = Traits::FK_PcRel;
- AbsFixup =
- Ctx->getFlags().getUseNonsfi() ? Traits::FK_Gotoff : Traits::FK_Abs;
+ AbsFixup = getFlags().getUseNonsfi() ? Traits::FK_Gotoff : Traits::FK_Abs;
}
template <typename TraitsType>
@@ -384,8 +382,7 @@
if (auto *ConstDouble = llvm::dyn_cast<ConstantDouble>(C)) {
return !Utils::isPositiveZero(ConstDouble->getValue());
}
- if (GlobalContext::getFlags().getRandomizeAndPoolImmediatesOption() !=
- RPI_Pool) {
+ if (getFlags().getRandomizeAndPoolImmediatesOption() != RPI_Pool) {
return false;
}
return C->shouldBeRandomizedOrPooled();
@@ -406,7 +403,7 @@
Func->processAllocas(SortAndCombineAllocas);
Func->dump("After Alloca processing");
- if (!Ctx->getFlags().getEnablePhiEdgeSplit()) {
+ if (!getFlags().getEnablePhiEdgeSplit()) {
// Lower Phi instructions.
Func->placePhiLoads();
if (Func->hasError())
@@ -492,7 +489,7 @@
return;
Func->dump("After linear scan regalloc");
- if (Ctx->getFlags().getEnablePhiEdgeSplit()) {
+ if (getFlags().getEnablePhiEdgeSplit()) {
Func->advancedPhiLowering();
Func->dump("After advanced Phi lowering");
}
@@ -900,7 +897,7 @@
// Print in the form "Offset(%reg)", taking care that:
// - Offset is never printed when it is 0
- const bool DecorateAsm = Func->getContext()->getFlags().getDecorateAsm();
+ const bool DecorateAsm = getFlags().getDecorateAsm();
// Only print Offset when it is nonzero, regardless of DecorateAsm.
if (Offset) {
if (DecorateAsm) {
@@ -1379,7 +1376,7 @@
SmallBitVector
TargetX86Base<TraitsType>::getRegisterSet(RegSetMask Include,
RegSetMask Exclude) const {
- return Traits::getRegisterSet(Ctx->getFlags(), Include, Exclude);
+ return Traits::getRegisterSet(getFlags(), Include, Exclude);
}
template <typename TraitsType>
@@ -1402,7 +1399,7 @@
const uint32_t Alignment =
std::max(AlignmentParam, Traits::X86_STACK_ALIGNMENT_BYTES);
const bool OverAligned = Alignment > Traits::X86_STACK_ALIGNMENT_BYTES;
- const bool OptM1 = Ctx->getFlags().getOptLevel() == Opt_m1;
+ const bool OptM1 = getFlags().getOptLevel() == Opt_m1;
const bool AllocaWithKnownOffset = Instr->getKnownFrameOffset();
const bool UseFramePointer =
hasFramePointer() || OverAligned || !AllocaWithKnownOffset || OptM1;
@@ -1532,7 +1529,7 @@
int32_t Src1) {
// Disable this optimization for Om1 and O0, just to keep things simple
// there.
- if (Ctx->getFlags().getOptLevel() < Opt_1)
+ if (getFlags().getOptLevel() < Opt_1)
return false;
Type Ty = Dest->getType();
if (Src1 == -1) {
@@ -2226,7 +2223,7 @@
case InstArithmetic::Sdiv:
// TODO(stichnot): Enable this after doing better performance and cross
// testing.
- if (false && Ctx->getFlags().getOptLevel() >= Opt_1) {
+ if (false && getFlags().getOptLevel() >= Opt_1) {
// Optimize division by constant power of 2, but not for Om1 or O0, just
// to keep things simple there.
if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) {
@@ -2316,7 +2313,7 @@
case InstArithmetic::Srem: {
// TODO(stichnot): Enable this after doing better performance and cross
// testing.
- if (false && Ctx->getFlags().getOptLevel() >= Opt_1) {
+ if (false && getFlags().getOptLevel() >= Opt_1) {
// Optimize mod by constant power of 2, but not for Om1 or O0, just to
// keep things simple there.
if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) {
@@ -4309,7 +4306,7 @@
Operand *PtrToMem,
Operand *Expected,
Operand *Desired) {
- if (Ctx->getFlags().getOptLevel() == Opt_m1)
+ if (getFlags().getOptLevel() == Opt_m1)
return false;
// Peek ahead a few instructions and see how Dest is used.
// It's very common to have:
@@ -5279,7 +5276,7 @@
return nullptr;
AddressOptimizer AddrOpt(Func);
- const bool MockBounds = Func->getContext()->getFlags().getMockBoundsCheck();
+ const bool MockBounds = getFlags().getMockBoundsCheck();
const Inst *Reason = nullptr;
bool AddressWasOptimized = false;
// The following unnamed struct identifies the address mode formation steps
@@ -5471,7 +5468,7 @@
/// simple global variable address.
template <typename TraitsType>
void TargetX86Base<TraitsType>::doMockBoundsCheck(Operand *Opnd) {
- if (!Ctx->getFlags().getMockBoundsCheck())
+ if (!getFlags().getMockBoundsCheck())
return;
if (auto *Mem = llvm::dyn_cast<X86OperandMem>(Opnd)) {
if (Mem->getIndex()) {
@@ -6191,7 +6188,7 @@
/// since loOperand() and hiOperand() don't expect Undef input. Also, in
/// Non-SFI mode, add a FakeUse(RebasePtr) for every pooled constant operand.
template <typename TraitsType> void TargetX86Base<TraitsType>::prelowerPhis() {
- if (Ctx->getFlags().getUseNonsfi()) {
+ if (getFlags().getUseNonsfi()) {
assert(RebasePtr);
CfgNode *Node = Context.getNode();
uint32_t RebasePtrUseCount = 0;
@@ -6718,7 +6715,7 @@
template <typename TraitsType>
Operand *TargetX86Base<TraitsType>::legalize(Operand *From, LegalMask Allowed,
RegNumT RegNum) {
- const bool UseNonsfi = Func->getContext()->getFlags().getUseNonsfi();
+ const bool UseNonsfi = getFlags().getUseNonsfi();
const Type Ty = From->getType();
// Assert that a physical register is allowed. To date, all calls to
// legalize() allow a physical register. If a physical register needs to be
@@ -7015,7 +7012,7 @@
}
template <typename TraitsType> void TargetX86Base<TraitsType>::postLower() {
- if (Ctx->getFlags().getOptLevel() == Opt_m1)
+ if (getFlags().getOptLevel() == Opt_m1)
return;
markRedefinitions();
Context.availabilityUpdate();
@@ -7025,8 +7022,8 @@
void TargetX86Base<TraitsType>::makeRandomRegisterPermutation(
llvm::SmallVectorImpl<RegNumT> &Permutation,
const SmallBitVector &ExcludeRegisters, uint64_t Salt) const {
- Traits::makeRandomRegisterPermutation(Ctx, Func, Permutation,
- ExcludeRegisters, Salt);
+ Traits::makeRandomRegisterPermutation(Func, Permutation, ExcludeRegisters,
+ Salt);
}
template <typename TraitsType>
@@ -7074,7 +7071,7 @@
void TargetX86Base<Machine>::emit(const ConstantRelocatable *C) const {
if (!BuildDefs::dump())
return;
- assert(!Ctx->getFlags().getUseNonsfi() ||
+ assert(!getFlags().getUseNonsfi() ||
C->getName().toString() == GlobalOffsetTable);
Ostream &Str = Ctx->getStrEmit();
Str << "$";
@@ -7088,7 +7085,7 @@
RegNumT RegNum) {
assert(llvm::isa<ConstantInteger32>(Immediate) ||
llvm::isa<ConstantRelocatable>(Immediate));
- if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None ||
+ if (getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None ||
RandomizationPoolingPaused == true) {
// Immediates randomization/pooling off or paused
return Immediate;
@@ -7107,7 +7104,7 @@
return Immediate;
}
Ctx->statsUpdateRPImms();
- switch (Ctx->getFlags().getRandomizeAndPoolImmediatesOption()) {
+ switch (getFlags().getRandomizeAndPoolImmediatesOption()) {
default:
llvm::report_fatal_error("Unsupported -randomize-pool-immediates option");
case RPI_Randomize: {
@@ -7144,7 +7141,7 @@
// TO:
// insert: mov $label, Reg
// => Reg
- assert(Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_Pool);
+ assert(getFlags().getRandomizeAndPoolImmediatesOption() == RPI_Pool);
assert(Immediate->getShouldBePooled());
// if we have already assigned a phy register, we must come from
// advancedPhiLowering()=>lowerAssign(). In this case we should reuse the
@@ -7167,7 +7164,7 @@
TargetX86Base<TraitsType>::randomizeOrPoolImmediate(X86OperandMem *MemOperand,
RegNumT RegNum) {
assert(MemOperand);
- if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None ||
+ if (getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None ||
RandomizationPoolingPaused == true) {
// immediates randomization/pooling is turned off
return MemOperand;
@@ -7198,7 +7195,7 @@
// The offset of this mem operand should be blinded or pooled
Ctx->statsUpdateRPImms();
- switch (Ctx->getFlags().getRandomizeAndPoolImmediatesOption()) {
+ switch (getFlags().getRandomizeAndPoolImmediatesOption()) {
default:
llvm::report_fatal_error("Unsupported -randomize-pool-immediates option");
case RPI_Randomize: {
@@ -7284,7 +7281,7 @@
if (!BuildDefs::dump())
return;
Ostream &Str = Ctx->getStrEmit();
- const bool UseNonsfi = Ctx->getFlags().getUseNonsfi();
+ const bool UseNonsfi = getFlags().getUseNonsfi();
GlobalString FunctionName = Func->getFunctionName();
const char *Prefix = UseNonsfi ? ".data.rel.ro." : ".rodata.";
Str << "\t.section\t" << Prefix << FunctionName
@@ -7314,11 +7311,11 @@
// If reorder-pooled-constants option is set to true, we need to shuffle the
// constant pool before emitting it.
- if (Ctx->getFlags().getReorderPooledConstants() && !Pool.empty()) {
+ if (getFlags().getReorderPooledConstants() && !Pool.empty()) {
// Use the constant's kind value as the salt for creating random number
// generator.
Operand::OperandKind K = (*Pool.begin())->getKind();
- RandomNumberGenerator RNG(Ctx->getFlags().getRandomSeed(),
+ RandomNumberGenerator RNG(getFlags().getRandomSeed(),
RPE_PooledConstantReordering, K);
RandomShuffle(Pool.begin(), Pool.end(),
[&RNG](uint64_t N) { return (uint32_t)RNG.next(N); });
@@ -7347,9 +7344,9 @@
template <typename TraitsType>
void TargetDataX86<TraitsType>::lowerConstants() {
- if (Ctx->getFlags().getDisableTranslation())
+ if (getFlags().getDisableTranslation())
return;
- switch (Ctx->getFlags().getOutFileType()) {
+ switch (getFlags().getOutFileType()) {
case FT_Elf: {
ELFObjectWriter *Writer = Ctx->getObjectWriter();
@@ -7376,8 +7373,8 @@
template <typename TraitsType>
void TargetDataX86<TraitsType>::lowerJumpTables() {
- const bool IsPIC = Ctx->getFlags().getUseNonsfi();
- switch (Ctx->getFlags().getOutFileType()) {
+ const bool IsPIC = getFlags().getUseNonsfi();
+ switch (getFlags().getOutFileType()) {
case FT_Elf: {
ELFObjectWriter *Writer = Ctx->getObjectWriter();
for (const JumpTableData &JT : Ctx->getJumpTables())
@@ -7409,15 +7406,15 @@
template <typename TraitsType>
void TargetDataX86<TraitsType>::lowerGlobals(
const VariableDeclarationList &Vars, const std::string &SectionSuffix) {
- const bool IsPIC = Ctx->getFlags().getUseNonsfi();
- switch (Ctx->getFlags().getOutFileType()) {
+ const bool IsPIC = getFlags().getUseNonsfi();
+ switch (getFlags().getOutFileType()) {
case FT_Elf: {
ELFObjectWriter *Writer = Ctx->getObjectWriter();
Writer->writeDataSection(Vars, Traits::FK_Abs, SectionSuffix, IsPIC);
} break;
case FT_Asm:
case FT_Iasm: {
- const std::string TranslateOnly = Ctx->getFlags().getTranslateOnly();
+ const std::string TranslateOnly = getFlags().getTranslateOnly();
OstreamLocker L(Ctx);
for (const VariableDeclaration *Var : Vars) {
if (GlobalContext::matchSymbolName(Var->getName(), TranslateOnly)) {
diff --git a/src/IceTranslator.h b/src/IceTranslator.h
index 5e7dfad..1f7b358 100644
--- a/src/IceTranslator.h
+++ b/src/IceTranslator.h
@@ -48,8 +48,6 @@
GlobalContext *getContext() const { return Ctx; }
- const ClFlags &getFlags() const { return Ctx->getFlags(); }
-
/// Translates the constructed ICE function Func to machine code.
void translateFcn(std::unique_ptr<Cfg> Func);
diff --git a/src/PNaClTranslator.cpp b/src/PNaClTranslator.cpp
index 0bba5c2..514d817 100644
--- a/src/PNaClTranslator.cpp
+++ b/src/PNaClTranslator.cpp
@@ -446,10 +446,6 @@
// Defines if a module block has already been parsed.
bool ParsedModuleBlock = false;
- static const Ice::ClFlags &getFlags() {
- return Ice::GlobalContext::getFlags();
- }
-
bool ParseBlock(unsigned BlockID) override;
// Gets extended type associated with the given index, assuming the extended
@@ -493,7 +489,7 @@
// Installs names for global variables without names.
void installGlobalVarNames() {
assert(VariableDeclarations);
- const std::string &GlobalPrefix = getFlags().getDefaultGlobalPrefix();
+ const std::string &GlobalPrefix = Ice::getFlags().getDefaultGlobalPrefix();
if (!GlobalPrefix.empty()) {
NaClBcIndexSize_t NameIndex = 0;
for (Ice::VariableDeclaration *Var : *VariableDeclarations) {
@@ -504,7 +500,8 @@
// Installs names for functions without names.
void installFunctionNames() {
- const std::string &FunctionPrefix = getFlags().getDefaultFunctionPrefix();
+ const std::string &FunctionPrefix =
+ Ice::getFlags().getDefaultFunctionPrefix();
if (!FunctionPrefix.empty()) {
NaClBcIndexSize_t NameIndex = 0;
for (Ice::FunctionDeclaration *Func : FunctionDeclarations) {
@@ -549,9 +546,8 @@
// Converts global variable declarations into constant value IDs.
void createValueIDsForGlobalVars() {
- Ice::GlobalContext *Ctx = getTranslator().getContext();
for (const Ice::VariableDeclaration *Decl : *VariableDeclarations) {
- if (!Decl->verifyLinkageCorrect(Ctx))
+ if (!Decl->verifyLinkageCorrect())
reportLinkageError("Global", *Decl);
Ice::Constant *C =
getConstantSym(Decl->getName(), !Decl->hasInitializer());
@@ -590,7 +586,7 @@
NaClBitcodeParser::ErrorAt(Level, Bit, Message);
setErrStream(OldErrStream);
}
- if (Level >= naclbitc::Error && !getFlags().getAllowErrorRecovery())
+ if (Level >= naclbitc::Error && !Ice::getFlags().getAllowErrorRecovery())
Fatal();
return true;
}
@@ -694,10 +690,6 @@
// Gets the translator associated with the bitcode parser.
Ice::Translator &getTranslator() const { return Context->getTranslator(); }
- static const Ice::ClFlags &getFlags() {
- return Ice::GlobalContext::getFlags();
- }
-
// Default implementation. Reports that block is unknown and skips its
// contents.
bool ParseBlock(unsigned BlockID) override;
@@ -773,7 +765,7 @@
// Note: If dump routines have been turned off, the error messages will not
// be readable. Hence, replace with simple error. We also use the simple form
// for unit tests.
- if (getFlags().getGenerateUnitTestMessages()) {
+ if (Ice::getFlags().getGenerateUnitTestMessages()) {
StrBuf << "Invalid " << getBlockName() << " record: <" << Record.GetCode();
for (const uint64_t Val : Record.GetValues()) {
StrBuf << " " << Val;
@@ -2999,7 +2991,7 @@
: BlockParserBaseClass(BlockID, Context),
Timer(Ice::TimerStack::TT_parseModule,
Context->getTranslator().getContext()),
- IsParseParallel(Ice::GlobalContext::Flags.isParseParallel()) {}
+ IsParseParallel(Ice::getFlags().isParseParallel()) {}
~ModuleParser() override = default;
const char *getBlockName() const override { return "module"; }
NaClBitstreamCursor &getCursor() const { return Record.GetCursor(); }
@@ -3179,7 +3171,7 @@
std::unique_ptr<Ice::Cfg> Func = Parser.parseFunction(SeqNumber);
bool Failed = Func->hasError();
getTranslator().translateFcn(std::move(Func));
- return Failed && !getFlags().getAllowErrorRecovery();
+ return Failed && !Ice::getFlags().getAllowErrorRecovery();
}
}
default: