Subzero. RAII NaCl Bundling.
This CL introduces the TargetLowering::AutoBundle type, which allows
RAII-style bundle emission. As part of the CL, all of the uses of
TargetLowering::_bundle_lock() and TargetLowering::_bundle_unlock()
were replaced with uses of the newly introduced type.
BUG=
R=sehr@chromium.org, stichnot@chromium.org
Review URL: https://codereview.chromium.org/1585843007 .
diff --git a/src/IceTargetLowering.cpp b/src/IceTargetLowering.cpp
index 4f19226..28a7ca7 100644
--- a/src/IceTargetLowering.cpp
+++ b/src/IceTargetLowering.cpp
@@ -248,6 +248,25 @@
TargetLowering::TargetLowering(Cfg *Func)
: Func(Func), Ctx(Func->getContext()), Context() {}
+TargetLowering::AutoBundle::AutoBundle(TargetLowering *Target,
+ InstBundleLock::Option Option)
+ : Target(Target),
+ NeedSandboxing(Target->Ctx->getFlags().getUseSandboxing()) {
+ assert(!Target->AutoBundling);
+ Target->AutoBundling = true;
+ if (NeedSandboxing) {
+ Target->_bundle_lock(Option);
+ }
+}
+
+TargetLowering::AutoBundle::~AutoBundle() {
+ assert(Target->AutoBundling);
+ Target->AutoBundling = false;
+ if (NeedSandboxing) {
+ Target->_bundle_unlock();
+ }
+}
+
void TargetLowering::genTargetHelperCalls() {
for (CfgNode *Node : Func->getNodes()) {
Context.init(Node);
diff --git a/src/IceTargetLowering.h b/src/IceTargetLowering.h
index 71536cd..f45956c 100644
--- a/src/IceTargetLowering.h
+++ b/src/IceTargetLowering.h
@@ -297,7 +297,43 @@
virtual ~TargetLowering() = default;
+private:
+ // This control variable is used by AutoBundle (RAII-style bundle
+ // locking/unlocking) to prevent nested bundles.
+ bool AutoBundling = false;
+
+ // _bundle_lock() and _bundle_unlock() were made private to force subtargets
+ // to use the AutoBundle helper.
+ void
+ _bundle_lock(InstBundleLock::Option BundleOption = InstBundleLock::Opt_None) {
+ Context.insert<InstBundleLock>(BundleOption);
+ }
+ void _bundle_unlock() { Context.insert<InstBundleUnlock>(); }
+
protected:
+ /// AutoBundle provides RAII-style bundling. Sub-targets are expected to use
+ /// it when emitting NaCl Bundles to ensure bundles are properly unlocked, and
+ /// to prevent nested bundles.
+ ///
+ /// AutoBundle objects will emit a _bundle_lock during construction (but only
+ /// if sandboxed code generation was requested), and a _bundle_unlock() during
+ /// destruction. By carefully scoping objects of this type, Subtargets can
+ /// ensure proper bundle emission.
+ class AutoBundle {
+ AutoBundle() = delete;
+ AutoBundle(const AutoBundle &) = delete;
+ AutoBundle &operator=(const AutoBundle &) = delete;
+
+ public:
+ explicit AutoBundle(TargetLowering *Target, InstBundleLock::Option Option =
+ InstBundleLock::Opt_None);
+ ~AutoBundle();
+
+ private:
+ TargetLowering *const Target;
+ const bool NeedSandboxing;
+ };
+
explicit TargetLowering(Cfg *Func);
// Applies command line filters to TypeToRegisterSet array.
static void
@@ -394,11 +430,6 @@
InstCall *makeHelperCall(const IceString &Name, Variable *Dest,
SizeT MaxSrcs);
- void
- _bundle_lock(InstBundleLock::Option BundleOption = InstBundleLock::Opt_None) {
- Context.insert<InstBundleLock>(BundleOption);
- }
- void _bundle_unlock() { Context.insert<InstBundleUnlock>(); }
void _set_dest_redefined() { Context.getLastInserted()->setDestRedefined(); }
bool shouldOptimizeMemIntrins();
diff --git a/src/IceTargetLoweringARM32.cpp b/src/IceTargetLoweringARM32.cpp
index 477fe9f..6ca28ed 100644
--- a/src/IceTargetLoweringARM32.cpp
+++ b/src/IceTargetLoweringARM32.cpp
@@ -1771,7 +1771,6 @@
CurInstr->setDeleted();
}
} else if (auto *StrInstr = llvm::dyn_cast<InstARM32Str>(CurInstr)) {
- Sandboxer Bundle(this);
if (OperandARM32Mem *LegalMem = Legalizer.legalizeMemOperand(
llvm::cast<OperandARM32Mem>(StrInstr->getSrc(1)))) {
Sandboxer(this).str(llvm::cast<Variable>(CurInstr->getSrc(0)),
@@ -6193,17 +6192,9 @@
TargetARM32::Sandboxer::Sandboxer(TargetARM32 *Target,
InstBundleLock::Option BundleOption)
- : Target(Target) {
- if (Target->NeedSandboxing) {
- Target->_bundle_lock(BundleOption);
- }
-}
+ : Bundler(Target, BundleOption), Target(Target) {}
-TargetARM32::Sandboxer::~Sandboxer() {
- if (Target->NeedSandboxing) {
- Target->_bundle_unlock();
- }
-}
+TargetARM32::Sandboxer::~Sandboxer() {}
namespace {
OperandARM32FlexImm *indirectBranchBicMask(Cfg *Func) {
diff --git a/src/IceTargetLoweringARM32.h b/src/IceTargetLoweringARM32.h
index 70c38fa..814ef75 100644
--- a/src/IceTargetLoweringARM32.h
+++ b/src/IceTargetLoweringARM32.h
@@ -942,6 +942,7 @@
void sub_sp(Operand *SubAmount);
private:
+ AutoBundle Bundler;
TargetARM32 *Target;
};
diff --git a/src/IceTargetLoweringX8632.cpp b/src/IceTargetLoweringX8632.cpp
index f5f0d1c..694de61 100644
--- a/src/IceTargetLoweringX8632.cpp
+++ b/src/IceTargetLoweringX8632.cpp
@@ -146,15 +146,15 @@
}
void TargetX8632::lowerIndirectJump(Variable *JumpTarget) {
+ AutoBundle _(this);
+
if (NeedSandboxing) {
- _bundle_lock();
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(JumpTarget, Ctx->getConstantInt32(~(BundleSize - 1)));
}
+
_jmp(JumpTarget);
- if (NeedSandboxing)
- _bundle_unlock();
}
void TargetX8632::lowerCall(const InstCall *Instr) {
@@ -278,24 +278,29 @@
break;
}
}
+
Operand *CallTarget =
legalize(Instr->getCallTarget(), Legal_Reg | Legal_Imm | Legal_AddrAbs);
- if (NeedSandboxing) {
- if (llvm::isa<Constant>(CallTarget)) {
- _bundle_lock(InstBundleLock::Opt_AlignToEnd);
- } else {
- Variable *CallTargetVar = nullptr;
- _mov(CallTargetVar, CallTarget);
- _bundle_lock(InstBundleLock::Opt_AlignToEnd);
- const SizeT BundleSize =
- 1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
- _and(CallTargetVar, Ctx->getConstantInt32(~(BundleSize - 1)));
- CallTarget = CallTargetVar;
+
+ Traits::Insts::Call *NewCall;
+ /* AutoBundle scoping */ {
+ std::unique_ptr<AutoBundle> Bundle;
+ if (NeedSandboxing) {
+ if (llvm::isa<Constant>(CallTarget)) {
+ Bundle = makeUnique<AutoBundle>(this, InstBundleLock::Opt_AlignToEnd);
+ } else {
+ Variable *CallTargetVar = nullptr;
+ _mov(CallTargetVar, CallTarget);
+ Bundle = makeUnique<AutoBundle>(this, InstBundleLock::Opt_AlignToEnd);
+ const SizeT BundleSize =
+ 1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
+ _and(CallTargetVar, Ctx->getConstantInt32(~(BundleSize - 1)));
+ CallTarget = CallTargetVar;
+ }
}
+ NewCall = Context.insert<Traits::Insts::Call>(ReturnReg, CallTarget);
}
- auto *NewCall = Context.insert<Traits::Insts::Call>(ReturnReg, CallTarget);
- if (NeedSandboxing)
- _bundle_unlock();
+
if (ReturnRegHi)
Context.insert<InstFakeDef>(ReturnRegHi);
@@ -749,8 +754,10 @@
}
}
- if (!NeedSandboxing)
+ if (!NeedSandboxing) {
return;
+ }
+
// Change the original ret instruction into a sandboxed return sequence.
// t:ecx = pop
// bundle_lock
diff --git a/src/IceTargetLoweringX8664.cpp b/src/IceTargetLoweringX8664.cpp
index 9f825c9..9f3feea 100644
--- a/src/IceTargetLoweringX8664.cpp
+++ b/src/IceTargetLoweringX8664.cpp
@@ -165,12 +165,11 @@
// add Adjustment, %esp
//
// instruction is not DCE'd.
- _bundle_lock();
+ AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(esp, rsp));
_add(esp, Adjustment);
_redefined(Context.insert<InstFakeDef>(rsp, esp));
_add(rsp, r15);
- _bundle_unlock();
}
void TargetX8664::_mov_sp(Operand *NewValue) {
@@ -180,9 +179,7 @@
Variable *rsp =
getPhysicalRegister(Traits::RegisterSet::Reg_rsp, IceType_i64);
- if (NeedSandboxing) {
- _bundle_lock();
- }
+ AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(esp, rsp));
_redefined(_mov(esp, NewValue));
@@ -195,7 +192,6 @@
Variable *r15 =
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_add(rsp, r15);
- _bundle_unlock();
}
void TargetX8664::_push_rbp() {
@@ -218,10 +214,9 @@
// .bundle_end
//
// to avoid leaking the upper 32-bits (i.e., the sandbox address.)
- _bundle_lock();
+ AutoBundle _(this);
_push(_0);
Context.insert<typename Traits::Insts::Store>(ebp, TopOfStack);
- _bundle_unlock();
}
Traits::X86OperandMem *TargetX8664::_sandbox_mem_reference(X86OperandMem *Mem) {
@@ -350,12 +345,11 @@
// sub Adjustment, %esp
// add %r15, %rsp
// .bundle_end
- _bundle_lock();
+ AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(esp, rsp));
_sub(esp, Adjustment);
_redefined(Context.insert<InstFakeDef>(rsp, esp));
_add(rsp, r15);
- _bundle_unlock();
}
void TargetX8664::initSandbox() {
@@ -369,6 +363,8 @@
}
void TargetX8664::lowerIndirectJump(Variable *JumpTarget) {
+ std::unique_ptr<AutoBundle> Bundler;
+
if (!NeedSandboxing) {
Variable *T = makeReg(IceType_i64);
_movzx(T, JumpTarget);
@@ -380,7 +376,7 @@
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_mov(T, JumpTarget);
- _bundle_lock();
+ Bundler = makeUnique<AutoBundle>(this);
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(T, Ctx->getConstantInt32(~(BundleSize - 1)));
@@ -390,8 +386,6 @@
}
_jmp(JumpTarget);
- if (NeedSandboxing)
- _bundle_unlock();
}
namespace {
@@ -599,30 +593,32 @@
ReturnAddress = InstX86Label::create(Func, this);
ReturnAddress->setIsReturnLocation(true);
constexpr bool SuppressMangling = true;
- if (CallTargetR == nullptr) {
- _bundle_lock(InstBundleLock::Opt_PadToEnd);
- _push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
- SuppressMangling));
- } else {
- Variable *T = makeReg(IceType_i32);
- Variable *T64 = makeReg(IceType_i64);
- Variable *r15 =
- getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
+ /* AutoBundle scoping */ {
+ std::unique_ptr<AutoBundle> Bundler;
+ if (CallTargetR == nullptr) {
+ Bundler = makeUnique<AutoBundle>(this, InstBundleLock::Opt_PadToEnd);
+ _push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
+ SuppressMangling));
+ } else {
+ Variable *T = makeReg(IceType_i32);
+ Variable *T64 = makeReg(IceType_i64);
+ Variable *r15 =
+ getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
- _mov(T, CallTargetR);
- _bundle_lock(InstBundleLock::Opt_PadToEnd);
- _push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
- SuppressMangling));
- const SizeT BundleSize =
- 1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
- _and(T, Ctx->getConstantInt32(~(BundleSize - 1)));
- _movzx(T64, T);
- _add(T64, r15);
- CallTarget = T64;
+ _mov(T, CallTargetR);
+ Bundler = makeUnique<AutoBundle>(this, InstBundleLock::Opt_PadToEnd);
+ _push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
+ SuppressMangling));
+ const SizeT BundleSize =
+ 1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
+ _and(T, Ctx->getConstantInt32(~(BundleSize - 1)));
+ _movzx(T64, T);
+ _add(T64, r15);
+ CallTarget = T64;
+ }
+
+ NewCall = Context.insert<Traits::Insts::Jmp>(CallTarget);
}
-
- NewCall = Context.insert<Traits::Insts::Jmp>(CallTarget);
- _bundle_unlock();
if (ReturnReg != nullptr) {
Context.insert<InstFakeDef>(ReturnReg);
}
@@ -858,13 +854,12 @@
} else {
_push_rbp();
- _bundle_lock();
+ AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(ebp, rbp));
_redefined(Context.insert<InstFakeDef>(esp, rsp));
_mov(ebp, esp);
_redefined(Context.insert<InstFakeDef>(rsp, esp));
_add(rbp, r15);
- _bundle_unlock();
}
// Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode).
Context.insert<InstFakeUse>(rbp);
@@ -1057,12 +1052,11 @@
_pop(rcx);
Context.insert<InstFakeDef>(ecx, rcx);
- _bundle_lock();
+ AutoBundle _(this);
_mov(ebp, ecx);
_redefined(Context.insert<InstFakeDef>(rbp, ebp));
_add(rbp, r15);
- _bundle_unlock();
}
}
@@ -1097,15 +1091,16 @@
Variable *r15 =
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
- _bundle_lock();
- const SizeT BundleSize = 1
- << Func->getAssembler<>()->getBundleAlignLog2Bytes();
- _and(T_ecx, Ctx->getConstantInt32(~(BundleSize - 1)));
- Context.insert<InstFakeDef>(T_rcx, T_ecx);
- _add(T_rcx, r15);
+ /* AutoBundle scoping */ {
+ AutoBundle _(this);
+ const SizeT BundleSize =
+ 1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
+ _and(T_ecx, Ctx->getConstantInt32(~(BundleSize - 1)));
+ Context.insert<InstFakeDef>(T_rcx, T_ecx);
+ _add(T_rcx, r15);
- _jmp(T_rcx);
- _bundle_unlock();
+ _jmp(T_rcx);
+ }
if (RI->getSrcSize()) {
auto *RetValue = llvm::cast<Variable>(RI->getSrc(0));
diff --git a/src/IceTargetLoweringX86Base.h b/src/IceTargetLoweringX86Base.h
index 853ae4e..ff9274f 100644
--- a/src/IceTargetLoweringX86Base.h
+++ b/src/IceTargetLoweringX86Base.h
@@ -382,6 +382,7 @@
X86OperandMem **findMemoryReference() { return nullptr; }
public:
+ std::unique_ptr<AutoBundle> Bundler;
X86OperandMem **const MemOperand;
template <typename... T>
@@ -392,16 +393,12 @@
? nullptr
: findMemoryReference(Args...)) {
if (MemOperand != nullptr) {
- Target->_bundle_lock(BundleLockOpt);
+ Bundler = makeUnique<AutoBundle>(Target, BundleLockOpt);
*MemOperand = Target->_sandbox_mem_reference(*MemOperand);
}
}
- ~AutoMemorySandboxer() {
- if (MemOperand != nullptr) {
- Target->_bundle_unlock();
- }
- }
+ ~AutoMemorySandboxer() {}
};
/// The following are helpers that insert lowered x86 instructions with