Let lock_guard deduce its template argument
No functional change, this is a cleanup.
With C++17, it's no longer necessary to specify the template argument
when it can be deduced from the types of constructor arguments. This
allows de-cluttering our locking statements.
To avoid typos, this patch was mechanically generated:
perl -p -i -e 's/std::lock_guard<std::mutex>/std::lock_guard/g' \
$(find . -name '*.cpp' -o -name '*.h')
The single std::lock_guard<std::shared_mutex> use in
libnetdutils/Log.cpp was converted by hand, since the substitution
above only matches std::mutex guards.
Change-Id: Ibb15d9a6c5b1c861d81353e47d25474eb1d4c2df
diff --git a/libnetdutils/Log.cpp b/libnetdutils/Log.cpp
index 98f2d13..07d5952 100644
--- a/libnetdutils/Log.cpp
+++ b/libnetdutils/Log.cpp
@@ -202,7 +202,7 @@
break;
}
- std::lock_guard<std::shared_mutex> guard(mLock);
+ std::lock_guard guard(mLock);
mEntries.push_back(makeTimestampedEntry(entry));
while (mEntries.size() > mMaxEntries) mEntries.pop_front();
}
diff --git a/libnetdutils/include/netdutils/OperationLimiter.h b/libnetdutils/include/netdutils/OperationLimiter.h
index 633536b..992a849 100644
--- a/libnetdutils/include/netdutils/OperationLimiter.h
+++ b/libnetdutils/include/netdutils/OperationLimiter.h
@@ -60,7 +60,7 @@
// Note: each successful start(key) must be matched by exactly one call to
// finish(key).
bool start(KeyType key) EXCLUDES(mMutex) {
- std::lock_guard<std::mutex> lock(mMutex);
+ std::lock_guard lock(mMutex);
auto& cnt = mCounters[key]; // operator[] creates new entries as needed.
if (cnt >= mLimitPerKey) {
// Oh, no!
@@ -73,7 +73,7 @@
// Decrements the number of operations in progress accounted to |key|.
// See usage notes on start().
void finish(KeyType key) EXCLUDES(mMutex) {
- std::lock_guard<std::mutex> lock(mMutex);
+ std::lock_guard lock(mMutex);
auto it = mCounters.find(key);
if (it == mCounters.end()) {
LOG(FATAL_WITHOUT_ABORT) << "Decremented non-existent counter for key=" << key;
diff --git a/server/CommandListener.cpp b/server/CommandListener.cpp
index 9db017c..0b7ef2c 100644
--- a/server/CommandListener.cpp
+++ b/server/CommandListener.cpp
@@ -90,7 +90,7 @@
mLock(lock) {}
int runCommand(SocketClient *c, int argc, char **argv) {
- std::lock_guard<std::mutex> lock(mLock);
+ std::lock_guard lock(mLock);
return mWrappedCmd->runCommand(c, argc, argv);
}
diff --git a/server/EventReporter.cpp b/server/EventReporter.cpp
index c9238f4..52aaf16 100644
--- a/server/EventReporter.cpp
+++ b/server/EventReporter.cpp
@@ -37,7 +37,7 @@
}
android::sp<INetdEventListener> EventReporter::getNetdEventListener() {
- std::lock_guard<std::mutex> lock(mutex);
+ std::lock_guard lock(mutex);
if (mNetdEventListener == nullptr) {
// Use checkService instead of getService because getService waits for 5 seconds for the
// service to become available. The DNS resolver inside netd is started much earlier in the
diff --git a/server/IptablesRestoreController.cpp b/server/IptablesRestoreController.cpp
index 88d88f6..eb67b75 100644
--- a/server/IptablesRestoreController.cpp
+++ b/server/IptablesRestoreController.cpp
@@ -347,7 +347,7 @@
int IptablesRestoreController::execute(const IptablesTarget target, const std::string& command,
std::string *output) {
- std::lock_guard<std::mutex> lock(mLock);
+ std::lock_guard lock(mLock);
std::string buffer;
if (output == nullptr) {
diff --git a/server/NFLogListener.cpp b/server/NFLogListener.cpp
index 82eb0c4..67ed453 100644
--- a/server/NFLogListener.cpp
+++ b/server/NFLogListener.cpp
@@ -149,7 +149,7 @@
const auto rxHandler = [this](const nlmsghdr& nlmsg, const Slice msg) {
nfgenmsg nfmsg = {};
extract(msg, nfmsg);
- std::lock_guard<std::mutex> guard(mMutex);
+ std::lock_guard guard(mMutex);
const auto& fn = findWithDefault(mDispatchMap, ntohs(nfmsg.res_id), kDefaultDispatchFn);
fn(nlmsg, nfmsg, drop(msg, sizeof(nfmsg)));
};
@@ -183,7 +183,7 @@
const auto sendFn = [this](const Slice msg) { return mListener->send(msg); };
// Install fn into the dispatch map BEFORE requesting delivery of messages
{
- std::lock_guard<std::mutex> guard(mMutex);
+ std::lock_guard guard(mMutex);
mDispatchMap[nfLogGroup] = fn;
}
RETURN_IF_NOT_OK(cfgCmdBind(sendFn, nfLogGroup));
@@ -198,7 +198,7 @@
RETURN_IF_NOT_OK(cfgCmdUnbind(sendFn, nfLogGroup));
// Remove from the dispatch map AFTER stopping message delivery.
{
- std::lock_guard<std::mutex> guard(mMutex);
+ std::lock_guard guard(mMutex);
mDispatchMap.erase(nfLogGroup);
}
return ok;
diff --git a/server/NetdHwService.cpp b/server/NetdHwService.cpp
index 06723cf..d76075e 100644
--- a/server/NetdHwService.cpp
+++ b/server/NetdHwService.cpp
@@ -140,7 +140,7 @@
}
Return <StatusCode> NetdHwService::setIpForwardEnable(bool enable) {
- std::lock_guard<std::mutex> _lock(gCtls->tetherCtrl.lock);
+ std::lock_guard _lock(gCtls->tetherCtrl.lock);
bool success = enable ? gCtls->tetherCtrl.enableForwarding(FORWARDING_REQUESTER) :
gCtls->tetherCtrl.disableForwarding(FORWARDING_REQUESTER);
@@ -150,7 +150,7 @@
Return <StatusCode> NetdHwService::setForwardingBetweenInterfaces(
const hidl_string& inputIfName, const hidl_string& outputIfName, bool enable) {
- std::lock_guard<std::mutex> _lock(gCtls->tetherCtrl.lock);
+ std::lock_guard _lock(gCtls->tetherCtrl.lock);
// TODO: check that one interface is an OEM interface and the other is another OEM interface, an
// IPsec interface or a dummy interface.
diff --git a/server/NetdNativeService.cpp b/server/NetdNativeService.cpp
index c155ac1..ad6d6de 100644
--- a/server/NetdNativeService.cpp
+++ b/server/NetdNativeService.cpp
@@ -88,7 +88,7 @@
#define NETD_LOCKING_RPC(permission, lock) \
ENFORCE_PERMISSION(permission); \
- std::lock_guard<std::mutex> _lock(lock);
+ std::lock_guard _lock(lock);
#define NETD_BIG_LOCK_RPC(permission) NETD_LOCKING_RPC((permission), gBigNetdLock)
diff --git a/server/NetlinkListener.cpp b/server/NetlinkListener.cpp
index cfaf721..43ada9d 100644
--- a/server/NetlinkListener.cpp
+++ b/server/NetlinkListener.cpp
@@ -85,13 +85,13 @@
}
Status NetlinkListener::subscribe(uint16_t type, const DispatchFn& fn) {
- std::lock_guard<std::mutex> guard(mMutex);
+ std::lock_guard guard(mMutex);
mDispatchMap[type] = fn;
return ok;
}
Status NetlinkListener::unsubscribe(uint16_t type) {
- std::lock_guard<std::mutex> guard(mMutex);
+ std::lock_guard guard(mMutex);
mDispatchMap.erase(type);
return ok;
}
@@ -100,7 +100,7 @@
std::vector<char> rxbuf(4096);
const auto rxHandler = [this](const nlmsghdr& nlmsg, const Slice& buf) {
- std::lock_guard<std::mutex> guard(mMutex);
+ std::lock_guard guard(mMutex);
const auto& fn = findWithDefault(mDispatchMap, nlmsg.nlmsg_type, kDefaultDispatchFn);
fn(nlmsg, buf);
};
diff --git a/server/ResolverController.cpp b/server/ResolverController.cpp
index 4eafcec..5a0c357 100644
--- a/server/ResolverController.cpp
+++ b/server/ResolverController.cpp
@@ -133,7 +133,7 @@
tlsServers.insert(server);
}
- std::lock_guard<std::mutex> guard(mPrivateDnsLock);
+ std::lock_guard guard(mPrivateDnsLock);
if (explicitlyConfigured) {
mPrivateDnsModes[netId] = PrivateDnsMode::STRICT;
} else if (!tlsServers.empty()) {
@@ -183,7 +183,7 @@
// If the overhead of mutex acquisition proves too high, we could reduce
// it by maintaining an atomic_int32_t counter of TLS-enabled netids, or
// by using an RWLock.
- std::lock_guard<std::mutex> guard(mPrivateDnsLock);
+ std::lock_guard guard(mPrivateDnsLock);
const auto mode = mPrivateDnsModes.find(netId);
if (mode == mPrivateDnsModes.end()) return status;
@@ -205,13 +205,13 @@
if (DBG) {
ALOGD("PrivateDnsConfiguration::clear(%u)", netId);
}
- std::lock_guard<std::mutex> guard(mPrivateDnsLock);
+ std::lock_guard guard(mPrivateDnsLock);
mPrivateDnsModes.erase(netId);
mPrivateDnsTransports.erase(netId);
}
void dump(DumpWriter& dw, unsigned netId) {
- std::lock_guard<std::mutex> guard(mPrivateDnsLock);
+ std::lock_guard guard(mPrivateDnsLock);
const auto& mode = mPrivateDnsModes.find(netId);
dw.println("Private DNS mode: %s", getPrivateDnsModeString(
@@ -295,7 +295,7 @@
constexpr bool NEEDS_REEVALUATION = true;
constexpr bool DONT_REEVALUATE = false;
- std::lock_guard<std::mutex> guard(mPrivateDnsLock);
+ std::lock_guard guard(mPrivateDnsLock);
auto netPair = mPrivateDnsTransports.find(netId);
if (netPair == mPrivateDnsTransports.end()) {
diff --git a/server/RouteController.cpp b/server/RouteController.cpp
index 16947af..6774fd5 100644
--- a/server/RouteController.cpp
+++ b/server/RouteController.cpp
@@ -155,7 +155,7 @@
}
uint32_t RouteController::getIfIndex(const char* interface) {
- std::lock_guard<std::mutex> lock(sInterfaceToTableLock);
+ std::lock_guard lock(sInterfaceToTableLock);
auto iter = sInterfaceToTable.find(interface);
if (iter == sInterfaceToTable.end()) {
@@ -167,7 +167,7 @@
}
uint32_t RouteController::getRouteTableForInterface(const char* interface) {
- std::lock_guard<std::mutex> lock(sInterfaceToTableLock);
+ std::lock_guard lock(sInterfaceToTableLock);
return getRouteTableForInterfaceLocked(interface);
}
@@ -191,7 +191,7 @@
addTableName(ROUTE_TABLE_LEGACY_NETWORK, ROUTE_TABLE_NAME_LEGACY_NETWORK, &contents);
addTableName(ROUTE_TABLE_LEGACY_SYSTEM, ROUTE_TABLE_NAME_LEGACY_SYSTEM, &contents);
- std::lock_guard<std::mutex> lock(sInterfaceToTableLock);
+ std::lock_guard lock(sInterfaceToTableLock);
for (const auto& entry : sInterfaceToTable) {
addTableName(entry.second, entry.first, &contents);
}
@@ -927,7 +927,7 @@
// Returns 0 on success or negative errno on failure.
WARN_UNUSED_RESULT int RouteController::flushRoutes(const char* interface) {
- std::lock_guard<std::mutex> lock(sInterfaceToTableLock);
+ std::lock_guard lock(sInterfaceToTableLock);
uint32_t table = getRouteTableForInterfaceLocked(interface);
if (table == RT_TABLE_UNSPEC) {
diff --git a/server/TcpSocketMonitor.cpp b/server/TcpSocketMonitor.cpp
index 864c3be..3e620b1 100644
--- a/server/TcpSocketMonitor.cpp
+++ b/server/TcpSocketMonitor.cpp
@@ -97,7 +97,7 @@
const milliseconds TcpSocketMonitor::kDefaultPollingInterval = milliseconds(30000);
void TcpSocketMonitor::dump(DumpWriter& dw) {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
dw.println("TcpSocketMonitor");
ScopedIndent tcpSocketMonitorDetails(dw);
@@ -150,7 +150,7 @@
}
void TcpSocketMonitor::setPollingInterval(milliseconds nextSleepDurationMs) {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
mNextSleepDurationMs = nextSleepDurationMs;
@@ -160,7 +160,7 @@
void TcpSocketMonitor::resumePolling() {
bool wasSuspended;
{
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
wasSuspended = mIsSuspended;
mIsSuspended = false;
@@ -173,7 +173,7 @@
}
void TcpSocketMonitor::suspendPolling() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
bool wasSuspended = mIsSuspended;
mIsSuspended = true;
@@ -185,7 +185,7 @@
}
void TcpSocketMonitor::poll() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
if (mIsSuspended) {
return;
@@ -252,7 +252,7 @@
bool isSuspended;
milliseconds nextSleepDurationMs;
{
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
isSuspended = mIsSuspended;
nextSleepDurationMs= mNextSleepDurationMs;
}
@@ -266,7 +266,7 @@
}
bool TcpSocketMonitor::isRunning() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
return mIsRunning;
}
@@ -313,7 +313,7 @@
}
TcpSocketMonitor::TcpSocketMonitor() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
mNextSleepDurationMs = kDefaultPollingInterval;
mIsRunning = true;
@@ -329,7 +329,7 @@
TcpSocketMonitor::~TcpSocketMonitor() {
{
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
mIsRunning = false;
mIsSuspended = true;
}
diff --git a/server/TrafficController.cpp b/server/TrafficController.cpp
index bb7a0ad..0ecffa2 100644
--- a/server/TrafficController.cpp
+++ b/server/TrafficController.cpp
@@ -144,7 +144,7 @@
}
Status TrafficController::initMaps() {
- std::lock_guard<std::mutex> ownerMapGuard(mOwnerMatchMutex);
+ std::lock_guard ownerMapGuard(mOwnerMatchMutex);
RETURN_IF_NOT_OK(
mCookieTagMap.getOrCreate(COOKIE_UID_MAP_SIZE, COOKIE_TAG_MAP_PATH, BPF_MAP_TYPE_HASH));
@@ -416,7 +416,7 @@
Status TrafficController::updateOwnerMapEntry(UidOwnerMatchType match, uid_t uid, FirewallRule rule,
FirewallType type) {
- std::lock_guard<std::mutex> guard(mOwnerMatchMutex);
+ std::lock_guard guard(mOwnerMatchMutex);
if ((rule == ALLOW && type == WHITELIST) || (rule == DENY && type == BLACKLIST)) {
RETURN_IF_NOT_OK(addMatch(mUidOwnerMap, uid, match));
} else if ((rule == ALLOW && type == BLACKLIST) || (rule == DENY && type == WHITELIST)) {
@@ -470,7 +470,7 @@
Status TrafficController::updateUidOwnerMap(const std::vector<std::string>& appStrUids,
BandwidthController::IptJumpOp jumpHandling,
BandwidthController::IptOp op) {
- std::lock_guard<std::mutex> guard(mOwnerMatchMutex);
+ std::lock_guard guard(mOwnerMatchMutex);
UidOwnerMatchType match = jumpOpToMatch(jumpHandling);
if (match == NO_MATCH) {
return statusFromErrno(
@@ -527,7 +527,7 @@
Status TrafficController::replaceUidsInMap(const UidOwnerMatchType match,
const std::vector<int32_t>& uids) {
- std::lock_guard<std::mutex> guard(mOwnerMatchMutex);
+ std::lock_guard guard(mOwnerMatchMutex);
std::set<int32_t> uidSet(uids.begin(), uids.end());
std::vector<uint32_t> uidsToDelete;
auto getUidsToDelete = [&uidsToDelete, &uidSet](const uint32_t& key,
@@ -579,7 +579,7 @@
}
int TrafficController::toggleUidOwnerMap(ChildChain chain, bool enable) {
- std::lock_guard<std::mutex> guard(mOwnerMatchMutex);
+ std::lock_guard guard(mOwnerMatchMutex);
uint32_t key = CONFIGURATION_KEY;
auto oldConfiguration = mConfigurationMap.readValue(key);
if (!isOk(oldConfiguration)) {
@@ -648,7 +648,7 @@
const String16 TrafficController::DUMP_KEYWORD = String16("trafficcontroller");
void TrafficController::dump(DumpWriter& dw, bool verbose) {
- std::lock_guard<std::mutex> ownerMapGuard(mOwnerMatchMutex);
+ std::lock_guard ownerMapGuard(mOwnerMatchMutex);
ScopedIndent indentTop(dw);
dw.println("TrafficController");
diff --git a/server/TrafficControllerTest.cpp b/server/TrafficControllerTest.cpp
index c3b6a6e..61622d8 100644
--- a/server/TrafficControllerTest.cpp
+++ b/server/TrafficControllerTest.cpp
@@ -79,7 +79,7 @@
BpfMap<uint32_t, uint8_t> mFakeUidOwnerMap;
void SetUp() {
- std::lock_guard<std::mutex> ownerGuard(mTc.mOwnerMatchMutex);
+ std::lock_guard ownerGuard(mTc.mOwnerMatchMutex);
SKIP_IF_BPF_NOT_SUPPORTED;
mFakeCookieTagMap.reset(createMap(BPF_MAP_TYPE_HASH, sizeof(uint64_t),
diff --git a/server/dns/DnsTlsDispatcher.cpp b/server/dns/DnsTlsDispatcher.cpp
index 95fbb9a..c336f54 100644
--- a/server/dns/DnsTlsDispatcher.cpp
+++ b/server/dns/DnsTlsDispatcher.cpp
@@ -44,7 +44,7 @@
// Pull out any servers for which we might have existing connections and
// place them at the from the list of servers to try.
{
- std::lock_guard<std::mutex> guard(sLock);
+ std::lock_guard guard(sLock);
for (const auto& tlsServer : tlsServers) {
const Key key = std::make_pair(mark, tlsServer);
@@ -113,7 +113,7 @@
const Key key = std::make_pair(mark, server);
Transport* xport;
{
- std::lock_guard<std::mutex> guard(sLock);
+ std::lock_guard guard(sLock);
auto it = mStore.find(key);
if (it == mStore.end()) {
xport = new Transport(server, mark, mFactory.get());
@@ -144,7 +144,7 @@
auto now = std::chrono::steady_clock::now();
{
- std::lock_guard<std::mutex> guard(sLock);
+ std::lock_guard guard(sLock);
--xport->useCount;
xport->lastUsed = now;
cleanup(now);
diff --git a/server/dns/DnsTlsQueryMap.cpp b/server/dns/DnsTlsQueryMap.cpp
index 760b26a..2ef050c 100644
--- a/server/dns/DnsTlsQueryMap.cpp
+++ b/server/dns/DnsTlsQueryMap.cpp
@@ -25,7 +25,7 @@
namespace net {
std::unique_ptr<DnsTlsQueryMap::QueryFuture> DnsTlsQueryMap::recordQuery(const Slice query) {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
// Store the query so it can be matched to the response or reissued.
if (query.size() < 2) {
@@ -54,7 +54,7 @@
}
void DnsTlsQueryMap::markTried(uint16_t newId) {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
auto it = mQueries.find(newId);
if (it != mQueries.end()) {
it->second.tries++;
@@ -62,7 +62,7 @@
}
void DnsTlsQueryMap::cleanup() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
for (auto it = mQueries.begin(); it != mQueries.end();) {
auto& p = it->second;
if (p.tries >= kMaxTries) {
@@ -101,7 +101,7 @@
}
std::vector<DnsTlsQueryMap::Query> DnsTlsQueryMap::getAll() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
std::vector<DnsTlsQueryMap::Query> queries;
for (auto& q : mQueries) {
queries.push_back(q.second.query);
@@ -110,12 +110,12 @@
}
bool DnsTlsQueryMap::empty() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
return mQueries.empty();
}
void DnsTlsQueryMap::clear() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
for (auto& q : mQueries) {
expire(&q.second);
}
@@ -129,7 +129,7 @@
return;
}
uint16_t id = response[0] << 8 | response[1];
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
auto it = mQueries.find(id);
if (it == mQueries.end()) {
ALOGW("Discarding response: unknown ID %d", id);
diff --git a/server/dns/DnsTlsSessionCache.cpp b/server/dns/DnsTlsSessionCache.cpp
index 880b773..58e36b5 100644
--- a/server/dns/DnsTlsSessionCache.cpp
+++ b/server/dns/DnsTlsSessionCache.cpp
@@ -54,7 +54,7 @@
}
void DnsTlsSessionCache::recordSession(SSL_SESSION* session) {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
mSessions.emplace_front(session);
if (mSessions.size() > kMaxSize) {
ALOGV("Too many sessions; trimming");
@@ -63,7 +63,7 @@
}
bssl::UniquePtr<SSL_SESSION> DnsTlsSessionCache::getSession() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
if (mSessions.size() == 0) {
ALOGV("No known sessions");
return nullptr;
diff --git a/server/dns/DnsTlsSocket.cpp b/server/dns/DnsTlsSocket.cpp
index 8e25b1f..df48d91 100644
--- a/server/dns/DnsTlsSocket.cpp
+++ b/server/dns/DnsTlsSocket.cpp
@@ -132,7 +132,7 @@
bool DnsTlsSocket::initialize() {
// This method should only be called once, at the beginning, so locking should be
// unnecessary. This lock only serves to help catch bugs in code that calls this method.
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
if (mSslCtx) {
// This is a bug in the caller.
return false;
@@ -340,7 +340,7 @@
}
void DnsTlsSocket::loop() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
// Buffer at most one query.
Query q;
@@ -413,7 +413,7 @@
mIpcInFd.reset();
{
// Wait for the orderly shutdown to complete.
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
if (mLoopThread && std::this_thread::get_id() == mLoopThread->get_id()) {
ALOGE("Violation of re-entrance precondition");
return;
diff --git a/server/dns/DnsTlsTransport.cpp b/server/dns/DnsTlsTransport.cpp
index 033ae80..e94daff 100644
--- a/server/dns/DnsTlsTransport.cpp
+++ b/server/dns/DnsTlsTransport.cpp
@@ -36,7 +36,7 @@
namespace net {
std::future<DnsTlsTransport::Result> DnsTlsTransport::query(const netdutils::Slice query) {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
auto record = mQueries.recordQuery(query);
if (!record) {
@@ -89,7 +89,7 @@
}
void DnsTlsTransport::onClosed() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
if (mClosing) {
return;
}
@@ -109,7 +109,7 @@
}
void DnsTlsTransport::doReconnect() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
if (mClosing) {
return;
}
@@ -126,7 +126,7 @@
DnsTlsTransport::~DnsTlsTransport() {
ALOGV("Destructor");
{
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
ALOGV("Locked destruction procedure");
mQueries.clear();
mClosing = true;
diff --git a/tests/dns_responder/dns_responder.cpp b/tests/dns_responder/dns_responder.cpp
index 0ba3388..615e6f6 100644
--- a/tests/dns_responder/dns_responder.cpp
+++ b/tests/dns_responder/dns_responder.cpp
@@ -538,7 +538,7 @@
void DNSResponder::addMapping(const char* name, ns_type type,
const char* addr) {
- std::lock_guard<std::mutex> lock(mappings_mutex_);
+ std::lock_guard lock(mappings_mutex_);
auto it = mappings_.find(QueryKey(name, type));
if (it != mappings_.end()) {
ALOGI("Overwriting mapping for (%s, %s), previous address %s, new "
@@ -553,7 +553,7 @@
}
void DNSResponder::removeMapping(const char* name, ns_type type) {
- std::lock_guard<std::mutex> lock(mappings_mutex_);
+ std::lock_guard lock(mappings_mutex_);
auto it = mappings_.find(QueryKey(name, type));
if (it != mappings_.end()) {
ALOGI("Cannot remove mapping mapping from (%s, %s), not present", name,
@@ -641,7 +641,7 @@
epoll_fd_ = ep_fd;
socket_ = s;
{
- std::lock_guard<std::mutex> lock(update_mutex_);
+ std::lock_guard lock(update_mutex_);
handler_thread_ = std::thread(&DNSResponder::requestHandler, this);
}
ALOGI("server started successfully");
@@ -649,7 +649,7 @@
}
bool DNSResponder::stopServer() {
- std::lock_guard<std::mutex> lock(update_mutex_);
+ std::lock_guard lock(update_mutex_);
if (!running()) {
ALOGI("server not running");
return false;
@@ -670,12 +670,12 @@
}
std::vector<std::pair<std::string, ns_type >> DNSResponder::queries() const {
- std::lock_guard<std::mutex> lock(queries_mutex_);
+ std::lock_guard lock(queries_mutex_);
return queries_;
}
void DNSResponder::clearQueries() {
- std::lock_guard<std::mutex> lock(queries_mutex_);
+ std::lock_guard lock(queries_mutex_);
queries_.clear();
}
@@ -765,7 +765,7 @@
response_len);
}
{
- std::lock_guard<std::mutex> lock(queries_mutex_);
+ std::lock_guard lock(queries_mutex_);
for (const DNSQuestion& question : header.questions) {
queries_.push_back(make_pair(question.qname.name,
ns_type(question.qtype)));
@@ -808,7 +808,7 @@
bool DNSResponder::addAnswerRecords(const DNSQuestion& question,
std::vector<DNSRecord>* answers) const {
- std::lock_guard<std::mutex> guard(mappings_mutex_);
+ std::lock_guard guard(mappings_mutex_);
auto it = mappings_.find(QueryKey(question.qname.name, question.qtype));
if (it == mappings_.end()) {
// TODO(imaipi): handle correctly
diff --git a/tests/dns_responder/dns_tls_frontend.cpp b/tests/dns_responder/dns_tls_frontend.cpp
index 8c49254..b114280 100644
--- a/tests/dns_responder/dns_tls_frontend.cpp
+++ b/tests/dns_responder/dns_tls_frontend.cpp
@@ -256,7 +256,7 @@
freeaddrinfo(backend_ai_res);
{
- std::lock_guard<std::mutex> lock(update_mutex_);
+ std::lock_guard lock(update_mutex_);
handler_thread_ = std::thread(&DnsTlsFrontend::requestHandler, this);
}
ALOGI("server started successfully");
@@ -356,7 +356,7 @@
}
bool DnsTlsFrontend::stopServer() {
- std::lock_guard<std::mutex> lock(update_mutex_);
+ std::lock_guard lock(update_mutex_);
if (!running()) {
ALOGI("server not running");
return false;
diff --git a/tests/dns_tls_test.cpp b/tests/dns_tls_test.cpp
index ba71f24..cbedc34 100644
--- a/tests/dns_tls_test.cpp
+++ b/tests/dns_tls_test.cpp
@@ -186,13 +186,13 @@
class FakeSocketDelay : public IDnsTlsSocket {
public:
explicit FakeSocketDelay(IDnsTlsSocketObserver* observer) : mObserver(observer) {}
- ~FakeSocketDelay() { std::lock_guard<std::mutex> guard(mLock); }
+ ~FakeSocketDelay() { std::lock_guard guard(mLock); }
static size_t sDelay;
static bool sReverse;
bool query(uint16_t id, const Slice query) override {
ALOGD("FakeSocketDelay got query with ID %d", int(id));
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
// Check for duplicate IDs.
EXPECT_EQ(0U, mIds.count(id));
mIds.insert(id);
@@ -209,7 +209,7 @@
private:
void sendResponses() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
if (sReverse) {
std::reverse(std::begin(mResponses), std::end(mResponses));
}
@@ -423,7 +423,7 @@
~FakeSocketLimited() {
{
ALOGD("~FakeSocketLimited acquiring mLock");
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
ALOGD("~FakeSocketLimited acquired mLock");
for (auto& thread : mThreads) {
ALOGD("~FakeSocketLimited joining response thread");
@@ -441,7 +441,7 @@
}
bool query(uint16_t id, const Slice query) override {
ALOGD("FakeSocketLimited::query acquiring mLock");
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
ALOGD("FakeSocketLimited::query acquired mLock");
++mQueries;
@@ -462,7 +462,7 @@
void sendClose() {
{
ALOGD("FakeSocketLimited::sendClose acquiring mLock");
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
ALOGD("FakeSocketLimited::sendClose acquired mLock");
for (auto& thread : mThreads) {
ALOGD("FakeSocketLimited::sendClose joining response thread");
@@ -536,13 +536,13 @@
mThreads.emplace_back(&IDnsTlsSocketObserver::onResponse, mObserver, make_query(ID + 1, SIZE));
}
~FakeSocketGarbage() {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
for (auto& thread : mThreads) {
thread.join();
}
}
bool query(uint16_t id, const Slice query) override {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
// Return the response twice.
auto echo = make_echo(id, query);
mThreads.emplace_back(&IDnsTlsSocketObserver::onResponse, mObserver, echo);
@@ -608,7 +608,7 @@
unsigned mark,
IDnsTlsSocketObserver* observer,
DnsTlsSessionCache* cache ATTRIBUTE_UNUSED) override {
- std::lock_guard<std::mutex> guard(mLock);
+ std::lock_guard guard(mLock);
keys.emplace(mark, server);
return std::make_unique<T>(observer);
}