Fix a number of clang-tidy warnings
No functional changes. There are still plenty of atoi() warnings,
but most of those should go away as we convert all remaining
commands to binder.
Test: atest netd_integration_test netd_utils_test
Change-Id: I15d6fd9523debf996dec9d0b3c3d7c11ae6444f3
diff --git a/server/TcpSocketMonitor.cpp b/server/TcpSocketMonitor.cpp
index 3e620b1..9439d77 100644
--- a/server/TcpSocketMonitor.cpp
+++ b/server/TcpSocketMonitor.cpp
@@ -16,7 +16,8 @@
#define LOG_TAG "TcpSocketMonitor"
-#include <iomanip>
+#include <chrono>
+#include <cinttypes>
#include <thread>
#include <vector>
@@ -110,7 +111,7 @@
if (!mNetworkStats.empty()) {
dw.blankline();
dw.println("Network stats:");
- for (const auto& stats : mNetworkStats) {
+ for (const std::pair<uint32_t, TcpStats>& stats : mNetworkStats) {
if (stats.second.nSockets == 0) {
continue;
}
@@ -126,9 +127,9 @@
if (!mSocketEntries.empty()) {
dw.blankline();
dw.println("Socket entries:");
- for (const auto& stats : mSocketEntries) {
- dw.println("netId=%u uid=%u cookie=%llu",
- stats.second.mark.netId, stats.second.uid, (unsigned long long) stats.first);
+ for (const std::pair<uint64_t, SocketEntry>& stats : mSocketEntries) {
+ dw.println("netId=%u uid=%u cookie=%" PRIu64, stats.second.mark.netId, stats.second.uid,
+ stats.first);
}
}
diff --git a/server/ndc.cpp b/server/ndc.cpp
index 836e802..ec59637 100644
--- a/server/ndc.cpp
+++ b/server/ndc.cpp
@@ -156,12 +156,10 @@
for (i = 0; i < rc; i++) {
if (buffer[i] == '\0') {
- int code;
char tmp[4];
-
strncpy(tmp, buffer + offset, 3);
tmp[3] = '\0';
- code = atoi(tmp);
+ long code = strtol(tmp, nullptr, 10);
printf("%s\n", buffer + offset);
if (stop_after_cmd) {
diff --git a/tests/binder_test.cpp b/tests/binder_test.cpp
index fc87ba8..b888421 100644
--- a/tests/binder_test.cpp
+++ b/tests/binder_test.cpp
@@ -60,22 +60,28 @@
#define RAW_TABLE "raw"
#define MANGLE_TABLE "mangle"
-using namespace android;
-using namespace android::base;
-using namespace android::binder;
+namespace binder = android::binder;
+namespace netdutils = android::netdutils;
+
+using android::IBinder;
+using android::IServiceManager;
+using android::sp;
+using android::String16;
+using android::String8;
+using android::base::Join;
using android::base::StartsWith;
+using android::base::StringPrintf;
using android::bpf::hasBpfSupport;
using android::net::INetd;
using android::net::TunInterface;
using android::net::UidRange;
using android::net::XfrmController;
-using android::netdutils::sSyscalls;
using android::os::PersistableBundle;
-#define SKIP_IF_BPF_SUPPORTED \
- do { \
- if (hasBpfSupport()) return; \
- } while (0);
+#define SKIP_IF_BPF_SUPPORTED \
+ do { \
+ if (hasBpfSupport()) return; \
+ } while (0)
static const char* IP_RULE_V4 = "-4";
static const char* IP_RULE_V6 = "-6";
@@ -87,13 +93,12 @@
static const std::string ESP_ALLOW_RULE("esp");
class BinderTest : public ::testing::Test {
-
-public:
+ public:
BinderTest() {
- sp<IServiceManager> sm = defaultServiceManager();
+ sp<IServiceManager> sm = android::defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("netd"));
if (binder != nullptr) {
- mNetd = interface_cast<INetd>(binder);
+ mNetd = android::interface_cast<INetd>(binder);
}
}
@@ -121,7 +126,7 @@
static void fakeRemoteSocketPair(int *clientSocket, int *serverSocket, int *acceptedSocket);
-protected:
+ protected:
sp<INetd> mNetd;
static TunInterface sTun;
};
@@ -129,17 +134,17 @@
TunInterface BinderTest::sTun;
class TimedOperation : public Stopwatch {
-public:
+ public:
explicit TimedOperation(const std::string &name): mName(name) {}
virtual ~TimedOperation() {
fprintf(stderr, " %s: %6.1f ms\n", mName.c_str(), timeTaken());
}
-private:
+ private:
std::string mName;
};
-TEST_F(BinderTest, TestIsAlive) {
+TEST_F(BinderTest, IsAlive) {
TimedOperation t("isAlive RPC");
bool isAlive = false;
mNetd->isAlive(&isAlive);
@@ -207,7 +212,7 @@
iptablesRuleExists(IP6TABLES_PATH, chainName, ESP_ALLOW_RULE);
}
-TEST_F(BinderTest, TestFirewallReplaceUidChain) {
+TEST_F(BinderTest, FirewallReplaceUidChain) {
SKIP_IF_BPF_SUPPORTED;
std::string chainName = StringPrintf("netd_binder_test_%u", arc4random_uniform(10000));
@@ -260,7 +265,7 @@
EXPECT_EQ(false, ret);
}
-TEST_F(BinderTest, TestVirtualTunnelInterface) {
+TEST_F(BinderTest, VirtualTunnelInterface) {
const struct TestData {
const std::string family;
const std::string deviceName;
@@ -317,7 +322,7 @@
return (status.ok() == expectOk);
}
-TEST_F(BinderTest, TestXfrmControllerInit) {
+TEST_F(BinderTest, XfrmControllerInit) {
netdutils::Status status;
status = XfrmController::Init();
SCOPED_TRACE(status);
@@ -407,7 +412,7 @@
return enabled6;
}
-TEST_F(BinderTest, TestBandwidthEnableDataSaver) {
+TEST_F(BinderTest, BandwidthEnableDataSaver) {
const int wasEnabled = getDataSaverState();
ASSERT_NE(-1, wasEnabled);
@@ -456,7 +461,7 @@
return existsIp4;
}
-TEST_F(BinderTest, TestNetworkInterfaces) {
+TEST_F(BinderTest, NetworkInterfaces) {
EXPECT_TRUE(mNetd->networkCreatePhysical(TEST_NETID1, "").isOk());
EXPECT_EQ(EEXIST, mNetd->networkCreatePhysical(TEST_NETID1, "").serviceSpecificErrorCode());
EXPECT_EQ(EEXIST, mNetd->networkCreateVpn(TEST_NETID1, false, true).serviceSpecificErrorCode());
@@ -471,7 +476,7 @@
EXPECT_TRUE(mNetd->networkDestroy(TEST_NETID2).isOk());
}
-TEST_F(BinderTest, TestNetworkUidRules) {
+TEST_F(BinderTest, NetworkUidRules) {
const uint32_t RULE_PRIORITY_SECURE_VPN = 12000;
EXPECT_TRUE(mNetd->networkCreateVpn(TEST_NETID1, false, true).isOk());
@@ -500,7 +505,7 @@
EXPECT_EQ(ENONET, mNetd->networkDestroy(TEST_NETID1).serviceSpecificErrorCode());
}
-TEST_F(BinderTest, TestNetworkRejectNonSecureVpn) {
+TEST_F(BinderTest, NetworkRejectNonSecureVpn) {
constexpr uint32_t RULE_PRIORITY = 12500;
std::vector<UidRange> uidRanges = {
@@ -580,7 +585,7 @@
EXPECT_EQ(ECONNRESET, err);
}
-TEST_F(BinderTest, TestSocketDestroy) {
+TEST_F(BinderTest, SocketDestroy) {
int clientSocket, serverSocket, acceptedSocket;
ASSERT_NO_FATAL_FAILURE(fakeRemoteSocketPair(&clientSocket, &serverSocket, &acceptedSocket));
@@ -730,7 +735,7 @@
} // namespace
-TEST_F(BinderTest, TestInterfaceAddRemoveAddress) {
+TEST_F(BinderTest, InterfaceAddRemoveAddress) {
static const struct TestData {
const char *addrString;
const int prefixLength;
@@ -782,7 +787,7 @@
}
}
-TEST_F(BinderTest, TestSetProcSysNet) {
+TEST_F(BinderTest, SetProcSysNet) {
static const struct TestData {
const int family;
const int which;
@@ -828,7 +833,7 @@
return std::string(reinterpret_cast<char*>(output_bytes));
}
-TEST_F(BinderTest, TestSetResolverConfiguration_Tls) {
+TEST_F(BinderTest, SetResolverConfiguration_Tls) {
const std::vector<std::string> LOCALLY_ASSIGNED_DNS{"8.8.8.8", "2001:4860:4860::8888"};
std::vector<uint8_t> fp(SHA256_SIZE);
std::vector<uint8_t> short_fp(1);
@@ -883,6 +888,8 @@
"", {}, {});
}
+namespace {
+
void expectNoTestCounterRules() {
for (const auto& binary : { IPTABLES_PATH, IP6TABLES_PATH }) {
std::string command = StringPrintf("%s -w -nvL tetherctrl_counters", binary);
@@ -904,7 +911,9 @@
path, if2.c_str(), if1.c_str()));
}
-TEST_F(BinderTest, TestTetherGetStats) {
+} // namespace
+
+TEST_F(BinderTest, TetherGetStats) {
expectNoTestCounterRules();
// TODO: fold this into more comprehensive tests once we have binder RPCs for enabling and
@@ -959,6 +968,7 @@
expectNoTestCounterRules();
}
+
namespace {
constexpr char chainName_LOCAL_RAW_PREROUTING[] = "idletimer_raw_PREROUTING";
@@ -1008,7 +1018,9 @@
}
}
-TEST_F(BinderTest, TestIdletimerAddRemoveInterface) {
+} // namespace
+
+TEST_F(BinderTest, IdletimerAddRemoveInterface) {
// TODO: We will get error in if expectIdletimerInterfaceRuleNotExists if there are the same
// rule in the table. Because we only check the result after calling remove function. We might
// check the actual rule which is removed by our function (maybe compare the results between
@@ -1033,8 +1045,6 @@
}
}
-} // namespace
-
namespace {
constexpr char STRICT_OUTPUT[] = "st_OUTPUT";
@@ -1072,7 +1082,9 @@
}
}
-TEST_F(BinderTest, TestStrictSetUidCleartextPenalty) {
+} // namespace
+
+TEST_F(BinderTest, StrictSetUidCleartextPenalty) {
binder::Status status;
int32_t uid = randomUid();
@@ -1100,14 +1112,16 @@
EXPECT_EQ(EINVAL, status.serviceSpecificErrorCode());
}
-} // namespace
+namespace {
static bool processExists(const std::string& processName) {
std::string cmd = StringPrintf("ps -A | grep '%s'", processName.c_str());
return (runCommand(cmd.c_str()).size()) ? true : false;
}
-TEST_F(BinderTest, TestClatdStartStop) {
+} // namespace
+
+TEST_F(BinderTest, ClatdStartStop) {
binder::Status status;
// use dummy0 for test since it is set ready
static const char testIf[] = "dummy0";
@@ -1208,4 +1222,4 @@
status = mNetd->ipfwdRemoveInterfaceForward(testFromIf, testToIf);
EXPECT_TRUE(status.isOk()) << status.exceptionMessage();
expectIpfwdRuleNotExists(testFromIf, testToIf);
-}
\ No newline at end of file
+}
diff --git a/tests/bpf_base_test.cpp b/tests/bpf_base_test.cpp
index ead06bd..4a96979 100644
--- a/tests/bpf_base_test.cpp
+++ b/tests/bpf_base_test.cpp
@@ -36,8 +36,6 @@
#include "bpf/BpfMap.h"
#include "bpf/BpfUtils.h"
-using namespace android::bpf;
-
using android::base::unique_fd;
using android::netdutils::status::ok;
using android::netdutils::StatusOr;
diff --git a/tests/dns_tls_test.cpp b/tests/dns_tls_test.cpp
index cbedc34..f8070bd 100644
--- a/tests/dns_tls_test.cpp
+++ b/tests/dns_tls_test.cpp
@@ -172,7 +172,9 @@
DnsTlsTransport transport(SERVER1, MARK, &factory);
std::vector<std::future<DnsTlsTransport::Result>> results;
// Fewer than 65536 queries to avoid ID exhaustion.
- for (int i = 0; i < 10000; ++i) {
+ const int num_queries = 10000;
+ results.reserve(num_queries);
+ for (int i = 0; i < num_queries; ++i) {
results.push_back(transport.query(makeSlice(QUERY)));
}
for (auto& result : results) {
@@ -236,6 +238,7 @@
DnsTlsTransport transport(SERVER1, MARK, &factory);
std::vector<std::future<DnsTlsTransport::Result>> results;
// Fewer than 65536 queries to avoid ID exhaustion.
+ results.reserve(FakeSocketDelay::sDelay);
for (size_t i = 0; i < FakeSocketDelay::sDelay; ++i) {
results.push_back(transport.query(makeSlice(QUERY)));
}
@@ -254,6 +257,7 @@
std::vector<std::future<DnsTlsTransport::Result>> results;
// Exactly 65536 queries should still be possible in parallel,
// even if they all have the same original ID.
+ results.reserve(FakeSocketDelay::sDelay);
for (size_t i = 0; i < FakeSocketDelay::sDelay; ++i) {
results.push_back(transport.query(makeSlice(QUERY)));
}
@@ -271,6 +275,7 @@
DnsTlsTransport transport(SERVER1, MARK, &factory);
std::vector<bytevec> queries(FakeSocketDelay::sDelay);
std::vector<std::future<DnsTlsTransport::Result>> results;
+ results.reserve(FakeSocketDelay::sDelay);
for (size_t i = 0; i < FakeSocketDelay::sDelay; ++i) {
queries[i] = make_query(i, SIZE);
results.push_back(transport.query(makeSlice(queries[i])));
@@ -291,6 +296,7 @@
std::vector<std::future<DnsTlsTransport::Result>> results;
// Exactly 65536 queries should still be possible in parallel,
// and they should all be mapped correctly back to the original ID.
+ results.reserve(FakeSocketDelay::sDelay);
for (size_t i = 0; i < FakeSocketDelay::sDelay; ++i) {
queries[i] = make_query(i, SIZE);
results.push_back(transport.query(makeSlice(queries[i])));
@@ -303,15 +309,17 @@
}
TEST_F(TransportTest, IdExhaustion) {
+ const int num_queries = 65536;
// A delay of 65537 is unreachable, because the maximum number
// of outstanding queries is 65536.
- FakeSocketDelay::sDelay = 65537;
+ FakeSocketDelay::sDelay = num_queries + 1;
FakeSocketDelay::sReverse = false;
FakeSocketFactory<FakeSocketDelay> factory;
DnsTlsTransport transport(SERVER1, MARK, &factory);
std::vector<std::future<DnsTlsTransport::Result>> results;
// Issue the maximum number of queries.
- for (int i = 0; i < 65536; ++i) {
+ results.reserve(num_queries);
+ for (int i = 0; i < num_queries; ++i) {
results.push_back(transport.query(makeSlice(QUERY)));
}
@@ -336,6 +344,7 @@
DnsTlsTransport transport(SERVER1, MARK, &factory);
std::vector<bytevec> queries(FakeSocketDelay::sDelay);
std::vector<std::future<DnsTlsTransport::Result>> results;
+ results.reserve(FakeSocketDelay::sDelay);
for (size_t i = 0; i < FakeSocketDelay::sDelay; ++i) {
queries[i] = make_query(i, SIZE);
results.push_back(transport.query(makeSlice(queries[i])));
@@ -354,6 +363,7 @@
DnsTlsTransport transport(SERVER1, MARK, &factory);
std::vector<bytevec> queries(FakeSocketDelay::sDelay);
std::vector<std::future<DnsTlsTransport::Result>> results;
+ results.reserve(FakeSocketDelay::sDelay);
for (size_t i = 0; i < FakeSocketDelay::sDelay; ++i) {
queries[i] = make_query(i, SIZE);
results.push_back(transport.query(makeSlice(queries[i])));
@@ -493,6 +503,7 @@
// the socket will close. Transport will retry them all, until they
// all hit the retry limit and expire.
std::vector<std::future<DnsTlsTransport::Result>> results;
+ results.reserve(FakeSocketLimited::sLimit);
for (int i = 0; i < FakeSocketLimited::sLimit; ++i) {
results.push_back(transport.query(makeSlice(QUERY)));
}
@@ -511,9 +522,10 @@
// Queue up 100 queries, alternating "short" which will be served and "long"
// which will be dropped.
- int num_queries = 10 * FakeSocketLimited::sLimit;
+ const int num_queries = 10 * FakeSocketLimited::sLimit;
std::vector<bytevec> queries(num_queries);
std::vector<std::future<DnsTlsTransport::Result>> results;
+ results.reserve(num_queries);
for (int i = 0; i < num_queries; ++i) {
queries[i] = make_query(i, SIZE + (i % 2));
results.push_back(transport.query(makeSlice(queries[i])));