Merge "Delete dump_bytes() and its helper functions"
diff --git a/Android.bp b/Android.bp
index 059fa86..6d566d6 100644
--- a/Android.bp
+++ b/Android.bp
@@ -157,22 +157,23 @@
srcs: [
"tests/dns_responder/dns_responder.cpp",
"dnsresolver_binder_test.cpp",
- "resolver_test.cpp",
+ "resolv_integration_test.cpp",
],
header_libs: [
"libnetd_resolv_headers",
],
shared_libs: [
"libbpf_android",
- "libbase",
"libbinder",
"libcrypto",
+ "liblog",
"libnetd_client",
"libssl",
"libutils",
],
static_libs: [
"dnsresolver_aidl_interface-cpp",
+ "libbase",
"libgmock",
"libnetd_test_dnsresponder",
"libnetd_test_metrics_listener",
@@ -197,19 +198,23 @@
//TODO: drop root privileges and make it be an real unit test.
defaults: ["netd_defaults"],
srcs: [
- "dns_tls_test.cpp",
- "libnetd_resolv_test.cpp",
- "res_cache_test.cpp",
+ "resolv_cache_unit_test.cpp",
+ "resolv_tls_unit_test.cpp",
+ "resolv_unit_test.cpp",
],
shared_libs: [
"libbase",
"libcrypto",
"libcutils",
"libssl",
+ "libbinder_ndk",
],
static_libs: [
"dnsresolver_aidl_interface-V2-cpp",
+ "dnsresolver_aidl_interface-V2-ndk_platform",
+ "netd_event_listener_interface-V1-ndk_platform",
"libgmock",
+ "liblog",
"libnetd_resolv",
"libnetd_test_dnsresponder",
"libnetd_test_resolv_utils",
diff --git a/DnsResolver.cpp b/DnsResolver.cpp
index 092ff3f..70ecc7e 100644
--- a/DnsResolver.cpp
+++ b/DnsResolver.cpp
@@ -30,7 +30,15 @@
LOG(INFO) << __func__ << ": Initializing resolver";
resolv_set_log_severity(android::base::WARNING);
- android::net::gResNetdCallbacks = *callbacks;
+ using android::net::gApiLevel;
+ gApiLevel = android::base::GetUintProperty<uint64_t>("ro.build.version.sdk", 0);
+ using android::net::gResNetdCallbacks;
+ gResNetdCallbacks.check_calling_permission = callbacks->check_calling_permission;
+ gResNetdCallbacks.get_network_context = callbacks->get_network_context;
+ gResNetdCallbacks.log = callbacks->log;
+ if (gApiLevel >= 30) {
+ gResNetdCallbacks.tagSocket = callbacks->tagSocket;
+ }
android::net::gDnsResolv = android::net::DnsResolver::getInstance();
return android::net::gDnsResolv->start();
}
@@ -41,8 +49,14 @@
namespace {
bool verifyCallbacks() {
- return gResNetdCallbacks.check_calling_permission && gResNetdCallbacks.get_network_context &&
- gResNetdCallbacks.log;
+ if (!(gResNetdCallbacks.check_calling_permission && gResNetdCallbacks.get_network_context &&
+ gResNetdCallbacks.log)) {
+ return false;
+ }
+ if (gApiLevel >= 30) {
+ return gResNetdCallbacks.tagSocket != nullptr;
+ }
+ return true;
}
} // namespace
@@ -50,6 +64,7 @@
DnsResolver* gDnsResolv = nullptr;
ResolverNetdCallbacks gResNetdCallbacks;
netdutils::Log gDnsResolverLog("dnsResolver");
+uint64_t gApiLevel = 0;
DnsResolver* DnsResolver::getInstance() {
// Instantiated on first use.
diff --git a/DnsResolver.h b/DnsResolver.h
index 6d8d2ce..2efa341 100644
--- a/DnsResolver.h
+++ b/DnsResolver.h
@@ -44,6 +44,7 @@
extern DnsResolver* gDnsResolv;
extern ResolverNetdCallbacks gResNetdCallbacks;
extern netdutils::Log gDnsResolverLog;
+extern uint64_t gApiLevel;
} // namespace net
} // namespace android
diff --git a/DnsTlsSocket.cpp b/DnsTlsSocket.cpp
index 838886a..14aadd4 100644
--- a/DnsTlsSocket.cpp
+++ b/DnsTlsSocket.cpp
@@ -39,6 +39,7 @@
#include <netdutils/ThreadUtil.h>
#include "private/android_filesystem_config.h" // AID_DNS
+#include "resolv_private.h"
// NOTE: Inject CA certificate for internal testing -- do NOT enable in production builds
#ifndef RESOLV_INJECT_CA_CERTIFICATE
@@ -96,9 +97,7 @@
return Status(errno);
}
- if (fchown(mSslFd.get(), AID_DNS, -1) == -1) {
- LOG(WARNING) << "Failed to chown socket: %s" << strerror(errno);
- }
+ resolv_tag_socket(mSslFd.get(), AID_DNS);
const socklen_t len = sizeof(mMark);
if (setsockopt(mSslFd.get(), SOL_SOCKET, SO_MARK, &mMark, len) == -1) {
diff --git a/README-DoT.md b/README-DoT.md
new file mode 100644
index 0000000..56fe772
--- /dev/null
+++ b/README-DoT.md
@@ -0,0 +1,127 @@
+# DNS-over-TLS query forwarder design
+
+## Overview
+
+The DNS-over-TLS query forwarder consists of five classes:
+ * `DnsTlsDispatcher`
+ * `DnsTlsTransport`
+ * `DnsTlsQueryMap`
+ * `DnsTlsSessionCache`
+ * `DnsTlsSocket`
+
+`DnsTlsDispatcher` is a singleton class whose `query` method is the DoT module's
+only public interface. `DnsTlsDispatcher` is just a table holding the
+`DnsTlsTransport` for each server (represented by a `DnsTlsServer` struct) and
+network. `DnsTlsDispatcher` also blocks each query thread, waiting on a
+`std::future` returned by `DnsTlsTransport` that represents the response.
+
+`DnsTlsTransport` sends each query over a `DnsTlsSocket`, opening a
+new one if necessary. It also has to listen for responses from the
+`DnsTlsSocket`, which happen on a different thread.
+`IDnsTlsSocketObserver` is an interface defining how `DnsTlsSocket` returns
+responses to `DnsTlsTransport`.
+
+`DnsTlsQueryMap` and `DnsTlsSessionCache` are helper classes owned by `DnsTlsTransport`.
+`DnsTlsQueryMap` handles ID renumbering and query-response pairing.
+`DnsTlsSessionCache` allows TLS session resumption.
+
+`DnsTlsSocket` interleaves all queries onto a single socket, and reports all
+responses to `DnsTlsTransport` (through the `IDnsTlsObserver` interface). It doesn't
+know anything about which queries correspond to which responses, and does not retain
+state to indicate whether there is an outstanding query.
+
+## Threading
+
+### Overall patterns
+
+For clarity, each of the five classes in this design is thread-safe and holds one lock.
+Classes that spawn a helper thread call `thread::join()` in their destructor to ensure
+that it is cleaned up appropriately.
+
+All the classes here make full use of Clang thread annotations (and also null-pointer
+annotations) to minimize the likelihood of a latent threading bug. The unit tests are
+also heavily threaded to exercise this functionality.
+
+This code creates O(1) threads per socket, and does not create a new thread for each
+query or response. However, DnsProxyListener does create a thread for each query.
+
+### Threading in `DnsTlsSocket`
+
+`DnsTlsSocket` can receive queries on any thread, and send them over a
+"reliable datagram pipe" (`socketpair()` in `SOCK_SEQPACKET` mode).
+The query method writes a struct (containing a pointer to the query) to the pipe
+from its thread, and the loop thread (which owns the SSL socket)
+reads off the other end of the pipe. The pipe doesn't actually have a queue "inside";
+instead, any queueing happens by blocking the query thread until the
+socket thread can read the datagram off the other end.
+
+We need to pass messages between threads using a pipe, and not a condition variable
+or a thread-safe queue, because the socket thread has to be blocked
+in `poll()` waiting for data from the server, but also has to be woken
+up on inputs from the query threads. Therefore, inputs from the query
+threads have to arrive on a socket, so that `poll()` can listen for them.
+(There can only be a single thread because [you can't use different threads
+to read and write in OpenSSL](https://www.openssl.org/blog/blog/2017/02/21/threads/)).
+
+## ID renumbering
+
+`DnsTlsDispatcher` accepts queries that have colliding ID numbers and still sends them on
+a single socket. To avoid confusion at the server, `DnsTlsQueryMap` assigns each
+query a new ID for transmission, records the mapping from input IDs to sent IDs, and
+applies the inverse mapping to responses before returning them to the caller.
+
+`DnsTlsQueryMap` assigns each new query the ID number one greater than the largest
+ID number of an outstanding query. This means that ID numbers are initially sequential
+and usually small. If the largest possible ID number is already in use,
+`DnsTlsQueryMap` will scan the ID space to find an available ID, or fail the query
+if there are no available IDs. Queries will not block waiting for an ID number to
+become available.
+
+## Time constants
+
+`DnsTlsSocket` imposes a 20-second inactivity timeout. A socket that has been idle for
+20 seconds will be closed. This sets the limit of tolerance for slow replies,
+which could happen as a result of malfunctioning authoritative DNS servers.
+If there are any pending queries, `DnsTlsTransport` will retry them.
+
+`DnsTlsQueryMap` imposes a retry limit of 3. `DnsTlsTransport` will retry the query up
+to 3 times before reporting failure to `DnsTlsDispatcher`.
+This limit helps to ensure proper functioning in the case of a recursive resolver that
+is malfunctioning or is flooded with requests that are stalled due to malfunctioning
+authoritative servers.
+
+`DnsTlsDispatcher` maintains a 5-minute timeout. Any `DnsTlsTransport` that has had no
+outstanding queries for 5 minutes will be destroyed at the next query on a different
+transport.
+This sets the limit on how long session tickets will be preserved during idle periods,
+because each `DnsTlsTransport` owns a `DnsTlsSessionCache`. Imposing this timeout
+increases latency on the first query after an idle period, but also helps to avoid
+unbounded memory usage.
+
+`DnsTlsSessionCache` sets a limit of 5 sessions in each cache, expiring the oldest one
+when the limit is reached. However, because the client code does not currently
+reuse sessions more than once, it should not be possible to hit this limit.
+
+## Testing
+
+Unit tests for DoT are in `resolv_tls_unit_test.cpp`. They cover all the classes except
+`DnsTlsSocket` (which requires `CAP_NET_ADMIN` because it uses `setsockopt(SO_MARK)`) and
+`DnsTlsSessionCache` (which requires integration with libssl). These classes are
+exercised by the integration tests in `resolv_integration_test.cpp`.
+
+### Dependency Injection
+
+For unit testing, we would like to be able to mock out `DnsTlsSocket`. This is
+particularly required for unit testing of `DnsTlsDispatcher` and `DnsTlsTransport`.
+To make these unit tests possible, this code uses a dependency injection pattern:
+`DnsTlsSocket` is produced by a `DnsTlsSocketFactory`, and both of these have a
+defined interface.
+
+`DnsTlsDispatcher`'s constructor takes an `IDnsTlsSocketFactory`,
+which in production is a `DnsTlsSocketFactory`. However, in unit tests, we can
+substitute a test factory that returns a fake socket, so that the unit tests can
+run without actually connecting over TLS to a test server. (The integration tests
+do actual TLS.)
+
+## Reference
+ * [BoringSSL API docs](https://commondatastorage.googleapis.com/chromium-boringssl-docs/headers.html)
diff --git a/README.md b/README.md
index b002a9c..35371ad 100644
--- a/README.md
+++ b/README.md
@@ -1,128 +1,3 @@
-# DNS-over-TLS query forwarder design
-
-## Overview
-
-The DNS-over-TLS query forwarder consists of five classes:
- * `DnsTlsDispatcher`
- * `DnsTlsTransport`
- * `DnsTlsQueryMap`
- * `DnsTlsSessionCache`
- * `DnsTlsSocket`
-
-`DnsTlsDispatcher` is a singleton class whose `query` method is the DnsTls's
-only public interface. `DnsTlsDispatcher` is just a table holding the
-`DnsTlsTransport` for each server (represented by a `DnsTlsServer` struct) and
-network. `DnsTlsDispatcher` also blocks each query thread, waiting on a
-`std::future` returned by `DnsTlsTransport` that represents the response.
-
-`DnsTlsTransport` sends each query over a `DnsTlsSocket`, opening a
-new one if necessary. It also has to listen for responses from the
-`DnsTlsSocket`, which happen on a different thread.
-`IDnsTlsSocketObserver` is an interface defining how `DnsTlsSocket` returns
-responses to `DnsTlsTransport`.
-
-`DnsTlsQueryMap` and `DnsTlsSessionCache` are helper classes owned by `DnsTlsTransport`.
-`DnsTlsQueryMap` handles ID renumbering and query-response pairing.
-`DnsTlsSessionCache` allows TLS session resumption.
-
-`DnsTlsSocket` interleaves all queries onto a single socket, and reports all
-responses to `DnsTlsTransport` (through the `IDnsTlsObserver` interface). It doesn't
-know anything about which queries correspond to which responses, and does not retain
-state to indicate whether there is an outstanding query.
-
-## Threading
-
-### Overall patterns
-
-For clarity, each of the five classes in this design is thread-safe and holds one lock.
-Classes that spawn a helper thread call `thread::join()` in their destructor to ensure
-that it is cleaned up appropriately.
-
-All the classes here make full use of Clang thread annotations (and also null-pointer
-annotations) to minimize the likelihood of a latent threading bug. The unit tests are
-also heavily threaded to exercise this functionality.
-
-This code creates O(1) threads per socket, and does not create a new thread for each
-query or response. However, DnsProxyListener does create a thread for each query.
-
-### Threading in `DnsTlsSocket`
-
-`DnsTlsSocket` can receive queries on any thread, and send them over a
-"reliable datagram pipe" (`socketpair()` in `SOCK_SEQPACKET` mode).
-The query method writes a struct (containing a pointer to the query) to the pipe
-from its thread, and the loop thread (which owns the SSL socket)
-reads off the other end of the pipe. The pipe doesn't actually have a queue "inside";
-instead, any queueing happens by blocking the query thread until the
-socket thread can read the datagram off the other end.
-
-We need to pass messages between threads using a pipe, and not a condition variable
-or a thread-safe queue, because the socket thread has to be blocked
-in `poll()` waiting for data from the server, but also has to be woken
-up on inputs from the query threads. Therefore, inputs from the query
-threads have to arrive on a socket, so that `poll()` can listen for them.
-(There can only be a single thread because [you can't use different threads
-to read and write in OpenSSL](https://www.openssl.org/blog/blog/2017/02/21/threads/)).
-
-## ID renumbering
-
-`DnsTlsDispatcher` accepts queries that have colliding ID numbers and still sends them on
-a single socket. To avoid confusion at the server, `DnsTlsQueryMap` assigns each
-query a new ID for transmission, records the mapping from input IDs to sent IDs, and
-applies the inverse mapping to responses before returning them to the caller.
-
-`DnsTlsQueryMap` assigns each new query the ID number one greater than the largest
-ID number of an outstanding query. This means that ID numbers are initially sequential
-and usually small. If the largest possible ID number is already in use,
-`DnsTlsQueryMap` will scan the ID space to find an available ID, or fail the query
-if there are no available IDs. Queries will not block waiting for an ID number to
-become available.
-
-## Time constants
-
-`DnsTlsSocket` imposes a 20-second inactivity timeout. A socket that has been idle for
-20 seconds will be closed. This sets the limit of tolerance for slow replies,
-which could happen as a result of malfunctioning authoritative DNS servers.
-If there are any pending queries, `DnsTlsTransport` will retry them.
-
-`DnsTlsQueryMap` imposes a retry limit of 3. `DnsTlsTransport` will retry the query up
-to 3 times before reporting failure to `DnsTlsDispatcher`.
-This limit helps to ensure proper functioning in the case of a recursive resolver that
-is malfunctioning or is flooded with requests that are stalled due to malfunctioning
-authoritative servers.
-
-`DnsTlsDispatcher` maintains a 5-minute timeout. Any `DnsTlsTransport` that has had no
-outstanding queries for 5 minutes will be destroyed at the next query on a different
-transport.
-This sets the limit on how long session tickets will be preserved during idle periods,
-because each `DnsTlsTransport` owns a `DnsTlsSessionCache`. Imposing this timeout
-increases latency on the first query after an idle period, but also helps to avoid
-unbounded memory usage.
-
-`DnsTlsSessionCache` sets a limit of 5 sessions in each cache, expiring the oldest one
-when the limit is reached. However, because the client code does not currently
-reuse sessions more than once, it should not be possible to hit this limit.
-
-## Testing
-
-Unit tests are in `dns_tls_test.cpp`. They cover all the classes except
-`DnsTlsSocket` (which requires `CAP_NET_ADMIN` because it uses `setsockopt(SO_MARK)`) and
-`DnsTlsSessionCache` (which requires integration with libssl). These classes are
-exercised by the integration tests in `../tests/resolv_test.cpp`.
-
-### Dependency Injection
-
-For unit testing, we would like to be able to mock out `DnsTlsSocket`. This is
-particularly required for unit testing of `DnsTlsDispatcher` and `DnsTlsTransport`.
-To make these unit tests possible, this code uses a dependency injection pattern:
-`DnsTlsSocket` is produced by a `DnsTlsSocketFactory`, and both of these have a
-defined interface.
-
-`DnsTlsDispatcher`'s constructor takes an `IDnsTlsSocketFactory`,
-which in production is a `DnsTlsSocketFactory`. However, in unit tests, we can
-substitute a test factory that returns a fake socket, so that the unit tests can
-run without actually connecting over TLS to a test server. (The integration tests
-do actual TLS.)
-
## Logging
This code uses LOG(X) for logging. Log levels are VERBOSE,DEBUG,INFO,WARNING and ERROR.
@@ -136,5 +11,3 @@
ERROR 4
Verbose resolver logs could contain PII -- do NOT enable in production builds.
-## Reference
- * [BoringSSL API docs](https://commondatastorage.googleapis.com/chromium-boringssl-docs/headers.html)
diff --git a/dnsresolver_binder_test.cpp b/dnsresolver_binder_test.cpp
index fcd4918..97c136f 100644
--- a/dnsresolver_binder_test.cpp
+++ b/dnsresolver_binder_test.cpp
@@ -54,7 +54,7 @@
using android::netdutils::Stopwatch;
// TODO: make this dynamic and stop depending on implementation details.
-// Sync from TEST_NETID in dns_responder_client.cpp as resolver_test.cpp does.
+// Sync from TEST_NETID in dns_responder_client.cpp as resolv_integration_test.cpp does.
constexpr int TEST_NETID = 30;
class DnsResolverBinderTest : public ::testing::Test {
@@ -158,7 +158,7 @@
ASSERT_EQ(EEXIST, status.serviceSpecificErrorCode());
}
-// TODO: Move this test to resolver_test.cpp
+// TODO: Move this test to resolv_integration_test.cpp
TEST_F(DnsResolverBinderTest, RegisterEventListener_onDnsEvent) {
// The test configs are used to trigger expected events. The expected results are defined in
// expectedResults.
diff --git a/include/netd_resolv/resolv.h b/include/netd_resolv/resolv.h
index 400d56d..17cf2aa 100644
--- a/include/netd_resolv/resolv.h
+++ b/include/netd_resolv/resolv.h
@@ -81,6 +81,7 @@
typedef void (*get_network_context_callback)(unsigned netid, uid_t uid,
android_net_context* netcontext);
typedef void (*log_callback)(const char* msg);
+typedef int (*tagSocketCallback)(int sockFd, uint32_t tag, uid_t uid);
/*
* Some functions needed by the resolver (e.g. checkCallingPermission()) live in
@@ -92,8 +93,11 @@
check_calling_permission_callback check_calling_permission;
get_network_context_callback get_network_context;
log_callback log;
+ tagSocketCallback tagSocket;
};
+#define TAG_SYSTEM_DNS 0xFFFFFF82
+
LIBNETD_RESOLV_PUBLIC bool resolv_has_nameservers(unsigned netid);
// Set callbacks and bring DnsResolver up.
diff --git a/res_send.cpp b/res_send.cpp
index ae940a3..fa3371f 100644
--- a/res_send.cpp
+++ b/res_send.cpp
@@ -774,9 +774,7 @@
return -1;
}
}
- if (fchown(statp->_vcsock, statp->uid, -1) == -1) {
- PLOG(WARNING) << __func__ << ": Failed to chown socket";
- }
+ resolv_tag_socket(statp->_vcsock, statp->uid);
if (statp->_mark != MARK_UNSET) {
if (setsockopt(statp->_vcsock, SOL_SOCKET, SO_MARK, &statp->_mark,
sizeof(statp->_mark)) < 0) {
@@ -1019,9 +1017,7 @@
}
}
- if (fchown(statp->_u._ext.nssocks[ns], statp->uid, -1) == -1) {
- PLOG(WARNING) << __func__ << ": Failed to chown socket";
- }
+ resolv_tag_socket(statp->_u._ext.nssocks[ns], statp->uid);
if (statp->_mark != MARK_UNSET) {
if (setsockopt(statp->_u._ext.nssocks[ns], SOL_SOCKET, SO_MARK, &(statp->_mark),
sizeof(statp->_mark)) < 0) {
diff --git a/res_cache_test.cpp b/resolv_cache_unit_test.cpp
similarity index 100%
rename from res_cache_test.cpp
rename to resolv_cache_unit_test.cpp
diff --git a/resolver_test.cpp b/resolv_integration_test.cpp
similarity index 98%
rename from resolver_test.cpp
rename to resolv_integration_test.cpp
index d7cd263..5014983 100644
--- a/resolver_test.cpp
+++ b/resolv_integration_test.cpp
@@ -171,6 +171,10 @@
return sDnsMetricsListener->waitForNat64Prefix(status, timeout);
}
+ bool WaitForPrivateDnsValidation(std::string serverAddr, bool validated) {
+ return sDnsMetricsListener->waitForPrivateDnsValidation(serverAddr, validated);
+ }
+
DnsResponderClient mDnsClient;
// Use a shared static DNS listener for all tests to avoid registering lots of listeners
@@ -1091,7 +1095,7 @@
// So, wait for private DNS validation done before stopping backend DNS servers.
for (int i = 0; i < MAXNS; i++) {
LOG(INFO) << "Waiting for private DNS validation on " << tls[i]->listen_address() << ".";
- EXPECT_TRUE(tls[i]->waitForQueries(1, 5000));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls[i]->listen_address(), true));
LOG(INFO) << "private DNS validation on " << tls[i]->listen_address() << " done.";
}
@@ -1262,13 +1266,9 @@
test::DnsTlsFrontend tls(listen_addr, listen_tls, listen_addr, listen_udp);
ASSERT_TRUE(tls.startServer());
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains, kDefaultParams, ""));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), true));
- const hostent* result;
-
- // Wait for validation to complete.
- EXPECT_TRUE(tls.waitForQueries(1, 5000));
-
- result = gethostbyname("tls1");
+ const hostent* result = gethostbyname("tls1");
ASSERT_FALSE(result == nullptr);
EXPECT_EQ("1.2.3.1", ToString(result));
@@ -1326,14 +1326,10 @@
ASSERT_TRUE(tls2.startServer());
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains, kDefaultParams,
kDefaultPrivateDnsHostName));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls1.listen_address(), true));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls2.listen_address(), true));
- const hostent* result;
-
- // Wait for validation to complete.
- EXPECT_TRUE(tls1.waitForQueries(1, 5000));
- EXPECT_TRUE(tls2.waitForQueries(1, 5000));
-
- result = gethostbyname("tlsfailover1");
+ const hostent* result = gethostbyname("tlsfailover1");
ASSERT_FALSE(result == nullptr);
EXPECT_EQ("1.2.3.1", ToString(result));
@@ -1376,7 +1372,7 @@
// The TLS handshake would fail because the name of TLS server doesn't
// match with TLS server's certificate.
- EXPECT_FALSE(tls.waitForQueries(1, 500));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), false));
// The query should fail hard, because a name was specified.
EXPECT_EQ(nullptr, gethostbyname("badtlsname"));
@@ -1403,9 +1399,7 @@
ASSERT_TRUE(tls.startServer());
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains, kDefaultParams,
kDefaultPrivateDnsHostName));
-
- // Wait for validation to complete.
- EXPECT_TRUE(tls.waitForQueries(1, 5000));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), true));
dns.clearQueries();
ScopedAddrinfo result = safe_getaddrinfo("addrinfotls", nullptr, nullptr);
@@ -1505,13 +1499,16 @@
} else if (config.mode == OPPORTUNISTIC) {
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains,
kDefaultParams, ""));
- // Wait for validation to complete.
- if (config.withWorkingTLS) EXPECT_TRUE(tls.waitForQueries(1, 5000));
+
+            // Wait for the validation event. If the server is running, validation should
+            // succeed; otherwise, it should fail.
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), config.withWorkingTLS));
} else if (config.mode == STRICT) {
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains,
kDefaultParams, kDefaultPrivateDnsHostName));
- // Wait for validation to complete.
- if (config.withWorkingTLS) EXPECT_TRUE(tls.waitForQueries(1, 5000));
+
+ // Wait for the validation event.
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), config.withWorkingTLS));
}
tls.clearQueries();
@@ -2231,22 +2228,21 @@
}
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains,
kDefaultParams, ""));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), false));
} else if (config.mode == OPPORTUNISTIC_TLS) {
if (!tls.running()) {
ASSERT_TRUE(tls.startServer());
}
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains,
kDefaultParams, ""));
- // Wait for validation to complete.
- EXPECT_TRUE(tls.waitForQueries(1, 5000));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), true));
} else if (config.mode == STRICT) {
if (!tls.running()) {
ASSERT_TRUE(tls.startServer());
}
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains,
kDefaultParams, kDefaultPrivateDnsHostName));
- // Wait for validation to complete.
- EXPECT_TRUE(tls.waitForQueries(1, 5000));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), true));
}
if (config.method == GETHOSTBYNAME) {
@@ -2303,8 +2299,8 @@
test::DnsTlsFrontend tls(CLEARTEXT_ADDR, TLS_PORT, CLEARTEXT_ADDR, CLEARTEXT_PORT);
ASSERT_TRUE(tls.startServer());
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains, kDefaultParams, ""));
- // Wait for validation complete.
- EXPECT_TRUE(tls.waitForQueries(1, 5000));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), true));
+
// Shutdown TLS server to get an error. It's similar to no response case but without waiting.
tls.stopServer();
@@ -2334,8 +2330,8 @@
test::DnsTlsFrontend tls(CLEARTEXT_ADDR, TLS_PORT, CLEARTEXT_ADDR, CLEARTEXT_PORT);
ASSERT_TRUE(tls.startServer());
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains, kDefaultParams, ""));
- // Wait for validation complete.
- EXPECT_TRUE(tls.waitForQueries(1, 5000));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), true));
+
// Shutdown TLS server to get an error. It's similar to no response case but without waiting.
tls.stopServer();
dns.setEdns(test::DNSResponder::Edns::FORMERR_UNCOND);
@@ -3200,7 +3196,7 @@
// Setup OPPORTUNISTIC mode and wait for the validation complete.
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains, kDefaultParams, ""));
- EXPECT_TRUE(tls.waitForQueries(1, 5000));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), true));
tls.clearQueries();
// Start NAT64 prefix discovery and wait for it complete.
@@ -3219,7 +3215,7 @@
// Setup STRICT mode and wait for the validation complete.
ASSERT_TRUE(mDnsClient.SetResolversWithTls(servers, kDefaultSearchDomains, kDefaultParams,
kDefaultPrivateDnsHostName));
- EXPECT_TRUE(tls.waitForQueries(1, 5000));
+ EXPECT_TRUE(WaitForPrivateDnsValidation(tls.listen_address(), true));
tls.clearQueries();
// Start NAT64 prefix discovery and wait for it to complete.
diff --git a/resolv_private.h b/resolv_private.h
index e6c20f4..f2668b5 100644
--- a/resolv_private.h
+++ b/resolv_private.h
@@ -61,6 +61,7 @@
#include <string>
#include <vector>
+#include "DnsResolver.h"
#include "netd_resolv/params.h"
#include "netd_resolv/resolv.h"
#include "netd_resolv/stats.h"
@@ -217,4 +218,16 @@
android::net::IpVersion ipFamilyToIPVersion(int ipFamily);
+inline void resolv_tag_socket(int sock, uid_t uid) {
+ if (android::net::gResNetdCallbacks.tagSocket != nullptr) {
+ if (int err = android::net::gResNetdCallbacks.tagSocket(sock, TAG_SYSTEM_DNS, uid)) {
+ LOG(WARNING) << "Failed to tag socket: " << strerror(-err);
+ }
+ }
+
+ if (fchown(sock, uid, -1) == -1) {
+ LOG(WARNING) << "Failed to chown socket: " << strerror(errno);
+ }
+}
+
#endif // NETD_RESOLV_PRIVATE_H
diff --git a/dns_tls_test.cpp b/resolv_tls_unit_test.cpp
similarity index 100%
rename from dns_tls_test.cpp
rename to resolv_tls_unit_test.cpp
diff --git a/libnetd_resolv_test.cpp b/resolv_unit_test.cpp
similarity index 100%
rename from libnetd_resolv_test.cpp
rename to resolv_unit_test.cpp
diff --git a/tests/dns_metrics_listener/dns_metrics_listener.cpp b/tests/dns_metrics_listener/dns_metrics_listener.cpp
index 8f3b646..acfb416 100644
--- a/tests/dns_metrics_listener/dns_metrics_listener.cpp
+++ b/tests/dns_metrics_listener/dns_metrics_listener.cpp
@@ -23,9 +23,11 @@
namespace net {
namespace metrics {
+using android::base::ScopedLockAssertion;
using std::chrono::milliseconds;
constexpr milliseconds kRetryIntervalMs{20};
+constexpr milliseconds kEventTimeoutMs{5000};
android::binder::Status DnsMetricsListener::onNat64PrefixEvent(int32_t netId, bool added,
const std::string& prefixString,
@@ -35,6 +37,20 @@
return android::binder::Status::ok();
}
+android::binder::Status DnsMetricsListener::onPrivateDnsValidationEvent(
+ int32_t netId, const ::android::String16& ipAddress,
+ const ::android::String16& /*hostname*/, bool validated) {
+ {
+ std::lock_guard lock(mMutex);
+ std::string serverAddr(String8(ipAddress.string()));
+
+        // Keep updating the record so each server reflects its latest validation status.
+ mValidationRecords.insert_or_assign({netId, serverAddr}, validated);
+ }
+ mCv.notify_one();
+ return android::binder::Status::ok();
+}
+
bool DnsMetricsListener::waitForNat64Prefix(ExpectNat64PrefixStatus status,
milliseconds timeout) const {
android::base::Timer t;
@@ -50,6 +66,31 @@
return false;
}
+bool DnsMetricsListener::waitForPrivateDnsValidation(const std::string& serverAddr,
+ const bool validated) {
+ const auto now = std::chrono::steady_clock::now();
+
+ std::unique_lock lock(mMutex);
+ ScopedLockAssertion assume_lock(mMutex);
+
+ // onPrivateDnsValidationEvent() might already be invoked. Search for the record first.
+ do {
+ if (findAndRemoveValidationRecord({mNetId, serverAddr}, validated)) return true;
+ } while (mCv.wait_until(lock, now + kEventTimeoutMs) != std::cv_status::timeout);
+
+ // Timeout.
+ return false;
+}
+
+bool DnsMetricsListener::findAndRemoveValidationRecord(const ServerKey& key, const bool value) {
+ auto it = mValidationRecords.find(key);
+ if (it != mValidationRecords.end() && it->second == value) {
+ mValidationRecords.erase(it);
+ return true;
+ }
+ return false;
+}
+
} // namespace metrics
} // namespace net
-} // namespace android
\ No newline at end of file
+} // namespace android
diff --git a/tests/dns_metrics_listener/dns_metrics_listener.h b/tests/dns_metrics_listener/dns_metrics_listener.h
index 1f8170d..d933b13 100644
--- a/tests/dns_metrics_listener/dns_metrics_listener.h
+++ b/tests/dns_metrics_listener/dns_metrics_listener.h
@@ -16,6 +16,10 @@
#pragma once
+#include <condition_variable>
+#include <map>
+#include <utility>
+
#include <android-base/thread_annotations.h>
#include "base_metrics_listener.h"
@@ -42,20 +46,38 @@
const std::string& prefixString,
int32_t /*prefixLength*/) override;
+ android::binder::Status onPrivateDnsValidationEvent(int32_t netId,
+ const ::android::String16& ipAddress,
+ const ::android::String16& /*hostname*/,
+ bool validated) override;
+
// Wait for expected NAT64 prefix status until timeout.
bool waitForNat64Prefix(ExpectNat64PrefixStatus status,
std::chrono::milliseconds timeout) const;
+ // Wait for the expected private DNS validation result until timeout.
+ bool waitForPrivateDnsValidation(const std::string& serverAddr, const bool validated);
+
private:
+ typedef std::pair<int32_t, std::string> ServerKey;
+
+ // Search mValidationRecords. Return true if |key| exists and its value is equal to
+ // |value|, and then remove it; otherwise, return false.
+ bool findAndRemoveValidationRecord(const ServerKey& key, const bool value) REQUIRES(mMutex);
+
// Monitor the event which was fired on specific network id.
const int32_t mNetId;
// The NAT64 prefix of the network |mNetId|. It is updated by the event onNat64PrefixEvent().
std::string mNat64Prefix GUARDED_BY(mMutex);
+ // Used to store the data from onPrivateDnsValidationEvent.
+ std::map<ServerKey, bool> mValidationRecords GUARDED_BY(mMutex);
+
mutable std::mutex mMutex;
+ std::condition_variable mCv;
};
} // namespace metrics
} // namespace net
-} // namespace android
\ No newline at end of file
+} // namespace android