libbinder: support session-specific root objects
When multiple clients connect to a server, we need a way to tell these
clients apart. Having a per-client root object is the easiest way to do
this. The alternative, using getCalling* as kernel binder does, isn't
as good because it requires global or thread-local state, and since
many RpcSession objects can be created (and they can also be used in
conjunction with kernel binder), it is complicated to figure out
exactly where getCalling* should be called.
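
For illustration, a rough usage sketch (ClientInfoService is a made-up
example class, not part of this change; only setPerSessionRootObject is
added here):

    #include <algorithm>
    #include <cstring>
    #include <sys/socket.h>

    #include <binder/Binder.h>
    #include <binder/RpcServer.h>

    // per-client root object that remembers which peer it was created for
    class ClientInfoService : public BBinder {
    public:
        explicit ClientInfoService(const sockaddr_storage& addr) : mAddr(addr) {}
    private:
        sockaddr_storage mAddr;
    };

    auto server = RpcServer::make();
    server->setPerSessionRootObject([](const sockaddr* addr, socklen_t len) -> sp<IBinder> {
        // copy the peer address so each session gets its own root object
        sockaddr_storage copy{};
        memcpy(&copy, addr, std::min<size_t>(len, sizeof(copy)));
        return sp<ClientInfoService>::make(copy);
    });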
Bug: 199259751
Test: binderRpcTest
Change-Id: I5727db618b5ea138bfa19e75ed915f6a6991518e
diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp
index 967b8e3..4edc202 100644
--- a/libs/binder/RpcServer.cpp
+++ b/libs/binder/RpcServer.cpp
@@ -127,14 +127,23 @@
void RpcServer::setRootObject(const sp<IBinder>& binder) {
std::lock_guard<std::mutex> _l(mLock);
+ mRootObjectFactory = nullptr;
mRootObjectWeak = mRootObject = binder;
}
void RpcServer::setRootObjectWeak(const wp<IBinder>& binder) {
std::lock_guard<std::mutex> _l(mLock);
mRootObject.clear();
+ mRootObjectFactory = nullptr;
mRootObjectWeak = binder;
}
+void RpcServer::setPerSessionRootObject(
+ std::function<sp<IBinder>(const sockaddr*, socklen_t)>&& makeObject) {
+ std::lock_guard<std::mutex> _l(mLock);
+ mRootObject.clear();
+ mRootObjectWeak.clear();
+ mRootObjectFactory = std::move(makeObject);
+}
sp<IBinder> RpcServer::getRootObject() {
std::lock_guard<std::mutex> _l(mLock);
@@ -174,8 +183,14 @@
status_t status;
while ((status = mShutdownTrigger->triggerablePoll(mServer, POLLIN)) == OK) {
- unique_fd clientFd(TEMP_FAILURE_RETRY(
- accept4(mServer.get(), nullptr, nullptr /*length*/, SOCK_CLOEXEC | SOCK_NONBLOCK)));
+ sockaddr_storage addr;
+ socklen_t addrLen = sizeof(addr);
+
+ unique_fd clientFd(
+ TEMP_FAILURE_RETRY(accept4(mServer.get(), reinterpret_cast<sockaddr*>(&addr),
+ &addrLen, SOCK_CLOEXEC | SOCK_NONBLOCK)));
+
+ LOG_ALWAYS_FATAL_IF(addrLen > static_cast<socklen_t>(sizeof(addr)), "Truncated address");
if (clientFd < 0) {
ALOGE("Could not accept4 socket: %s", strerror(errno));
@@ -187,7 +202,7 @@
std::lock_guard<std::mutex> _l(mLock);
std::thread thread =
std::thread(&RpcServer::establishConnection, sp<RpcServer>::fromExisting(this),
- std::move(clientFd));
+ std::move(clientFd), addr, addrLen);
mConnectingThreads[thread.get_id()] = std::move(thread);
}
}
@@ -257,7 +272,8 @@
return mConnectingThreads.size();
}
-void RpcServer::establishConnection(sp<RpcServer>&& server, base::unique_fd clientFd) {
+void RpcServer::establishConnection(sp<RpcServer>&& server, base::unique_fd clientFd,
+ const sockaddr_storage addr, socklen_t addrLen) {
// TODO(b/183988761): cannot trust this simple ID
LOG_ALWAYS_FATAL_IF(!server->mAgreedExperimental, "no!");
@@ -383,11 +399,23 @@
session = RpcSession::make();
session->setMaxIncomingThreads(server->mMaxThreads);
if (!session->setProtocolVersion(protocolVersion)) return;
+
+ // if null, falls back to server root
+ sp<IBinder> sessionSpecificRoot;
+ if (server->mRootObjectFactory != nullptr) {
+ sessionSpecificRoot =
+ server->mRootObjectFactory(reinterpret_cast<const sockaddr*>(&addr),
+ addrLen);
+ if (sessionSpecificRoot == nullptr) {
+ ALOGE("Warning: server returned null from root object factory");
+ }
+ }
+
if (!session->setForServer(server,
sp<RpcServer::EventListener>::fromExisting(
static_cast<RpcServer::EventListener*>(
server.get())),
- sessionId)) {
+ sessionId, sessionSpecificRoot)) {
ALOGE("Failed to attach server to session");
return;
}