/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

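// adbd's USB transport over functionfs, driven with Linux AIO (io_submit/io_getevents)
// rather than blocking endpoint reads and writes.
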
#define TRACE_TAG USB

#include "sysdeps.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

#include <linux/usb/functionfs.h>
#include <sys/eventfd.h>

#include <algorithm>
#include <array>
#include <future>
#include <memory>
#include <mutex>
#include <optional>
#include <vector>

#include <asyncio/AsyncIO.h>

#include <android-base/logging.h>
#include <android-base/macros.h>
#include <android-base/properties.h>
#include <android-base/thread_annotations.h>

#include <adbd/usb.h>

#include "adb_unique_fd.h"
#include "adb_utils.h"
#include "sysdeps/chrono.h"
#include "transport.h"
#include "types.h"

using android::base::StringPrintf;

// We can't find out whether we have support for AIO on ffs endpoints until we submit a read.
static std::optional<bool> gFfsAioSupported;

// Not all USB controllers support operations larger than 16k, so don't go above that.
// Also, each submitted operation does an allocation in the kernel of that size, so we want to
// minimize our queue depth while still maintaining a deep enough queue to keep the USB stack fed.
static constexpr size_t kUsbReadQueueDepth = 8;
static constexpr size_t kUsbReadSize = 4 * PAGE_SIZE;

static constexpr size_t kUsbWriteQueueDepth = 8;
static constexpr size_t kUsbWriteSize = 4 * PAGE_SIZE;

static const char* to_string(enum usb_functionfs_event_type type) {
    switch (type) {
        case FUNCTIONFS_BIND:
            return "FUNCTIONFS_BIND";
        case FUNCTIONFS_UNBIND:
            return "FUNCTIONFS_UNBIND";
        case FUNCTIONFS_ENABLE:
            return "FUNCTIONFS_ENABLE";
        case FUNCTIONFS_DISABLE:
            return "FUNCTIONFS_DISABLE";
        case FUNCTIONFS_SETUP:
            return "FUNCTIONFS_SETUP";
        case FUNCTIONFS_SUSPEND:
            return "FUNCTIONFS_SUSPEND";
        case FUNCTIONFS_RESUME:
            return "FUNCTIONFS_RESUME";
    }
}

enum class TransferDirection : uint64_t {
    READ = 0,
    WRITE = 1,
};

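// Identifier for an in-flight transfer. The direction bit and a 63-bit sequence number are
// packed into the iocb's 64-bit aio_data field, so the io_event returned by io_getevents can
// be matched back to the read or write request that produced it.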
struct TransferId {
    TransferDirection direction : 1;
    uint64_t id : 63;

    TransferId() : TransferId(TransferDirection::READ, 0) {}

  private:
    TransferId(TransferDirection direction, uint64_t id) : direction(direction), id(id) {}

  public:
    explicit operator uint64_t() const {
        uint64_t result;
        static_assert(sizeof(*this) == sizeof(result));
        memcpy(&result, this, sizeof(*this));
        return result;
    }

    static TransferId read(uint64_t id) { return TransferId(TransferDirection::READ, id); }
    static TransferId write(uint64_t id) { return TransferId(TransferDirection::WRITE, id); }

    static TransferId from_value(uint64_t value) {
        TransferId result;
        memcpy(&result, &value, sizeof(value));
        return result;
    }
};

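// A single submitted AIO transfer: the kernel iocb plus the buffer it reads into or writes from.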
struct IoBlock {
    bool pending = false;
    struct iocb control = {};
    std::shared_ptr<Block> payload;

    TransferId id() const { return TransferId::from_value(control.aio_data); }
};

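// RAII wrapper around a kernel AIO context: io_setup() in Create(), io_destroy() on reset or
// destruction.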
struct ScopedAioContext {
    ScopedAioContext() = default;
    ~ScopedAioContext() { reset(); }

    ScopedAioContext(ScopedAioContext&& move) { reset(move.release()); }
    ScopedAioContext(const ScopedAioContext& copy) = delete;

    ScopedAioContext& operator=(ScopedAioContext&& move) {
        reset(move.release());
        return *this;
    }
    ScopedAioContext& operator=(const ScopedAioContext& copy) = delete;

    static ScopedAioContext Create(size_t max_events) {
        aio_context_t ctx = 0;
        if (io_setup(max_events, &ctx) != 0) {
            PLOG(FATAL) << "failed to create aio_context_t";
        }
        ScopedAioContext result;
        result.reset(ctx);
        return result;
    }

    aio_context_t release() {
        aio_context_t result = context_;
        context_ = 0;
        return result;
    }

    void reset(aio_context_t new_context = 0) {
        if (context_ != 0) {
            io_destroy(context_);
        }

        context_ = new_context;
    }

    aio_context_t get() { return context_; }

  private:
    aio_context_t context_ = 0;
};

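// Connection implementation that drives the functionfs bulk endpoints with AIO. A monitor
// thread watches the control endpoint for functionfs lifecycle events; once the function is
// enabled, a worker thread keeps the read queue full and reaps completed transfers.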
struct UsbFfsConnection : public Connection {
    UsbFfsConnection(unique_fd control, unique_fd read, unique_fd write,
                     std::promise<void> destruction_notifier)
        : worker_started_(false),
          stopped_(false),
          destruction_notifier_(std::move(destruction_notifier)),
          control_fd_(std::move(control)),
          read_fd_(std::move(read)),
          write_fd_(std::move(write)) {
        LOG(INFO) << "UsbFfsConnection constructed";
        worker_event_fd_.reset(eventfd(0, EFD_CLOEXEC));
        if (worker_event_fd_ == -1) {
            PLOG(FATAL) << "failed to create eventfd";
        }

        monitor_event_fd_.reset(eventfd(0, EFD_CLOEXEC));
        if (monitor_event_fd_ == -1) {
            PLOG(FATAL) << "failed to create eventfd";
        }

        aio_context_ = ScopedAioContext::Create(kUsbReadQueueDepth + kUsbWriteQueueDepth);
    }

    ~UsbFfsConnection() {
        LOG(INFO) << "UsbFfsConnection being destroyed";
        Stop();
        monitor_thread_.join();

        // We need to explicitly close our file descriptors before we notify our destruction,
        // because the thread listening on the future will immediately try to reopen the endpoint.
        aio_context_.reset();
        control_fd_.reset();
        read_fd_.reset();
        write_fd_.reset();

        destruction_notifier_.set_value();
    }

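    // Queues one write request for the packet header and one per kUsbWriteSize chunk of the
    // payload, then submits as many as the write queue depth allows.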
    virtual bool Write(std::unique_ptr<apacket> packet) override final {
        LOG(DEBUG) << "USB write: " << dump_header(&packet->msg);
        Block header(sizeof(packet->msg));
        memcpy(header.data(), &packet->msg, sizeof(packet->msg));

        std::lock_guard<std::mutex> lock(write_mutex_);
        write_requests_.push_back(CreateWriteBlock(std::move(header), next_write_id_++));
        if (!packet->payload.empty()) {
            // The kernel attempts to allocate a contiguous block of memory for each write,
            // which can fail if the write is large and the kernel heap is fragmented.
            // Split large writes into smaller chunks to avoid this.
            std::shared_ptr<Block> payload = std::make_shared<Block>(std::move(packet->payload));
            size_t offset = 0;
            size_t len = payload->size();

            while (len > 0) {
                size_t write_size = std::min(kUsbWriteSize, len);
                write_requests_.push_back(
                        CreateWriteBlock(payload, offset, write_size, next_write_id_++));
                len -= write_size;
                offset += write_size;
            }
        }
        SubmitWrites();
        return true;
    }

    virtual void Start() override final { StartMonitor(); }

    virtual void Stop() override final {
        if (stopped_.exchange(true)) {
            return;
        }
        stopped_ = true;
        uint64_t notify = 1;
        ssize_t rc = adb_write(worker_event_fd_.get(), &notify, sizeof(notify));
        if (rc < 0) {
            PLOG(FATAL) << "failed to notify worker eventfd to stop UsbFfsConnection";
        }
        CHECK_EQ(static_cast<size_t>(rc), sizeof(notify));

        rc = adb_write(monitor_event_fd_.get(), &notify, sizeof(notify));
        if (rc < 0) {
            PLOG(FATAL) << "failed to notify monitor eventfd to stop UsbFfsConnection";
        }

        CHECK_EQ(static_cast<size_t>(rc), sizeof(notify));
    }

  private:
    void StartMonitor() {
        // This is a bit of a mess.
        // It's possible for io_submit to end up blocking, if we call it as the endpoint
        // becomes disabled. Work around this by having a monitor thread to listen for functionfs
        // lifecycle events. If we notice an error condition (either we've become disabled, or we
        // were never enabled in the first place), we send interruption signals to the worker thread
        // until it dies, and then report failure to the transport via HandleError, which will
        // eventually result in the transport being destroyed, which will result in UsbFfsConnection
        // being destroyed, which unblocks the open thread and restarts this entire process.
        static std::once_flag handler_once;
        std::call_once(handler_once, []() { signal(kInterruptionSignal, [](int) {}); });

        monitor_thread_ = std::thread([this]() {
            adb_thread_setname("UsbFfs-monitor");

            bool bound = false;
            bool enabled = false;
            bool running = true;
            while (running) {
                adb_pollfd pfd[2] = {
                        { .fd = control_fd_.get(), .events = POLLIN, .revents = 0 },
                        { .fd = monitor_event_fd_.get(), .events = POLLIN, .revents = 0 },
                };

                // If we don't see our first bind within a second, try again.
                int timeout_ms = bound ? -1 : 1000;

                int rc = TEMP_FAILURE_RETRY(adb_poll(pfd, 2, timeout_ms));
                if (rc == -1) {
                    PLOG(FATAL) << "poll on USB control fd failed";
                } else if (rc == 0) {
                    LOG(WARNING) << "timed out while waiting for FUNCTIONFS_BIND, trying again";
                    break;
                }

                if (pfd[1].revents) {
                    // We were told to die.
                    break;
                }

                struct usb_functionfs_event event;
                rc = TEMP_FAILURE_RETRY(adb_read(control_fd_.get(), &event, sizeof(event)));
                if (rc == -1) {
                    PLOG(FATAL) << "failed to read functionfs event";
                } else if (rc == 0) {
                    LOG(WARNING) << "hit EOF on functionfs control fd";
                    break;
                } else if (rc != sizeof(event)) {
                    LOG(FATAL) << "read functionfs event of unexpected size, expected "
                               << sizeof(event) << ", got " << rc;
                }

                LOG(INFO) << "USB event: "
                          << to_string(static_cast<usb_functionfs_event_type>(event.type));

                switch (event.type) {
                    case FUNCTIONFS_BIND:
                        if (bound) {
                            LOG(WARNING) << "received FUNCTIONFS_BIND while already bound?";
                            running = false;
                            break;
                        }

                        if (enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_BIND while already enabled?";
                            running = false;
                            break;
                        }

                        bound = true;
                        break;

                    case FUNCTIONFS_ENABLE:
                        if (!bound) {
                            LOG(WARNING) << "received FUNCTIONFS_ENABLE while not bound?";
                            running = false;
                            break;
                        }

                        if (enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_ENABLE while already enabled?";
                            running = false;
                            break;
                        }

                        enabled = true;
                        StartWorker();
                        break;

                    case FUNCTIONFS_DISABLE:
                        if (!bound) {
                            LOG(WARNING) << "received FUNCTIONFS_DISABLE while not bound?";
                        }

                        if (!enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_DISABLE while not enabled?";
                        }

                        enabled = false;
                        running = false;
                        break;

                    case FUNCTIONFS_UNBIND:
                        if (enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_UNBIND while still enabled?";
                        }

                        if (!bound) {
                            LOG(WARNING) << "received FUNCTIONFS_UNBIND when not bound?";
                        }

                        bound = false;
                        running = false;
                        break;

                    case FUNCTIONFS_SETUP: {
                        LOG(INFO) << "received FUNCTIONFS_SETUP control transfer: bRequestType = "
                                  << static_cast<int>(event.u.setup.bRequestType)
                                  << ", bRequest = " << static_cast<int>(event.u.setup.bRequest)
                                  << ", wValue = " << static_cast<int>(event.u.setup.wValue)
                                  << ", wIndex = " << static_cast<int>(event.u.setup.wIndex)
                                  << ", wLength = " << static_cast<int>(event.u.setup.wLength);

                        if ((event.u.setup.bRequestType & USB_DIR_IN)) {
                            LOG(INFO) << "acking device-to-host control transfer";
                            ssize_t rc = adb_write(control_fd_.get(), "", 0);
                            if (rc != 0) {
                                PLOG(ERROR) << "failed to write empty packet to host";
                                break;
                            }
                        } else {
                            std::string buf;
                            buf.resize(event.u.setup.wLength + 1);

                            ssize_t rc = adb_read(control_fd_.get(), buf.data(), buf.size());
                            if (rc != event.u.setup.wLength) {
                                LOG(ERROR)
                                        << "read " << rc
                                        << " bytes when trying to read control request, expected "
                                        << event.u.setup.wLength;
                            }

                            LOG(INFO) << "control request contents: " << buf;
                            break;
                        }
                    }
                }
            }

            StopWorker();
            HandleError("monitor thread finished");
        });
    }

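    // Primes the read queue with kUsbReadQueueDepth requests, then loops on worker_event_fd_,
    // which the kernel signals whenever a submitted transfer completes (IOCB_FLAG_RESFD).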
    void StartWorker() {
        CHECK(!worker_started_);
        worker_started_ = true;
        worker_thread_ = std::thread([this]() {
            adb_thread_setname("UsbFfs-worker");
            for (size_t i = 0; i < kUsbReadQueueDepth; ++i) {
                read_requests_[i] = CreateReadBlock(next_read_id_++);
                if (!SubmitRead(&read_requests_[i])) {
                    return;
                }
            }

            while (!stopped_) {
                uint64_t dummy;
                ssize_t rc = adb_read(worker_event_fd_.get(), &dummy, sizeof(dummy));
                if (rc == -1) {
                    PLOG(FATAL) << "failed to read from eventfd";
                } else if (rc == 0) {
                    LOG(FATAL) << "hit EOF on eventfd";
                }

                ReadEvents();
            }
        });
    }

    void StopWorker() {
        if (!worker_started_) {
            return;
        }

        pthread_t worker_thread_handle = worker_thread_.native_handle();
        while (true) {
            int rc = pthread_kill(worker_thread_handle, kInterruptionSignal);
            if (rc != 0) {
                LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
                break;
            }

            std::this_thread::sleep_for(100ms);

            rc = pthread_kill(worker_thread_handle, 0);
            if (rc == 0) {
                continue;
            } else if (rc == ESRCH) {
                break;
            } else {
                LOG(ERROR) << "failed to check whether worker thread is still alive: "
                           << strerror(rc);
            }
        }

        worker_thread_.join();
    }

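    // Read requests point the kernel at a fresh Block and route their completion notification
    // to worker_event_fd_ via IOCB_FLAG_RESFD/aio_resfd.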
    void PrepareReadBlock(IoBlock* block, uint64_t id) {
        block->pending = false;
        block->payload = std::make_shared<Block>(kUsbReadSize);
        block->control.aio_data = static_cast<uint64_t>(TransferId::read(id));
        block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload->data());
        block->control.aio_nbytes = block->payload->size();
    }

    IoBlock CreateReadBlock(uint64_t id) {
        IoBlock block;
        PrepareReadBlock(&block, id);
        block.control.aio_rw_flags = 0;
        block.control.aio_lio_opcode = IOCB_CMD_PREAD;
        block.control.aio_reqprio = 0;
        block.control.aio_fildes = read_fd_.get();
        block.control.aio_offset = 0;
        block.control.aio_flags = IOCB_FLAG_RESFD;
        block.control.aio_resfd = worker_event_fd_.get();
        return block;
    }

    void ReadEvents() {
        static constexpr size_t kMaxEvents = kUsbReadQueueDepth + kUsbWriteQueueDepth;
        struct io_event events[kMaxEvents];
        struct timespec timeout = {.tv_sec = 0, .tv_nsec = 0};
        int rc = io_getevents(aio_context_.get(), 0, kMaxEvents, events, &timeout);
        if (rc == -1) {
            HandleError(StringPrintf("io_getevents failed while reading: %s", strerror(errno)));
            return;
        }

        for (int event_idx = 0; event_idx < rc; ++event_idx) {
            auto& event = events[event_idx];
            TransferId id = TransferId::from_value(event.data);

            if (event.res < 0) {
                std::string error =
                        StringPrintf("%s %" PRIu64 " failed with error %s",
                                     id.direction == TransferDirection::READ ? "read" : "write",
                                     id.id, strerror(-event.res));
                HandleError(error);
                return;
            }

            if (id.direction == TransferDirection::READ) {
                if (!HandleRead(id, event.res)) {
                    return;
                }
            } else {
                HandleWrite(id);
            }
        }
    }

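    // AIO completions can arrive out of order, but packets must be consumed in submission
    // order, so completed reads are only processed once every earlier read has finished
    // (tracked by needed_read_id_).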
    bool HandleRead(TransferId id, int64_t size) {
        uint64_t read_idx = id.id % kUsbReadQueueDepth;
        IoBlock* block = &read_requests_[read_idx];
        block->pending = false;
        block->payload->resize(size);

        // Notification for completed reads can be received out of order.
        if (block->id().id != needed_read_id_) {
            LOG(VERBOSE) << "read " << block->id().id << " completed while waiting for "
                         << needed_read_id_;
            return true;
        }

        for (uint64_t id = needed_read_id_;; ++id) {
            size_t read_idx = id % kUsbReadQueueDepth;
            IoBlock* current_block = &read_requests_[read_idx];
            if (current_block->pending) {
                break;
            }
            if (!ProcessRead(current_block)) {
                return false;
            }
            ++needed_read_id_;
        }

        return true;
    }

    bool ProcessRead(IoBlock* block) {
        if (!block->payload->empty()) {
            if (!incoming_header_.has_value()) {
                if (block->payload->size() != sizeof(amessage)) {
                    HandleError("received packet of unexpected length while reading header");
                    return false;
                }
                amessage msg;
                memcpy(&msg, block->payload->data(), sizeof(amessage));
                LOG(DEBUG) << "USB read:" << dump_header(&msg);
                incoming_header_ = msg;
            } else {
                size_t bytes_left = incoming_header_->data_length - incoming_payload_.size();
                Block payload = std::move(*block->payload);
                if (payload.size() > bytes_left) {
                    HandleError("received too many bytes while waiting for payload");
                    return false;
                }
                incoming_payload_.append(std::make_unique<Block>(std::move(payload)));
            }

            if (incoming_header_->data_length == incoming_payload_.size()) {
                auto packet = std::make_unique<apacket>();
                packet->msg = *incoming_header_;

                // TODO: Make apacket contain an IOVector so we don't have to coalesce.
                packet->payload = incoming_payload_.coalesce();
                read_callback_(this, std::move(packet));

                incoming_header_.reset();
                incoming_payload_.clear();
            }
        }

        PrepareReadBlock(block, block->id().id + kUsbReadQueueDepth);
        SubmitRead(block);
        return true;
    }

    bool SubmitRead(IoBlock* block) {
        block->pending = true;
        struct iocb* iocb = &block->control;
        if (io_submit(aio_context_.get(), 1, &iocb) != 1) {
            if (errno == EINVAL && !gFfsAioSupported.has_value()) {
                HandleError("failed to submit first read, AIO on FFS not supported");
                gFfsAioSupported = false;
                return false;
            }

            HandleError(StringPrintf("failed to submit read: %s", strerror(errno)));
            return false;
        }

        gFfsAioSupported = true;
        return true;
    }

    void HandleWrite(TransferId id) {
        std::lock_guard<std::mutex> lock(write_mutex_);
        auto it =
                std::find_if(write_requests_.begin(), write_requests_.end(), [id](const auto& req) {
                    return static_cast<uint64_t>(req->id()) == static_cast<uint64_t>(id);
                });
        CHECK(it != write_requests_.end());

        write_requests_.erase(it);
        size_t outstanding_writes = --writes_submitted_;
        LOG(DEBUG) << "USB write: reaped, down to " << outstanding_writes;

        SubmitWrites();
    }

    std::unique_ptr<IoBlock> CreateWriteBlock(std::shared_ptr<Block> payload, size_t offset,
                                              size_t len, uint64_t id) {
        auto block = std::make_unique<IoBlock>();
        block->payload = std::move(payload);
        block->control.aio_data = static_cast<uint64_t>(TransferId::write(id));
        block->control.aio_rw_flags = 0;
        block->control.aio_lio_opcode = IOCB_CMD_PWRITE;
        block->control.aio_reqprio = 0;
        block->control.aio_fildes = write_fd_.get();
        block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload->data() + offset);
        block->control.aio_nbytes = len;
        block->control.aio_offset = 0;
        block->control.aio_flags = IOCB_FLAG_RESFD;
        block->control.aio_resfd = worker_event_fd_.get();
        return block;
    }

    std::unique_ptr<IoBlock> CreateWriteBlock(Block payload, uint64_t id) {
        std::shared_ptr<Block> block = std::make_shared<Block>(std::move(payload));
        size_t len = block->size();
        return CreateWriteBlock(std::move(block), 0, len, id);
    }

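    // Keeps at most kUsbWriteQueueDepth writes in flight, submitting queued requests in FIFO
    // order so packets go out on the wire in the order they were written.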
    void SubmitWrites() REQUIRES(write_mutex_) {
        if (writes_submitted_ == kUsbWriteQueueDepth) {
            return;
        }

        ssize_t writes_to_submit = std::min(kUsbWriteQueueDepth - writes_submitted_,
                                            write_requests_.size() - writes_submitted_);
        CHECK_GE(writes_to_submit, 0);
        if (writes_to_submit == 0) {
            return;
        }

        struct iocb* iocbs[kUsbWriteQueueDepth];
        for (int i = 0; i < writes_to_submit; ++i) {
            CHECK(!write_requests_[writes_submitted_ + i]->pending);
            write_requests_[writes_submitted_ + i]->pending = true;
            iocbs[i] = &write_requests_[writes_submitted_ + i]->control;
            LOG(VERBOSE) << "submitting write_request " << static_cast<void*>(iocbs[i]);
        }

        writes_submitted_ += writes_to_submit;

        int rc = io_submit(aio_context_.get(), writes_to_submit, iocbs);
        if (rc == -1) {
            HandleError(StringPrintf("failed to submit write requests: %s", strerror(errno)));
            return;
        } else if (rc != writes_to_submit) {
            LOG(FATAL) << "failed to submit all writes: wanted to submit " << writes_to_submit
                       << ", actually submitted " << rc;
        }
    }

    void HandleError(const std::string& error) {
        std::call_once(error_flag_, [&]() {
            error_callback_(this, error);
            if (!stopped_) {
                Stop();
            }
        });
    }

    std::thread monitor_thread_;

    bool worker_started_;
    std::thread worker_thread_;

    std::atomic<bool> stopped_;
    std::promise<void> destruction_notifier_;
    std::once_flag error_flag_;

    unique_fd worker_event_fd_;
    unique_fd monitor_event_fd_;

    ScopedAioContext aio_context_;
    unique_fd control_fd_;
    unique_fd read_fd_;
    unique_fd write_fd_;

    std::optional<amessage> incoming_header_;
    IOVector incoming_payload_;

    std::array<IoBlock, kUsbReadQueueDepth> read_requests_;
    IOVector read_data_;

    // ID of the next request that we're going to send out.
    size_t next_read_id_ = 0;

    // ID of the next packet we're waiting for.
    size_t needed_read_id_ = 0;

    std::mutex write_mutex_;
    std::deque<std::unique_ptr<IoBlock>> write_requests_ GUARDED_BY(write_mutex_);
    size_t next_write_id_ GUARDED_BY(write_mutex_) = 0;
    size_t writes_submitted_ GUARDED_BY(write_mutex_) = 0;

    static constexpr int kInterruptionSignal = SIGUSR1;
};

void usb_init_legacy();

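// Opens the functionfs endpoints, hands them to a fresh transport, and waits for that
// connection to be torn down before trying to reopen them.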
static void usb_ffs_open_thread() {
    adb_thread_setname("usb ffs open");

    while (true) {
        if (gFfsAioSupported.has_value() && !gFfsAioSupported.value()) {
            LOG(INFO) << "failed to use nonblocking ffs, falling back to legacy";
            return usb_init_legacy();
        }

        unique_fd control;
        unique_fd bulk_out;
        unique_fd bulk_in;
        if (!open_functionfs(&control, &bulk_out, &bulk_in)) {
            std::this_thread::sleep_for(1s);
            continue;
        }

        atransport* transport = new atransport();
        transport->serial = "UsbFfs";
        std::promise<void> destruction_notifier;
        std::future<void> future = destruction_notifier.get_future();
        transport->SetConnection(std::make_unique<UsbFfsConnection>(
                std::move(control), std::move(bulk_out), std::move(bulk_in),
                std::move(destruction_notifier)));
        register_transport(transport);
        future.wait();
    }
}

void usb_init() {
    bool use_nonblocking = android::base::GetBoolProperty(
            "persist.adb.nonblocking_ffs",
            android::base::GetBoolProperty("ro.adb.nonblocking_ffs", true));

    if (use_nonblocking) {
        std::thread(usb_ffs_open_thread).detach();
    } else {
        usb_init_legacy();
    }
}