/*
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 * Copyright (C) 2011-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *    int fd = open("/dev/qemu_pipe", O_RDWR);
 *    .... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *    // do this _just_ after opening the fd to connect to a specific
 *    // emulator service.
 *    const char *msg = "<pipename>";
 *    if (write(fd, msg, strlen(msg) + 1) < 0) {
 *        ... could not connect to <pipename> service
 *        close(fd);
 *    }
 *
 *    // after this, simply read() and write() to communicate with the
 *    // service. Exact protocol details are left as an exercise to the
 *    // reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note, however, that we must ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
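
/* For illustration only: a minimal guest-side session assembled from the
 * calls above. The "ping" service name, the payload, and the reply size are
 * assumptions made up for this sketch, not something the driver defines:
 *
 *    int fd = open("/dev/qemu_pipe", O_RDWR);
 *    const char *svc = "ping";                 // hypothetical service
 *    if (fd < 0 || write(fd, svc, strlen(svc) + 1) < 0) {
 *        close(fd);                            // service unavailable
 *        return -1;
 *    }
 *    char reply[64];                           // size is an assumption
 *    write(fd, "hello", 5);                    // service-specific payload
 *    ssize_t n = read(fd, reply, sizeof(reply));  // blocks until data/EOF
 *    close(fd);
 */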

#include "goldfish_pipe.h"


/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it.
 */
enum {
        PIPE_DRIVER_VERSION = 2,
        PIPE_CURRENT_DEVICE_VERSION = 2
};

/*
 * IMPORTANT: The following constants must match the ones used and defined
 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
 */

/* List of bitflags returned in the status of the PIPE_CMD_POLL command */
enum PipePollFlags {
        PIPE_POLL_IN = 1 << 0,
        PIPE_POLL_OUT = 1 << 1,
        PIPE_POLL_HUP = 1 << 2
};

/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
enum PipeErrors {
        PIPE_ERROR_INVAL = -1,
        PIPE_ERROR_AGAIN = -2,
        PIPE_ERROR_NOMEM = -3,
        PIPE_ERROR_IO = -4
};

/* Bit-flags used to signal events from the emulator */
enum PipeWakeFlags {
        PIPE_WAKE_CLOSED = 1 << 0, /* emulator closed pipe */
        PIPE_WAKE_READ = 1 << 1,   /* pipe can now be read from */
        PIPE_WAKE_WRITE = 1 << 2   /* pipe can now be written to */
};

/* Bit flags for the 'flags' field */
enum PipeFlagsBits {
        BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
        BIT_WAKE_ON_WRITE = 1,  /* want to be woken on writes */
        BIT_WAKE_ON_READ = 2,   /* want to be woken on reads */
};

enum PipeRegs {
        PIPE_REG_CMD = 0,

        PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
        PIPE_REG_SIGNAL_BUFFER = 8,
        PIPE_REG_SIGNAL_BUFFER_COUNT = 12,

        PIPE_REG_OPEN_BUFFER_HIGH = 20,
        PIPE_REG_OPEN_BUFFER = 24,

        PIPE_REG_VERSION = 36,

        PIPE_REG_GET_SIGNALLED = 48,
};

enum PipeCmdCode {
        PIPE_CMD_OPEN = 1, /* to be used by the pipe device itself */
        PIPE_CMD_CLOSE,
        PIPE_CMD_POLL,
        PIPE_CMD_WRITE,
        PIPE_CMD_WAKE_ON_WRITE,
        PIPE_CMD_READ,
        PIPE_CMD_WAKE_ON_READ,

        /*
         * TODO(zyy): implement a deferred read/write execution to allow
         * parallel processing of pipe operations on the host.
         */
        PIPE_CMD_WAKE_ON_DONE_IO,
};

enum {
        MAX_BUFFERS_PER_COMMAND = 336,
        MAX_SIGNALLED_PIPES = 64,
        INITIAL_PIPES_CAPACITY = 64
};

struct goldfish_pipe_dev;
struct goldfish_pipe;
struct goldfish_pipe_command;

/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
        s32 cmd;      /* PipeCmdCode, guest -> host */
        s32 id;       /* pipe id, guest -> host */
        s32 status;   /* command execution status, host -> guest */
        s32 reserved; /* to pad to 64-bit boundary */
        union {
                /* Parameters for PIPE_CMD_{READ,WRITE} */
                struct {
                        u32 buffers_count; /* number of buffers, guest -> host */
                        s32 consumed_size; /* number of consumed bytes, host -> guest */
                        u64 ptrs[MAX_BUFFERS_PER_COMMAND];  /* buffer pointers, guest -> host */
                        u32 sizes[MAX_BUFFERS_PER_COMMAND]; /* buffer sizes, guest -> host */
                } rw_params;
        };
};
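
/* For illustration, this is roughly how a one-buffer PIPE_CMD_WRITE would be
 * encoded into the structure above. This is only a sketch: the real encoding
 * is done by populate_rw_params() below, and "phys_addr" and "len" are
 * placeholders, not driver variables:
 *
 *    struct goldfish_pipe_command *cmd = pipe->command_buffer;
 *
 *    cmd->cmd = PIPE_CMD_WRITE;
 *    cmd->id = pipe->id;
 *    cmd->rw_params.buffers_count = 1;
 *    cmd->rw_params.ptrs[0] = phys_addr;  // guest-physical buffer address
 *    cmd->rw_params.sizes[0] = len;
 *    // after the host executes the command, it fills in (host -> guest):
 *    //   cmd->status and cmd->rw_params.consumed_size
 */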

/* Information about a single signalled pipe */
struct signalled_pipe_buffer {
        u32 id;
        u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
        u64 command_buffer_ptr;
        u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
        struct open_command_param open_command_params;
        struct signalled_pipe_buffer signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};

/* This data type models a given pipe instance */
struct goldfish_pipe {
        u32 id; /* pipe ID - index into goldfish_pipe_dev::pipes array */
        unsigned long flags; /* The wake flags the pipe is waiting for.
                              * Note: not protected by any lock; uses atomic
                              * operations and barriers to be thread-safe.
                              */
        unsigned long signalled_flags; /* wake flags the host has signalled,
                                        * protected by goldfish_pipe_dev::lock */

        struct goldfish_pipe_command *command_buffer; /* A pointer to the command buffer */

        /* doubly linked list of signalled pipes, protected by goldfish_pipe_dev::lock */
        struct goldfish_pipe *prev_signalled;
        struct goldfish_pipe *next_signalled;

        /*
         * A pipe's own lock. Protects the following:
         * - *command_buffer - makes sure a command can safely write its
         *   parameters to the host and read the results back.
         */
        struct mutex lock;

        wait_queue_head_t wake_queue; /* A wake queue for sleeping until host signals an event */
        struct goldfish_pipe_dev *dev; /* Pointer to the parent goldfish_pipe_dev instance */
};

struct goldfish_pipe_dev pipe_dev[1] = {};
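/*
 * Write a command to the pipe's shared command buffer and kick the host by
 * writing the pipe's id into PIPE_REG_CMD. The register write traps into
 * the emulator, which executes the command synchronously (deferred
 * execution is still a TODO, see PIPE_CMD_WAKE_ON_DONE_IO above), so by the
 * time writel() returns the host has already updated command_buffer->status.
 * The caller must hold pipe->lock.
 */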
static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
        pipe->command_buffer->cmd = cmd;
        pipe->command_buffer->status = PIPE_ERROR_INVAL; /* failure by default */
        writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
        return pipe->command_buffer->status;
}

static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
        int status;

        if (mutex_lock_interruptible(&pipe->lock))
                return PIPE_ERROR_IO;
        status = goldfish_cmd_locked(pipe, cmd);
        mutex_unlock(&pipe->lock);
        return status;
}

/*
 * This function converts an error code returned by the emulator through
 * the command buffer's status field into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
        switch (status) {
        case PIPE_ERROR_AGAIN:
                return -EAGAIN;
        case PIPE_ERROR_NOMEM:
                return -ENOMEM;
        case PIPE_ERROR_IO:
                return -EIO;
        default:
                return -EINVAL;
        }
}

static int pin_user_pages(unsigned long first_page, unsigned long last_page,
        unsigned int last_page_size, int is_write,
        struct page *pages[MAX_BUFFERS_PER_COMMAND],
        unsigned int *iter_last_page_size)
{
        int ret;
        int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

        if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
                requested_pages = MAX_BUFFERS_PER_COMMAND;
                *iter_last_page_size = PAGE_SIZE;
        } else {
                *iter_last_page_size = last_page_size;
        }

        ret = get_user_pages_fast(
                        first_page, requested_pages, !is_write, pages);
        if (ret <= 0)
                return -EFAULT;
        if (ret < requested_pages)
                *iter_last_page_size = PAGE_SIZE;
        return ret;
}

static void release_user_pages(struct page **pages, int pages_count,
        int is_write, s32 consumed_size)
{
        int i;

        for (i = 0; i < pages_count; i++) {
                if (!is_write && consumed_size > 0)
                        set_page_dirty(pages[i]);
                put_page(pages[i]);
        }
}

/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(
        struct page **pages, int pages_count,
        unsigned long address, unsigned long address_end,
        unsigned long first_page, unsigned long last_page,
        unsigned int iter_last_page_size, int is_write,
        struct goldfish_pipe_command *command)
{
        /*
         * Process the first page separately - it's the only page that
         * needs special handling for its start address.
         */
        unsigned long xaddr = page_to_phys(pages[0]);
        unsigned long xaddr_prev = xaddr;
        int buffer_idx = 0;
        int i = 1;
        int size_on_page = first_page == last_page
                        ? (int)(address_end - address)
                        : (PAGE_SIZE - (address & ~PAGE_MASK));

        command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
        command->rw_params.sizes[0] = size_on_page;
        for (; i < pages_count; ++i) {
                xaddr = page_to_phys(pages[i]);
                size_on_page = (i == pages_count - 1)
                                ? iter_last_page_size : PAGE_SIZE;
                if (xaddr == xaddr_prev + PAGE_SIZE) {
                        command->rw_params.sizes[buffer_idx] += size_on_page;
                } else {
                        ++buffer_idx;
                        command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
                        command->rw_params.sizes[buffer_idx] = size_on_page;
                }
                xaddr_prev = xaddr;
        }
        command->rw_params.buffers_count = buffer_idx + 1;
}
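
/* A worked example (a sketch, not driver code): a transfer covering three
 * pinned pages, where page 0 and page 1 happen to be physically contiguous
 * but page 2 is not, collapses into just two buffers for the host:
 *
 *    ptrs[0]  = phys(page0) | start_offset
 *    sizes[0] = (PAGE_SIZE - start_offset) + PAGE_SIZE  // pages 0+1 merged
 *    ptrs[1]  = phys(page2)
 *    sizes[1] = iter_last_page_size
 *    buffers_count = 2
 */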

static int transfer_max_buffers(struct goldfish_pipe *pipe,
        unsigned long address, unsigned long address_end, int is_write,
        unsigned long last_page, unsigned int last_page_size,
        s32 *consumed_size, int *status)
{
        struct page *pages[MAX_BUFFERS_PER_COMMAND];
        unsigned long first_page = address & PAGE_MASK;
        unsigned int iter_last_page_size;
        int pages_count = pin_user_pages(first_page, last_page,
                        last_page_size, is_write,
                        pages, &iter_last_page_size);

        if (pages_count < 0)
                return pages_count;

        /* Serialize access to the pipe command buffers */
        if (mutex_lock_interruptible(&pipe->lock)) {
                /* don't leak the pinned pages on interruption */
                release_user_pages(pages, pages_count, is_write, 0);
                return -ERESTARTSYS;
        }

        populate_rw_params(pages, pages_count, address, address_end,
                first_page, last_page, iter_last_page_size, is_write,
                pipe->command_buffer);

        /* Transfer the data */
        *status = goldfish_cmd_locked(pipe,
                        is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

        *consumed_size = pipe->command_buffer->rw_params.consumed_size;

        mutex_unlock(&pipe->lock);

        release_user_pages(pages, pages_count, is_write, *consumed_size);

        return 0;
}
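/*
 * Sleep until the host signals the wake event we subscribed to. This is the
 * guest half of the wake protocol: set the wake bit, ask the host to notify
 * us with PIPE_CMD_WAKE_ON_*, then sleep until the interrupt path
 * (goldfish_interrupt_task() below) clears the bit, or fail with -EIO if
 * the host closed the pipe in the meantime.
 */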
static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
        u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;

        set_bit(wakeBit, &pipe->flags);

        /* Tell the emulator we're going to wait for a wake event */
        (void)goldfish_cmd(pipe,
                        is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);

        while (test_bit(wakeBit, &pipe->flags)) {
                if (wait_event_interruptible(
                                pipe->wake_queue,
                                !test_bit(wakeBit, &pipe->flags)))
                        return -ERESTARTSYS;

                if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
                        return -EIO;
        }

        return 0;
}

static ssize_t goldfish_pipe_read_write(struct file *filp,
        char __user *buffer, size_t bufflen, int is_write)
{
        struct goldfish_pipe *pipe = filp->private_data;
        int count = 0, ret = -EINVAL;
        unsigned long address, address_end, last_page;
        unsigned int last_page_size;

        /* If the emulator already closed the pipe, no need to go further */
        if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
                return -EIO;
        /* Null reads or writes succeed */
        if (unlikely(bufflen == 0))
                return 0;
        /* Check the buffer range for access */
        if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
                        buffer, bufflen)))
                return -EFAULT;

        address = (unsigned long)buffer;
        address_end = address + bufflen;
        last_page = (address_end - 1) & PAGE_MASK;
        last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;

        while (address < address_end) {
                s32 consumed_size;
                int status;

                ret = transfer_max_buffers(pipe, address, address_end, is_write,
                                last_page, last_page_size, &consumed_size, &status);
                if (ret < 0)
                        break;

                if (consumed_size > 0) {
                        /* No matter what the status is, we've transferred something */
                        count += consumed_size;
                        address += consumed_size;
                }
                if (status > 0)
                        continue;
                if (status == 0) {
                        /* EOF */
                        ret = 0;
                        break;
                }
                if (count > 0) {
                        /*
                         * An error occurred, but we already transferred
                         * something on one of the previous iterations.
                         * Just return what we already copied and log this
                         * err.
                         */
                        if (status != PIPE_ERROR_AGAIN)
                                pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
                                        status, is_write ? "write" : "read");
                        break;
                }

                /*
                 * If the error is not PIPE_ERROR_AGAIN, or if we are in
                 * non-blocking mode, just return the error code.
                 */
                if (status != PIPE_ERROR_AGAIN || (filp->f_flags & O_NONBLOCK) != 0) {
                        ret = goldfish_pipe_error_convert(status);
                        break;
                }

                status = wait_for_host_signal(pipe, is_write);
                if (status < 0)
                        return status;
        }

        if (count > 0)
                return count;
        return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
        size_t bufflen, loff_t *ppos)
{
        return goldfish_pipe_read_write(filp, buffer, bufflen, /* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
        const char __user *buffer, size_t bufflen,
        loff_t *ppos)
{
        return goldfish_pipe_read_write(filp,
                /* cast away the const */(char __user *)buffer, bufflen,
                /* is_write */ 1);
}

static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
        struct goldfish_pipe *pipe = filp->private_data;
        unsigned int mask = 0;
        int status;

        poll_wait(filp, &pipe->wake_queue, wait);

        status = goldfish_cmd(pipe, PIPE_CMD_POLL);
        if (status < 0)
                return POLLERR; /* poll returns a mask, not a negative errno */

        if (status & PIPE_POLL_IN)
                mask |= POLLIN | POLLRDNORM;
        if (status & PIPE_POLL_OUT)
                mask |= POLLOUT | POLLWRNORM;
        if (status & PIPE_POLL_HUP)
                mask |= POLLHUP;
        if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
                mask |= POLLERR;

        return mask;
}

static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
        u32 id, u32 flags)
{
        struct goldfish_pipe *pipe;

        BUG_ON(id >= dev->pipes_capacity);

        pipe = dev->pipes[id];
        if (!pipe)
                return;
        pipe->signalled_flags |= flags;

        if (pipe->prev_signalled || pipe->next_signalled
                || dev->first_signalled_pipe == pipe)
                return; /* already in the list */
        pipe->next_signalled = dev->first_signalled_pipe;
        if (dev->first_signalled_pipe)
                dev->first_signalled_pipe->prev_signalled = pipe;
        dev->first_signalled_pipe = pipe;
}

static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
        struct goldfish_pipe *pipe)
{
        if (pipe->prev_signalled)
                pipe->prev_signalled->next_signalled = pipe->next_signalled;
        if (pipe->next_signalled)
                pipe->next_signalled->prev_signalled = pipe->prev_signalled;
        if (pipe == dev->first_signalled_pipe)
                dev->first_signalled_pipe = pipe->next_signalled;
        pipe->prev_signalled = NULL;
        pipe->next_signalled = NULL;
}

static struct goldfish_pipe *signalled_pipes_pop_front(
                struct goldfish_pipe_dev *dev, int *wakes)
{
        struct goldfish_pipe *pipe;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);

        pipe = dev->first_signalled_pipe;
        if (pipe) {
                *wakes = pipe->signalled_flags;
                pipe->signalled_flags = 0;
                /*
                 * This is an optimized version of
                 * signalled_pipes_remove_locked() - we want to make it as
                 * fast as possible to wake up the sleeping pipe operations.
                 */
                dev->first_signalled_pipe = pipe->next_signalled;
                if (dev->first_signalled_pipe)
                        dev->first_signalled_pipe->prev_signalled = NULL;
                pipe->next_signalled = NULL;
        }

        spin_unlock_irqrestore(&dev->lock, flags);
        return pipe;
}

static void goldfish_interrupt_task(unsigned long unused)
{
        struct goldfish_pipe_dev *dev = pipe_dev;
        /* Iterate over the signalled pipes and wake them one by one */
        struct goldfish_pipe *pipe;
        int wakes;

        while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
                if (wakes & PIPE_WAKE_CLOSED) {
                        pipe->flags = 1 << BIT_CLOSED_ON_HOST;
                } else {
                        if (wakes & PIPE_WAKE_READ)
                                clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
                        if (wakes & PIPE_WAKE_WRITE)
                                clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
                }
                /*
                 * wake_up_interruptible() implies a write barrier, so don't
                 * explicitly add another one here.
                 */
                wake_up_interruptible(&pipe->wake_queue);
        }
}
DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);

/*
 * The general idea of the interrupt handling:
 *
 *  1. device raises an interrupt if there's at least one signalled pipe
 *  2. IRQ handler reads the signalled pipes and their count from the device
 *  3. device writes them into a shared buffer and returns the count;
 *     it only resets the IRQ if it has returned all signalled pipes,
 *     otherwise it leaves it raised, so the IRQ handler will be called
 *     again for the next chunk
 *  4. IRQ handler adds all returned pipes to the device's signalled pipes list
 *  5. IRQ handler launches a tasklet to process the signalled pipes from the
 *     list in a separate context
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
        u32 count;
        u32 i;
        unsigned long flags;
        struct goldfish_pipe_dev *dev = dev_id;

        if (dev != pipe_dev)
                return IRQ_NONE;

        /* Request the signalled pipes from the device */
        spin_lock_irqsave(&dev->lock, flags);

        count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
        if (count == 0) {
                spin_unlock_irqrestore(&dev->lock, flags);
                return IRQ_NONE;
        }
        if (count > MAX_SIGNALLED_PIPES)
                count = MAX_SIGNALLED_PIPES;

        for (i = 0; i < count; ++i)
                signalled_pipes_add_locked(dev,
                        dev->buffers->signalled_pipe_buffers[i].id,
                        dev->buffers->signalled_pipe_buffers[i].flags);

        spin_unlock_irqrestore(&dev->lock, flags);

        tasklet_schedule(&goldfish_interrupt_tasklet);
        return IRQ_HANDLED;
}

static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
        int id;

        for (id = 0; id < dev->pipes_capacity; ++id)
                if (!dev->pipes[id])
                        return id;

        {
                /* Reallocate the array */
                u32 new_capacity = 2 * dev->pipes_capacity;
                struct goldfish_pipe **pipes =
                        kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
                if (!pipes)
                        return -ENOMEM;
                memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
                kfree(dev->pipes);
                dev->pipes = pipes;
                id = dev->pipes_capacity;
                dev->pipes_capacity = new_capacity;
        }
        return id;
}

/**
 * goldfish_pipe_open - open a channel to the AVD
 * @inode: inode of device
 * @file: file struct of opener
 *
 * Create a new pipe link between the emulator and the user application.
 * Each new request produces a new pipe.
 *
 * Note: we use the pipe ID as a mux. All goldfish emulations are 32-bit
 * right now, so this is fine. A move to 64-bit will need to revisit this
 * addressing.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
        struct goldfish_pipe_dev *dev = pipe_dev;
        unsigned long flags;
        int id;
        int status;

        /* Allocate new pipe kernel object */
        struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

        if (pipe == NULL)
                return -ENOMEM;

        pipe->dev = dev;
        mutex_init(&pipe->lock);
        init_waitqueue_head(&pipe->wake_queue);

        /*
         * The command buffer needs to be allocated on its own page to make
         * sure it is physically contiguous in the host's address space.
         */
        pipe->command_buffer =
                (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
        if (!pipe->command_buffer) {
                status = -ENOMEM;
                goto err_pipe;
        }

        spin_lock_irqsave(&dev->lock, flags);

        id = get_free_pipe_id_locked(dev);
        if (id < 0) {
                status = id;
                goto err_id_locked;
        }

        dev->pipes[id] = pipe;
        pipe->id = id;
        pipe->command_buffer->id = id;

        /* Now tell the emulator we're opening a new pipe. */
        dev->buffers->open_command_params.rw_params_max_count =
                        MAX_BUFFERS_PER_COMMAND;
        dev->buffers->open_command_params.command_buffer_ptr =
                        (u64)(unsigned long)__pa(pipe->command_buffer);
        status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
        spin_unlock_irqrestore(&dev->lock, flags);
        if (status < 0)
                goto err_cmd;
        /* All is done, save the pipe into the file's private data field */
        file->private_data = pipe;
        return 0;

err_cmd:
        spin_lock_irqsave(&dev->lock, flags);
        dev->pipes[id] = NULL;
err_id_locked:
        spin_unlock_irqrestore(&dev->lock, flags);
        free_page((unsigned long)pipe->command_buffer);
err_pipe:
        kfree(pipe);
        return status;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
        unsigned long flags;
        struct goldfish_pipe *pipe = filp->private_data;
        struct goldfish_pipe_dev *dev = pipe->dev;

        /* The guest is closing the channel, so tell the emulator right now */
        (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);

        spin_lock_irqsave(&dev->lock, flags);
        dev->pipes[pipe->id] = NULL;
        signalled_pipes_remove_locked(dev, pipe);
        spin_unlock_irqrestore(&dev->lock, flags);

        filp->private_data = NULL;
        free_page((unsigned long)pipe->command_buffer);
        kfree(pipe);
        return 0;
}

static const struct file_operations goldfish_pipe_fops = {
        .owner = THIS_MODULE,
        .read = goldfish_pipe_read,
        .write = goldfish_pipe_write,
        .poll = goldfish_pipe_poll,
        .open = goldfish_pipe_open,
        .release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "goldfish_pipe",
        .fops = &goldfish_pipe_fops,
};

static int goldfish_pipe_device_init_v2(struct platform_device *pdev)
{
        char *page;
        struct goldfish_pipe_dev *dev = pipe_dev;
        int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
                                IRQF_SHARED, "goldfish_pipe", dev);
        if (err) {
                dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
                return err;
        }

        err = misc_register(&goldfish_pipe_dev);
        if (err) {
                dev_err(&pdev->dev, "unable to register v2 device\n");
                return err;
        }

        dev->first_signalled_pipe = NULL;
        dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
        dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
                        GFP_KERNEL);
        if (!dev->pipes)
                return -ENOMEM;

        /*
         * We're going to pass two buffers, open_command_params and
         * signalled_pipe_buffers, to the host. This means each of those
         * buffers needs to be contained in a single physical page. The
         * easiest choice is to just allocate a page and place the buffers
         * in it.
         */
        BUG_ON(sizeof(*dev->buffers) > PAGE_SIZE);
        page = (char *)__get_free_page(GFP_KERNEL);
        if (!page) {
                kfree(dev->pipes);
                return -ENOMEM;
        }
        dev->buffers = (struct goldfish_pipe_dev_buffers *)page;

        /* Send the buffer addresses to the host */
        {
                u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);

                writel((u32)(unsigned long)(paddr >> 32),
                        dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
                writel((u32)(unsigned long)paddr,
                        dev->base + PIPE_REG_SIGNAL_BUFFER);
                writel((u32)MAX_SIGNALLED_PIPES,
                        dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

                paddr = __pa(&dev->buffers->open_command_params);
                writel((u32)(unsigned long)(paddr >> 32),
                        dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
                writel((u32)(unsigned long)paddr,
                        dev->base + PIPE_REG_OPEN_BUFFER);
        }
        return 0;
}

static void goldfish_pipe_device_deinit_v2(struct platform_device *pdev)
{
        struct goldfish_pipe_dev *dev = pipe_dev;

        misc_deregister(&goldfish_pipe_dev);
        kfree(dev->pipes);
        free_page((unsigned long)dev->buffers);
}

static int goldfish_pipe_probe(struct platform_device *pdev)
{
        int err;
        struct resource *r;
        struct goldfish_pipe_dev *dev = pipe_dev;

        BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);

        /* not thread safe, but this should not happen */
        WARN_ON(dev->base != NULL);

        spin_lock_init(&dev->lock);

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (r == NULL || resource_size(r) < PAGE_SIZE) {
                dev_err(&pdev->dev, "can't allocate i/o page\n");
                return -EINVAL;
        }
        dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
        if (dev->base == NULL) {
                dev_err(&pdev->dev, "ioremap failed\n");
                return -EINVAL;
        }

        r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (r == NULL) {
                err = -EINVAL;
                goto error;
        }
        dev->irq = r->start;

        /*
         * Exchange the versions with the host device.
         *
         * Note: the v1 driver did not report its version, so we write ours
         * before reading the device version back: this allows the host
         * implementation to detect the old driver (if there was no version
         * write before the read).
         */
        writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
        dev->version = readl(dev->base + PIPE_REG_VERSION);
        if (dev->version < PIPE_CURRENT_DEVICE_VERSION) {
                /* initialize the old device version */
                err = goldfish_pipe_device_init_v1(pdev);
        } else {
                /* Host device supports the new interface */
                err = goldfish_pipe_device_init_v2(pdev);
        }
        if (!err)
                return 0;

error:
        dev->base = NULL;
        return err;
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
        struct goldfish_pipe_dev *dev = pipe_dev;

        if (dev->version < PIPE_CURRENT_DEVICE_VERSION)
                goldfish_pipe_device_deinit_v1(pdev);
        else
                goldfish_pipe_device_deinit_v2(pdev);
        dev->base = NULL;
        return 0;
}

static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
        { "GFSH0003", 0 },
        { },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);

static const struct of_device_id goldfish_pipe_of_match[] = {
        { .compatible = "google,android-pipe", },
        {},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);

static struct platform_driver goldfish_pipe_driver = {
        .probe = goldfish_pipe_probe,
        .remove = goldfish_pipe_remove,
        .driver = {
                .name = "goldfish_pipe",
                .of_match_table = goldfish_pipe_of_match,
                .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
        }
};

module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");