George Zhang06164d22013-01-08 15:54:54 -08001/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
George Zhang06164d22013-01-08 15:54:54 -080016#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
Andy King42281d22013-01-10 15:41:39 -080018#include <linux/highmem.h>
George Zhang06164d22013-01-08 15:54:54 -080019#include <linux/kernel.h>
Andy King42281d22013-01-10 15:41:39 -080020#include <linux/mm.h>
George Zhang06164d22013-01-08 15:54:54 -080021#include <linux/module.h>
22#include <linux/mutex.h>
Andy King42281d22013-01-10 15:41:39 -080023#include <linux/pagemap.h>
Andy King6d6dfb42013-08-23 09:22:14 -070024#include <linux/pci.h>
Andy King42281d22013-01-10 15:41:39 -080025#include <linux/sched.h>
26#include <linux/slab.h>
Rusty Russelld2f83e92013-05-17 09:05:21 +093027#include <linux/uio.h>
George Zhang06164d22013-01-08 15:54:54 -080028#include <linux/wait.h>
David Rientjesf6dcf8e2013-01-24 14:49:31 -080029#include <linux/vmalloc.h>
Al Virod838df22014-11-24 19:32:50 -050030#include <linux/skbuff.h>
George Zhang06164d22013-01-08 15:54:54 -080031
32#include "vmci_handle_array.h"
33#include "vmci_queue_pair.h"
34#include "vmci_datagram.h"
35#include "vmci_resource.h"
36#include "vmci_context.h"
37#include "vmci_driver.h"
38#include "vmci_event.h"
39#include "vmci_route.h"
40
41/*
42 * In the following, we will distinguish between two kinds of VMX processes -
43 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
 44 * VMCI page files in the VMX to support VM to VM communication, and the
45 * newer ones that use the guest memory directly. We will in the following
46 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
47 * new-style VMX'en.
48 *
 49 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 50 * removed for readability) - see below for more details on the transitions:
51 *
52 * -------------- NEW -------------
53 * | |
54 * \_/ \_/
55 * CREATED_NO_MEM <-----------------> CREATED_MEM
56 * | | |
57 * | o-----------------------o |
58 * | | |
59 * \_/ \_/ \_/
60 * ATTACHED_NO_MEM <----------------> ATTACHED_MEM
61 * | | |
62 * | o----------------------o |
63 * | | |
64 * \_/ \_/ \_/
65 * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
66 * | |
67 * | |
68 * -------------> gone <-------------
69 *
70 * In more detail. When a VMCI queue pair is first created, it will be in the
71 * VMCIQPB_NEW state. It will then move into one of the following states:
72 *
73 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
74 *
 75 *     - the create was performed by a host endpoint, in which case there is
76 * no backing memory yet.
77 *
78 * - the create was initiated by an old-style VMX, that uses
79 * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
80 * a later point in time. This state can be distinguished from the one
81 * above by the context ID of the creator. A host side is not allowed to
82 * attach until the page store has been set.
83 *
84 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
85 * is created by a VMX using the queue pair device backend that
86 * sets the UVAs of the queue pair immediately and stores the
87 * information for later attachers. At this point, it is ready for
88 * the host side to attach to it.
89 *
90 * Once the queue pair is in one of the created states (with the exception of
91 * the case mentioned for older VMX'en above), it is possible to attach to the
92 * queue pair. Again we have two new states possible:
93 *
94 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
95 * paths:
96 *
97 * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
98 * pair, and attaches to a queue pair previously created by the host side.
99 *
100 * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
101 * already created by a guest.
102 *
103 * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
104 * vmci_qp_broker_set_page_store (see below).
105 *
106 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
107 * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
108 * bring the queue pair into this state. Once vmci_qp_broker_set_page_store
 109 *   is called to register the user memory, the VMCIQPB_ATTACHED_MEM state
110 * will be entered.
111 *
112 * From the attached queue pair, the queue pair can enter the shutdown states
113 * when either side of the queue pair detaches. If the guest side detaches
114 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
115 * the content of the queue pair will no longer be available. If the host
116 * side detaches first, the queue pair will either enter the
117 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
118 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
119 * (e.g., the host detaches while a guest is stunned).
120 *
121 * New-style VMX'en will also unmap guest memory, if the guest is
122 * quiesced, e.g., during a snapshot operation. In that case, the guest
123 * memory will no longer be available, and the queue pair will transition from
124 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
125 * in which case the queue pair will transition from the *_NO_MEM state at that
126 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
127 * since the peer may have either attached or detached in the meantime. The
128 * values are laid out such that ++ on a state will move from a *_NO_MEM to a
129 * *_MEM state, and vice versa.
130 */
131
132/*
133 * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
134 * types are passed around to enqueue and dequeue routines. Note that
135 * often the functions passed are simply wrappers around memcpy
136 * itself.
137 *
138 * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
139 * there's an unused last parameter for the hosted side. In
140 * ESX, that parameter holds a buffer type.
141 */
142typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
143 u64 queue_offset, const void *src,
144 size_t src_offset, size_t size);
145typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
146 const struct vmci_queue *queue,
147 u64 queue_offset, size_t size);
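/*
 * Illustrative sketch, not part of the driver logic: an enqueue path that
 * has been handed one of these callbacks - e.g. qp_memcpy_to_queue() or
 * qp_memcpy_to_queue_iov() defined further below - simply passes the
 * queue-relative offset and lets the callback deal with the page mapping:
 *
 *	vmci_memcpy_to_queue_func *copy = qp_memcpy_to_queue;
 *	int result = copy(produce_q, tail_offset, buf, 0, buf_size);
 *
 * Here produce_q, tail_offset, buf and buf_size are hypothetical variables
 * used only for illustration.
 */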
148
149/* The Kernel specific component of the struct vmci_queue structure. */
150struct vmci_queue_kern_if {
George Zhang06164d22013-01-08 15:54:54 -0800151 struct mutex __mutex; /* Protects the queue. */
152 struct mutex *mutex; /* Shared by producer and consumer queues. */
Andy King6d6dfb42013-08-23 09:22:14 -0700153 size_t num_pages; /* Number of pages incl. header. */
154 bool host; /* Host or guest? */
155 union {
156 struct {
157 dma_addr_t *pas;
158 void **vas;
159 } g; /* Used by the guest. */
160 struct {
161 struct page **page;
162 struct page **header_page;
163 } h; /* Used by the host. */
164 } u;
George Zhang06164d22013-01-08 15:54:54 -0800165};
166
167/*
168 * This structure is opaque to the clients.
169 */
170struct vmci_qp {
171 struct vmci_handle handle;
172 struct vmci_queue *produce_q;
173 struct vmci_queue *consume_q;
174 u64 produce_q_size;
175 u64 consume_q_size;
176 u32 peer;
177 u32 flags;
178 u32 priv_flags;
179 bool guest_endpoint;
180 unsigned int blocked;
181 unsigned int generation;
182 wait_queue_head_t event;
183};
184
185enum qp_broker_state {
186 VMCIQPB_NEW,
187 VMCIQPB_CREATED_NO_MEM,
188 VMCIQPB_CREATED_MEM,
189 VMCIQPB_ATTACHED_NO_MEM,
190 VMCIQPB_ATTACHED_MEM,
191 VMCIQPB_SHUTDOWN_NO_MEM,
192 VMCIQPB_SHUTDOWN_MEM,
193 VMCIQPB_GONE
194};
195
196#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
197 _qpb->state == VMCIQPB_ATTACHED_MEM || \
198 _qpb->state == VMCIQPB_SHUTDOWN_MEM)
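/*
 * Illustrative sketch, not driver code: because each *_NO_MEM value
 * directly precedes its *_MEM counterpart in the enum above, mapping or
 * unmapping guest memory can be expressed as a simple increment or
 * decrement of the state, e.g. for a hypothetical broker entry pointer:
 *
 *	entry->state++;		moves VMCIQPB_ATTACHED_NO_MEM to
 *				VMCIQPB_ATTACHED_MEM
 *	entry->state--;		moves it back again
 *
 * QPBROKERSTATE_HAS_MEM() then tells the two flavors apart.
 */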
199
200/*
201 * In the queue pair broker, we always use the guest point of view for
202 * the produce and consume queue values and references, e.g., the
 203 * produce queue size stored is the guest's produce queue size.  The
204 * host endpoint will need to swap these around. The only exception is
205 * the local queue pairs on the host, in which case the host endpoint
206 * that creates the queue pair will have the right orientation, and
207 * the attaching host endpoint will need to swap.
208 */
209struct qp_entry {
210 struct list_head list_item;
211 struct vmci_handle handle;
212 u32 peer;
213 u32 flags;
214 u64 produce_size;
215 u64 consume_size;
216 u32 ref_count;
217};
218
219struct qp_broker_entry {
220 struct vmci_resource resource;
221 struct qp_entry qp;
222 u32 create_id;
223 u32 attach_id;
224 enum qp_broker_state state;
225 bool require_trusted_attach;
226 bool created_by_trusted;
227 bool vmci_page_files; /* Created by VMX using VMCI page files */
228 struct vmci_queue *produce_q;
229 struct vmci_queue *consume_q;
230 struct vmci_queue_header saved_produce_q;
231 struct vmci_queue_header saved_consume_q;
232 vmci_event_release_cb wakeup_cb;
233 void *client_data;
234 void *local_mem; /* Kernel memory for local queue pair */
235};
236
237struct qp_guest_endpoint {
238 struct vmci_resource resource;
239 struct qp_entry qp;
240 u64 num_ppns;
241 void *produce_q;
242 void *consume_q;
Dmitry Torokhove6389a12013-01-10 15:41:42 -0800243 struct ppn_set ppn_set;
George Zhang06164d22013-01-08 15:54:54 -0800244};
245
246struct qp_list {
247 struct list_head head;
248 struct mutex mutex; /* Protect queue list. */
249};
250
251static struct qp_list qp_broker_list = {
252 .head = LIST_HEAD_INIT(qp_broker_list.head),
253 .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
254};
255
256static struct qp_list qp_guest_endpoints = {
257 .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
258 .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
259};
260
261#define INVALID_VMCI_GUEST_MEM_ID 0
Andy King42281d22013-01-10 15:41:39 -0800262#define QPE_NUM_PAGES(_QPE) ((u32) \
263 (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
264 DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
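/*
 * Worked example (illustration only): with 4 KiB pages, an entry with a
 * 64 KiB produce queue and a 32 KiB consume queue gives
 * QPE_NUM_PAGES() = 16 + 8 + 2 = 26 pages; the two extra pages are the
 * produce and consume queue headers.
 */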
George Zhang06164d22013-01-08 15:54:54 -0800265
266
267/*
268 * Frees kernel VA space for a given queue and its queue header, and
269 * frees physical data pages.
270 */
271static void qp_free_queue(void *q, u64 size)
272{
273 struct vmci_queue *queue = q;
274
275 if (queue) {
Andy King6d6dfb42013-08-23 09:22:14 -0700276 u64 i;
George Zhang06164d22013-01-08 15:54:54 -0800277
Andy King6d6dfb42013-08-23 09:22:14 -0700278 /* Given size does not include header, so add in a page here. */
279 for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
280 dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
281 queue->kernel_if->u.g.vas[i],
282 queue->kernel_if->u.g.pas[i]);
283 }
George Zhang06164d22013-01-08 15:54:54 -0800284
Andy King6d6dfb42013-08-23 09:22:14 -0700285 vfree(queue);
George Zhang06164d22013-01-08 15:54:54 -0800286 }
287}
288
289/*
Andy King6d6dfb42013-08-23 09:22:14 -0700290 * Allocates kernel queue pages of specified size with IOMMU mappings,
291 * plus space for the queue structure/kernel interface and the queue
292 * header.
George Zhang06164d22013-01-08 15:54:54 -0800293 */
294static void *qp_alloc_queue(u64 size, u32 flags)
295{
296 u64 i;
297 struct vmci_queue *queue;
Jorgen Hansenaa6467f12015-03-02 08:19:11 -0800298 size_t pas_size;
299 size_t vas_size;
300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
Dan Carpentera90e9ee2017-05-08 15:55:14 -0700301 u64 num_pages;
Jorgen Hansenaa6467f12015-03-02 08:19:11 -0800302
Dan Carpentera90e9ee2017-05-08 15:55:14 -0700303 if (size > SIZE_MAX - PAGE_SIZE)
304 return NULL;
305 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
Jorgen Hansenaa6467f12015-03-02 08:19:11 -0800306 if (num_pages >
307 (SIZE_MAX - queue_size) /
308 (sizeof(*queue->kernel_if->u.g.pas) +
309 sizeof(*queue->kernel_if->u.g.vas)))
310 return NULL;
311
312 pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
313 vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
314 queue_size += pas_size + vas_size;
George Zhang06164d22013-01-08 15:54:54 -0800315
Andy King6d6dfb42013-08-23 09:22:14 -0700316 queue = vmalloc(queue_size);
317 if (!queue)
George Zhang06164d22013-01-08 15:54:54 -0800318 return NULL;
319
Andy King6d6dfb42013-08-23 09:22:14 -0700320 queue->q_header = NULL;
George Zhang06164d22013-01-08 15:54:54 -0800321 queue->saved_header = NULL;
322 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
Andy King6d6dfb42013-08-23 09:22:14 -0700323 queue->kernel_if->mutex = NULL;
324 queue->kernel_if->num_pages = num_pages;
325 queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
326 queue->kernel_if->u.g.vas =
327 (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
George Zhang06164d22013-01-08 15:54:54 -0800328 queue->kernel_if->host = false;
George Zhang06164d22013-01-08 15:54:54 -0800329
Andy King6d6dfb42013-08-23 09:22:14 -0700330 for (i = 0; i < num_pages; i++) {
331 queue->kernel_if->u.g.vas[i] =
332 dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
333 &queue->kernel_if->u.g.pas[i],
334 GFP_KERNEL);
335 if (!queue->kernel_if->u.g.vas[i]) {
336 /* Size excl. the header. */
337 qp_free_queue(queue, i * PAGE_SIZE);
338 return NULL;
339 }
George Zhang06164d22013-01-08 15:54:54 -0800340 }
341
Andy King6d6dfb42013-08-23 09:22:14 -0700342 /* Queue header is the first page. */
343 queue->q_header = queue->kernel_if->u.g.vas[0];
George Zhang06164d22013-01-08 15:54:54 -0800344
Andy King6d6dfb42013-08-23 09:22:14 -0700345 return queue;
George Zhang06164d22013-01-08 15:54:54 -0800346}
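/*
 * Illustrative layout of the single vmalloc allocation made by
 * qp_alloc_queue() above:
 *
 *	[struct vmci_queue][struct vmci_queue_kern_if][pas[0..n-1]][vas[0..n-1]]
 *
 * where n is num_pages; each pas[i]/vas[i] pair describes one
 * DMA-coherent page allocated separately, and vas[0] (the first page)
 * holds the queue header.
 */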
347
348/*
349 * Copies from a given buffer or iovector to a VMCI Queue. Uses
350 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
351 * by traversing the offset -> page translation structure for the queue.
352 * Assumes that offset + size does not wrap around in the queue.
353 */
354static int __qp_memcpy_to_queue(struct vmci_queue *queue,
355 u64 queue_offset,
356 const void *src,
357 size_t size,
358 bool is_iovec)
359{
360 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
361 size_t bytes_copied = 0;
362
363 while (bytes_copied < size) {
Andy King6d6dfb42013-08-23 09:22:14 -0700364 const u64 page_index =
365 (queue_offset + bytes_copied) / PAGE_SIZE;
366 const size_t page_offset =
George Zhang06164d22013-01-08 15:54:54 -0800367 (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
368 void *va;
369 size_t to_copy;
370
Andy King6d6dfb42013-08-23 09:22:14 -0700371 if (kernel_if->host)
372 va = kmap(kernel_if->u.h.page[page_index]);
373 else
374 va = kernel_if->u.g.vas[page_index + 1];
375 /* Skip header. */
George Zhang06164d22013-01-08 15:54:54 -0800376
377 if (size - bytes_copied > PAGE_SIZE - page_offset)
378 /* Enough payload to fill up from this page. */
379 to_copy = PAGE_SIZE - page_offset;
380 else
381 to_copy = size - bytes_copied;
382
383 if (is_iovec) {
Al Viro4c946d92014-11-27 19:52:04 -0500384 struct msghdr *msg = (struct msghdr *)src;
George Zhang06164d22013-01-08 15:54:54 -0800385 int err;
386
387 /* The iovec will track bytes_copied internally. */
Al Viro4c946d92014-11-27 19:52:04 -0500388 err = memcpy_from_msg((u8 *)va + page_offset,
389 msg, to_copy);
George Zhang06164d22013-01-08 15:54:54 -0800390 if (err != 0) {
Andy King6d6dfb42013-08-23 09:22:14 -0700391 if (kernel_if->host)
392 kunmap(kernel_if->u.h.page[page_index]);
George Zhang06164d22013-01-08 15:54:54 -0800393 return VMCI_ERROR_INVALID_ARGS;
394 }
395 } else {
396 memcpy((u8 *)va + page_offset,
397 (u8 *)src + bytes_copied, to_copy);
398 }
399
400 bytes_copied += to_copy;
Andy King6d6dfb42013-08-23 09:22:14 -0700401 if (kernel_if->host)
402 kunmap(kernel_if->u.h.page[page_index]);
George Zhang06164d22013-01-08 15:54:54 -0800403 }
404
405 return VMCI_SUCCESS;
406}
407
408/*
409 * Copies to a given buffer or iovector from a VMCI Queue. Uses
410 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
411 * by traversing the offset -> page translation structure for the queue.
412 * Assumes that offset + size does not wrap around in the queue.
413 */
414static int __qp_memcpy_from_queue(void *dest,
415 const struct vmci_queue *queue,
416 u64 queue_offset,
417 size_t size,
418 bool is_iovec)
419{
420 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
421 size_t bytes_copied = 0;
422
423 while (bytes_copied < size) {
Andy King6d6dfb42013-08-23 09:22:14 -0700424 const u64 page_index =
425 (queue_offset + bytes_copied) / PAGE_SIZE;
426 const size_t page_offset =
George Zhang06164d22013-01-08 15:54:54 -0800427 (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
428 void *va;
429 size_t to_copy;
430
Andy King6d6dfb42013-08-23 09:22:14 -0700431 if (kernel_if->host)
432 va = kmap(kernel_if->u.h.page[page_index]);
433 else
434 va = kernel_if->u.g.vas[page_index + 1];
435 /* Skip header. */
George Zhang06164d22013-01-08 15:54:54 -0800436
437 if (size - bytes_copied > PAGE_SIZE - page_offset)
438 /* Enough payload to fill up this page. */
439 to_copy = PAGE_SIZE - page_offset;
440 else
441 to_copy = size - bytes_copied;
442
443 if (is_iovec) {
Al Virod838df22014-11-24 19:32:50 -0500444 struct msghdr *msg = dest;
George Zhang06164d22013-01-08 15:54:54 -0800445 int err;
446
447 /* The iovec will track bytes_copied internally. */
Al Virod838df22014-11-24 19:32:50 -0500448 err = memcpy_to_msg(msg, (u8 *)va + page_offset,
George Zhang06164d22013-01-08 15:54:54 -0800449 to_copy);
450 if (err != 0) {
Andy King6d6dfb42013-08-23 09:22:14 -0700451 if (kernel_if->host)
452 kunmap(kernel_if->u.h.page[page_index]);
George Zhang06164d22013-01-08 15:54:54 -0800453 return VMCI_ERROR_INVALID_ARGS;
454 }
455 } else {
456 memcpy((u8 *)dest + bytes_copied,
457 (u8 *)va + page_offset, to_copy);
458 }
459
460 bytes_copied += to_copy;
Andy King6d6dfb42013-08-23 09:22:14 -0700461 if (kernel_if->host)
462 kunmap(kernel_if->u.h.page[page_index]);
George Zhang06164d22013-01-08 15:54:54 -0800463 }
464
465 return VMCI_SUCCESS;
466}
467
468/*
 469 * Allocates two lists of PPNs --- one for the pages in the produce queue,
 470 * and the other for the pages in the consume queue. Initializes the lists
471 * of PPNs with the page frame numbers of the KVA for the two queues (and
472 * the queue headers).
473 */
474static int qp_alloc_ppn_set(void *prod_q,
475 u64 num_produce_pages,
476 void *cons_q,
Dmitry Torokhove6389a12013-01-10 15:41:42 -0800477 u64 num_consume_pages, struct ppn_set *ppn_set)
George Zhang06164d22013-01-08 15:54:54 -0800478{
479 u32 *produce_ppns;
480 u32 *consume_ppns;
481 struct vmci_queue *produce_q = prod_q;
482 struct vmci_queue *consume_q = cons_q;
483 u64 i;
484
485 if (!produce_q || !num_produce_pages || !consume_q ||
486 !num_consume_pages || !ppn_set)
487 return VMCI_ERROR_INVALID_ARGS;
488
489 if (ppn_set->initialized)
490 return VMCI_ERROR_ALREADY_EXISTS;
491
492 produce_ppns =
493 kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
494 if (!produce_ppns)
495 return VMCI_ERROR_NO_MEM;
496
497 consume_ppns =
498 kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
499 if (!consume_ppns) {
500 kfree(produce_ppns);
501 return VMCI_ERROR_NO_MEM;
502 }
503
Andy King6d6dfb42013-08-23 09:22:14 -0700504 for (i = 0; i < num_produce_pages; i++) {
George Zhang06164d22013-01-08 15:54:54 -0800505 unsigned long pfn;
506
507 produce_ppns[i] =
Andy King6d6dfb42013-08-23 09:22:14 -0700508 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
George Zhang06164d22013-01-08 15:54:54 -0800509 pfn = produce_ppns[i];
510
511 /* Fail allocation if PFN isn't supported by hypervisor. */
512 if (sizeof(pfn) > sizeof(*produce_ppns)
513 && pfn != produce_ppns[i])
514 goto ppn_error;
515 }
516
Andy King6d6dfb42013-08-23 09:22:14 -0700517 for (i = 0; i < num_consume_pages; i++) {
George Zhang06164d22013-01-08 15:54:54 -0800518 unsigned long pfn;
519
520 consume_ppns[i] =
Andy King6d6dfb42013-08-23 09:22:14 -0700521 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
George Zhang06164d22013-01-08 15:54:54 -0800522 pfn = consume_ppns[i];
523
524 /* Fail allocation if PFN isn't supported by hypervisor. */
525 if (sizeof(pfn) > sizeof(*consume_ppns)
526 && pfn != consume_ppns[i])
527 goto ppn_error;
528 }
529
530 ppn_set->num_produce_pages = num_produce_pages;
531 ppn_set->num_consume_pages = num_consume_pages;
532 ppn_set->produce_ppns = produce_ppns;
533 ppn_set->consume_ppns = consume_ppns;
534 ppn_set->initialized = true;
535 return VMCI_SUCCESS;
536
537 ppn_error:
538 kfree(produce_ppns);
539 kfree(consume_ppns);
540 return VMCI_ERROR_INVALID_ARGS;
541}
542
543/*
 544 * Frees the two lists of PPNs for a queue pair.
545 */
Dmitry Torokhove6389a12013-01-10 15:41:42 -0800546static void qp_free_ppn_set(struct ppn_set *ppn_set)
George Zhang06164d22013-01-08 15:54:54 -0800547{
548 if (ppn_set->initialized) {
549 /* Do not call these functions on NULL inputs. */
550 kfree(ppn_set->produce_ppns);
551 kfree(ppn_set->consume_ppns);
552 }
553 memset(ppn_set, 0, sizeof(*ppn_set));
554}
555
556/*
 557 * Populates the list of PPNs in the hypercall structure with the PPNs
558 * of the produce queue and the consume queue.
559 */
Dmitry Torokhove6389a12013-01-10 15:41:42 -0800560static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
George Zhang06164d22013-01-08 15:54:54 -0800561{
562 memcpy(call_buf, ppn_set->produce_ppns,
563 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
564 memcpy(call_buf +
565 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
566 ppn_set->consume_ppns,
567 ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
568
569 return VMCI_SUCCESS;
570}
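/*
 * Illustrative layout of the hypercall buffer filled in by
 * qp_populate_ppn_set() above (used by qp_alloc_hypercall() below),
 * assuming N produce pages and M consume pages:
 *
 *	call_buf:  [ produce PPN 0 .. N-1 ][ consume PPN 0 .. M-1 ]
 *
 * with each PPN stored as a u32.
 */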
571
572static int qp_memcpy_to_queue(struct vmci_queue *queue,
573 u64 queue_offset,
574 const void *src, size_t src_offset, size_t size)
575{
576 return __qp_memcpy_to_queue(queue, queue_offset,
577 (u8 *)src + src_offset, size, false);
578}
579
580static int qp_memcpy_from_queue(void *dest,
581 size_t dest_offset,
582 const struct vmci_queue *queue,
583 u64 queue_offset, size_t size)
584{
585 return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
586 queue, queue_offset, size, false);
587}
588
589/*
 590 * Copies from a given iovec to a VMCI Queue.
591 */
592static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
593 u64 queue_offset,
Al Viro4c946d92014-11-27 19:52:04 -0500594 const void *msg,
George Zhang06164d22013-01-08 15:54:54 -0800595 size_t src_offset, size_t size)
596{
597
598 /*
599 * We ignore src_offset because src is really a struct iovec * and will
600 * maintain offset internally.
601 */
Al Viro4c946d92014-11-27 19:52:04 -0500602 return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
George Zhang06164d22013-01-08 15:54:54 -0800603}
604
605/*
606 * Copies to a given iovec from a VMCI Queue.
607 */
608static int qp_memcpy_from_queue_iov(void *dest,
609 size_t dest_offset,
610 const struct vmci_queue *queue,
611 u64 queue_offset, size_t size)
612{
613 /*
614 * We ignore dest_offset because dest is really a struct iovec * and
615 * will maintain offset internally.
616 */
617 return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
618}
619
620/*
621 * Allocates kernel VA space of specified size plus space for the queue
622 * and kernel interface. This is different from the guest queue allocator,
623 * because we do not allocate our own queue header/data pages here but
624 * share those of the guest.
625 */
626static struct vmci_queue *qp_host_alloc_queue(u64 size)
627{
628 struct vmci_queue *queue;
Jorgen Hansenaa6467f12015-03-02 08:19:11 -0800629 size_t queue_page_size;
Dan Carpentera90e9ee2017-05-08 15:55:14 -0700630 u64 num_pages;
George Zhang06164d22013-01-08 15:54:54 -0800631 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
Jorgen Hansenaa6467f12015-03-02 08:19:11 -0800632
Dan Carpentera90e9ee2017-05-08 15:55:14 -0700633 if (size > SIZE_MAX - PAGE_SIZE)
634 return NULL;
635 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
Jorgen Hansenaa6467f12015-03-02 08:19:11 -0800636 if (num_pages > (SIZE_MAX - queue_size) /
637 sizeof(*queue->kernel_if->u.h.page))
638 return NULL;
639
640 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
George Zhang06164d22013-01-08 15:54:54 -0800641
642 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
643 if (queue) {
644 queue->q_header = NULL;
645 queue->saved_header = NULL;
Andy King6d6dfb42013-08-23 09:22:14 -0700646 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
George Zhang06164d22013-01-08 15:54:54 -0800647 queue->kernel_if->host = true;
648 queue->kernel_if->mutex = NULL;
649 queue->kernel_if->num_pages = num_pages;
Andy King6d6dfb42013-08-23 09:22:14 -0700650 queue->kernel_if->u.h.header_page =
George Zhang06164d22013-01-08 15:54:54 -0800651 (struct page **)((u8 *)queue + queue_size);
Andy King6d6dfb42013-08-23 09:22:14 -0700652 queue->kernel_if->u.h.page =
653 &queue->kernel_if->u.h.header_page[1];
George Zhang06164d22013-01-08 15:54:54 -0800654 }
655
656 return queue;
657}
658
659/*
660 * Frees kernel memory for a given queue (header plus translation
661 * structure).
662 */
663static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
664{
665 kfree(queue);
666}
667
668/*
669 * Initialize the mutex for the pair of queues. This mutex is used to
670 * protect the q_header and the buffer from changing out from under any
671 * users of either queue. Of course, it's only any good if the mutexes
672 * are actually acquired. Queue structure must lie on non-paged memory
673 * or we cannot guarantee access to the mutex.
674 */
675static void qp_init_queue_mutex(struct vmci_queue *produce_q,
676 struct vmci_queue *consume_q)
677{
678 /*
679 * Only the host queue has shared state - the guest queues do not
680 * need to synchronize access using a queue mutex.
681 */
682
683 if (produce_q->kernel_if->host) {
684 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
685 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
686 mutex_init(produce_q->kernel_if->mutex);
687 }
688}
689
690/*
691 * Cleans up the mutex for the pair of queues.
692 */
693static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
694 struct vmci_queue *consume_q)
695{
696 if (produce_q->kernel_if->host) {
697 produce_q->kernel_if->mutex = NULL;
698 consume_q->kernel_if->mutex = NULL;
699 }
700}
701
702/*
703 * Acquire the mutex for the queue. Note that the produce_q and
704 * the consume_q share a mutex. So, only one of the two need to
705 * be passed in to this routine. Either will work just fine.
706 */
707static void qp_acquire_queue_mutex(struct vmci_queue *queue)
708{
709 if (queue->kernel_if->host)
710 mutex_lock(queue->kernel_if->mutex);
711}
712
713/*
714 * Release the mutex for the queue. Note that the produce_q and
715 * the consume_q share a mutex. So, only one of the two need to
716 * be passed in to this routine. Either will work just fine.
717 */
718static void qp_release_queue_mutex(struct vmci_queue *queue)
719{
720 if (queue->kernel_if->host)
721 mutex_unlock(queue->kernel_if->mutex);
722}
723
724/*
725 * Helper function to release pages in the PageStoreAttachInfo
 726 * previously obtained using get_user_pages_fast.
727 */
728static void qp_release_pages(struct page **pages,
729 u64 num_pages, bool dirty)
730{
731 int i;
732
733 for (i = 0; i < num_pages; i++) {
734 if (dirty)
735 set_page_dirty(pages[i]);
736
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300737 put_page(pages[i]);
George Zhang06164d22013-01-08 15:54:54 -0800738 pages[i] = NULL;
739 }
740}
741
742/*
743 * Lock the user pages referenced by the {produce,consume}Buffer
744 * struct into memory and populate the {produce,consume}Pages
745 * arrays in the attach structure with them.
746 */
747static int qp_host_get_user_memory(u64 produce_uva,
748 u64 consume_uva,
749 struct vmci_queue *produce_q,
750 struct vmci_queue *consume_q)
751{
752 int retval;
753 int err = VMCI_SUCCESS;
754
Jan Kara240ddd42013-10-02 16:27:47 +0200755 retval = get_user_pages_fast((uintptr_t) produce_uva,
756 produce_q->kernel_if->num_pages, 1,
757 produce_q->kernel_if->u.h.header_page);
Dan Carpenter37f43c12018-07-04 12:33:34 +0300758 if (retval < (int)produce_q->kernel_if->num_pages) {
Davidlohr Buesobf136122015-02-17 14:29:21 -0800759 pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
760 retval);
Andy King6d6dfb42013-08-23 09:22:14 -0700761 qp_release_pages(produce_q->kernel_if->u.h.header_page,
762 retval, false);
George Zhang06164d22013-01-08 15:54:54 -0800763 err = VMCI_ERROR_NO_MEM;
764 goto out;
765 }
766
Jan Kara240ddd42013-10-02 16:27:47 +0200767 retval = get_user_pages_fast((uintptr_t) consume_uva,
768 consume_q->kernel_if->num_pages, 1,
769 consume_q->kernel_if->u.h.header_page);
Dan Carpenter37f43c12018-07-04 12:33:34 +0300770 if (retval < (int)consume_q->kernel_if->num_pages) {
Davidlohr Buesobf136122015-02-17 14:29:21 -0800771 pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
772 retval);
Andy King6d6dfb42013-08-23 09:22:14 -0700773 qp_release_pages(consume_q->kernel_if->u.h.header_page,
774 retval, false);
775 qp_release_pages(produce_q->kernel_if->u.h.header_page,
George Zhang06164d22013-01-08 15:54:54 -0800776 produce_q->kernel_if->num_pages, false);
777 err = VMCI_ERROR_NO_MEM;
778 }
779
780 out:
George Zhang06164d22013-01-08 15:54:54 -0800781 return err;
782}
783
784/*
785 * Registers the specification of the user pages used for backing a queue
786 * pair. Enough information to map in pages is stored in the OS specific
787 * part of the struct vmci_queue structure.
788 */
789static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
790 struct vmci_queue *produce_q,
791 struct vmci_queue *consume_q)
792{
793 u64 produce_uva;
794 u64 consume_uva;
795
796 /*
797 * The new style and the old style mapping only differs in
798 * that we either get a single or two UVAs, so we split the
799 * single UVA range at the appropriate spot.
800 */
801 produce_uva = page_store->pages;
802 consume_uva = page_store->pages +
803 produce_q->kernel_if->num_pages * PAGE_SIZE;
804 return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
805 consume_q);
806}
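/*
 * Illustrative layout of the single UVA range assumed by
 * qp_host_register_user_memory() above for a new-style page store:
 *
 *	page_store->pages
 *	|<- produce header + data pages ->|<- consume header + data pages ->|
 *	                                  ^ consume_uva
 *
 * i.e. the consume queue pages start produce_q->kernel_if->num_pages
 * pages into the range.
 */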
807
808/*
809 * Releases and removes the references to user pages stored in the attach
810 * struct. Pages are released from the page cache and may become
811 * swappable again.
812 */
813static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
814 struct vmci_queue *consume_q)
815{
Andy King6d6dfb42013-08-23 09:22:14 -0700816 qp_release_pages(produce_q->kernel_if->u.h.header_page,
George Zhang06164d22013-01-08 15:54:54 -0800817 produce_q->kernel_if->num_pages, true);
Andy King6d6dfb42013-08-23 09:22:14 -0700818 memset(produce_q->kernel_if->u.h.header_page, 0,
819 sizeof(*produce_q->kernel_if->u.h.header_page) *
George Zhang06164d22013-01-08 15:54:54 -0800820 produce_q->kernel_if->num_pages);
Andy King6d6dfb42013-08-23 09:22:14 -0700821 qp_release_pages(consume_q->kernel_if->u.h.header_page,
George Zhang06164d22013-01-08 15:54:54 -0800822 consume_q->kernel_if->num_pages, true);
Andy King6d6dfb42013-08-23 09:22:14 -0700823 memset(consume_q->kernel_if->u.h.header_page, 0,
824 sizeof(*consume_q->kernel_if->u.h.header_page) *
George Zhang06164d22013-01-08 15:54:54 -0800825 consume_q->kernel_if->num_pages);
826}
827
828/*
829 * Once qp_host_register_user_memory has been performed on a
830 * queue, the queue pair headers can be mapped into the
831 * kernel. Once mapped, they must be unmapped with
832 * qp_host_unmap_queues prior to calling
833 * qp_host_unregister_user_memory.
834 * Pages are pinned.
835 */
836static int qp_host_map_queues(struct vmci_queue *produce_q,
837 struct vmci_queue *consume_q)
838{
839 int result;
840
841 if (!produce_q->q_header || !consume_q->q_header) {
842 struct page *headers[2];
843
844 if (produce_q->q_header != consume_q->q_header)
845 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
846
Andy King6d6dfb42013-08-23 09:22:14 -0700847 if (produce_q->kernel_if->u.h.header_page == NULL ||
848 *produce_q->kernel_if->u.h.header_page == NULL)
George Zhang06164d22013-01-08 15:54:54 -0800849 return VMCI_ERROR_UNAVAILABLE;
850
Andy King6d6dfb42013-08-23 09:22:14 -0700851 headers[0] = *produce_q->kernel_if->u.h.header_page;
852 headers[1] = *consume_q->kernel_if->u.h.header_page;
George Zhang06164d22013-01-08 15:54:54 -0800853
854 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
855 if (produce_q->q_header != NULL) {
856 consume_q->q_header =
857 (struct vmci_queue_header *)((u8 *)
858 produce_q->q_header +
859 PAGE_SIZE);
860 result = VMCI_SUCCESS;
861 } else {
862 pr_warn("vmap failed\n");
863 result = VMCI_ERROR_NO_MEM;
864 }
865 } else {
866 result = VMCI_SUCCESS;
867 }
868
869 return result;
870}
871
872/*
873 * Unmaps previously mapped queue pair headers from the kernel.
874 * Pages are unpinned.
875 */
876static int qp_host_unmap_queues(u32 gid,
877 struct vmci_queue *produce_q,
878 struct vmci_queue *consume_q)
879{
880 if (produce_q->q_header) {
881 if (produce_q->q_header < consume_q->q_header)
882 vunmap(produce_q->q_header);
883 else
884 vunmap(consume_q->q_header);
885
886 produce_q->q_header = NULL;
887 consume_q->q_header = NULL;
888 }
889
890 return VMCI_SUCCESS;
891}
892
893/*
894 * Finds the entry in the list corresponding to a given handle. Assumes
895 * that the list is locked.
896 */
897static struct qp_entry *qp_list_find(struct qp_list *qp_list,
898 struct vmci_handle handle)
899{
900 struct qp_entry *entry;
901
902 if (vmci_handle_is_invalid(handle))
903 return NULL;
904
905 list_for_each_entry(entry, &qp_list->head, list_item) {
906 if (vmci_handle_is_equal(entry->handle, handle))
907 return entry;
908 }
909
910 return NULL;
911}
912
913/*
914 * Finds the entry in the list corresponding to a given handle.
915 */
916static struct qp_guest_endpoint *
917qp_guest_handle_to_entry(struct vmci_handle handle)
918{
919 struct qp_guest_endpoint *entry;
920 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
921
922 entry = qp ? container_of(
923 qp, struct qp_guest_endpoint, qp) : NULL;
924 return entry;
925}
926
927/*
928 * Finds the entry in the list corresponding to a given handle.
929 */
930static struct qp_broker_entry *
931qp_broker_handle_to_entry(struct vmci_handle handle)
932{
933 struct qp_broker_entry *entry;
934 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
935
936 entry = qp ? container_of(
937 qp, struct qp_broker_entry, qp) : NULL;
938 return entry;
939}
940
941/*
942 * Dispatches a queue pair event message directly into the local event
943 * queue.
944 */
945static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
946{
947 u32 context_id = vmci_get_context_id();
948 struct vmci_event_qp ev;
949
950 ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
951 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
952 VMCI_CONTEXT_RESOURCE_ID);
953 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
954 ev.msg.event_data.event =
955 attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
956 ev.payload.peer_id = context_id;
957 ev.payload.handle = handle;
958
959 return vmci_event_dispatch(&ev.msg.hdr);
960}
961
962/*
963 * Allocates and initializes a qp_guest_endpoint structure.
964 * Allocates a queue_pair rid (and handle) iff the given entry has
965 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
966 * are reserved handles. Assumes that the QP list mutex is held
967 * by the caller.
968 */
969static struct qp_guest_endpoint *
970qp_guest_endpoint_create(struct vmci_handle handle,
971 u32 peer,
972 u32 flags,
973 u64 produce_size,
974 u64 consume_size,
975 void *produce_q,
976 void *consume_q)
977{
978 int result;
979 struct qp_guest_endpoint *entry;
980 /* One page each for the queue headers. */
Andy King42281d22013-01-10 15:41:39 -0800981 const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
982 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
George Zhang06164d22013-01-08 15:54:54 -0800983
984 if (vmci_handle_is_invalid(handle)) {
985 u32 context_id = vmci_get_context_id();
986
987 handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
988 }
989
990 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
991 if (entry) {
992 entry->qp.peer = peer;
993 entry->qp.flags = flags;
994 entry->qp.produce_size = produce_size;
995 entry->qp.consume_size = consume_size;
996 entry->qp.ref_count = 0;
997 entry->num_ppns = num_ppns;
998 entry->produce_q = produce_q;
999 entry->consume_q = consume_q;
1000 INIT_LIST_HEAD(&entry->qp.list_item);
1001
1002 /* Add resource obj */
1003 result = vmci_resource_add(&entry->resource,
1004 VMCI_RESOURCE_TYPE_QPAIR_GUEST,
1005 handle);
1006 entry->qp.handle = vmci_resource_handle(&entry->resource);
1007 if ((result != VMCI_SUCCESS) ||
1008 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
1009 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
1010 handle.context, handle.resource, result);
1011 kfree(entry);
1012 entry = NULL;
1013 }
1014 }
1015 return entry;
1016}
1017
1018/*
1019 * Frees a qp_guest_endpoint structure.
1020 */
1021static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
1022{
1023 qp_free_ppn_set(&entry->ppn_set);
1024 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
1025 qp_free_queue(entry->produce_q, entry->qp.produce_size);
1026 qp_free_queue(entry->consume_q, entry->qp.consume_size);
1027 /* Unlink from resource hash table and free callback */
1028 vmci_resource_remove(&entry->resource);
1029
1030 kfree(entry);
1031}
1032
1033/*
1034 * Helper to make a queue_pairAlloc hypercall when the driver is
1035 * supporting a guest device.
1036 */
1037static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
1038{
1039 struct vmci_qp_alloc_msg *alloc_msg;
1040 size_t msg_size;
1041 int result;
1042
1043 if (!entry || entry->num_ppns <= 2)
1044 return VMCI_ERROR_INVALID_ARGS;
1045
1046 msg_size = sizeof(*alloc_msg) +
1047 (size_t) entry->num_ppns * sizeof(u32);
1048 alloc_msg = kmalloc(msg_size, GFP_KERNEL);
1049 if (!alloc_msg)
1050 return VMCI_ERROR_NO_MEM;
1051
1052 alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1053 VMCI_QUEUEPAIR_ALLOC);
1054 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
1055 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
1056 alloc_msg->handle = entry->qp.handle;
1057 alloc_msg->peer = entry->qp.peer;
1058 alloc_msg->flags = entry->qp.flags;
1059 alloc_msg->produce_size = entry->qp.produce_size;
1060 alloc_msg->consume_size = entry->qp.consume_size;
1061 alloc_msg->num_ppns = entry->num_ppns;
1062
1063 result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
1064 &entry->ppn_set);
1065 if (result == VMCI_SUCCESS)
1066 result = vmci_send_datagram(&alloc_msg->hdr);
1067
1068 kfree(alloc_msg);
1069
1070 return result;
1071}
1072
1073/*
1074 * Helper to make a queue_pairDetach hypercall when the driver is
1075 * supporting a guest device.
1076 */
1077static int qp_detatch_hypercall(struct vmci_handle handle)
1078{
1079 struct vmci_qp_detach_msg detach_msg;
1080
1081 detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1082 VMCI_QUEUEPAIR_DETACH);
1083 detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
1084 detach_msg.hdr.payload_size = sizeof(handle);
1085 detach_msg.handle = handle;
1086
1087 return vmci_send_datagram(&detach_msg.hdr);
1088}
1089
1090/*
1091 * Adds the given entry to the list. Assumes that the list is locked.
1092 */
1093static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
1094{
1095 if (entry)
1096 list_add(&entry->list_item, &qp_list->head);
1097}
1098
1099/*
1100 * Removes the given entry from the list. Assumes that the list is locked.
1101 */
1102static void qp_list_remove_entry(struct qp_list *qp_list,
1103 struct qp_entry *entry)
1104{
1105 if (entry)
1106 list_del(&entry->list_item);
1107}
1108
1109/*
1110 * Helper for VMCI queue_pair detach interface. Frees the physical
1111 * pages for the queue pair.
1112 */
1113static int qp_detatch_guest_work(struct vmci_handle handle)
1114{
1115 int result;
1116 struct qp_guest_endpoint *entry;
1117 u32 ref_count = ~0; /* To avoid compiler warning below */
1118
1119 mutex_lock(&qp_guest_endpoints.mutex);
1120
1121 entry = qp_guest_handle_to_entry(handle);
1122 if (!entry) {
1123 mutex_unlock(&qp_guest_endpoints.mutex);
1124 return VMCI_ERROR_NOT_FOUND;
1125 }
1126
1127 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1128 result = VMCI_SUCCESS;
1129
1130 if (entry->qp.ref_count > 1) {
1131 result = qp_notify_peer_local(false, handle);
1132 /*
1133 * We can fail to notify a local queuepair
1134 * because we can't allocate. We still want
1135 * to release the entry if that happens, so
1136 * don't bail out yet.
1137 */
1138 }
1139 } else {
1140 result = qp_detatch_hypercall(handle);
1141 if (result < VMCI_SUCCESS) {
1142 /*
1143 * We failed to notify a non-local queuepair.
1144 * That other queuepair might still be
1145 * accessing the shared memory, so don't
1146 * release the entry yet. It will get cleaned
1147 * up by VMCIqueue_pair_Exit() if necessary
1148 * (assuming we are going away, otherwise why
1149 * did this fail?).
1150 */
1151
1152 mutex_unlock(&qp_guest_endpoints.mutex);
1153 return result;
1154 }
1155 }
1156
1157 /*
1158 * If we get here then we either failed to notify a local queuepair, or
1159 * we succeeded in all cases. Release the entry if required.
1160 */
1161
1162 entry->qp.ref_count--;
1163 if (entry->qp.ref_count == 0)
1164 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
1165
1166 /* If we didn't remove the entry, this could change once we unlock. */
1167 if (entry)
1168 ref_count = entry->qp.ref_count;
1169
1170 mutex_unlock(&qp_guest_endpoints.mutex);
1171
1172 if (ref_count == 0)
1173 qp_guest_endpoint_destroy(entry);
1174
1175 return result;
1176}
1177
1178/*
 1179 * This function handles the actual allocation of a VMCI queue
1180 * pair guest endpoint. Allocates physical pages for the queue
1181 * pair. It makes OS dependent calls through generic wrappers.
1182 */
1183static int qp_alloc_guest_work(struct vmci_handle *handle,
1184 struct vmci_queue **produce_q,
1185 u64 produce_size,
1186 struct vmci_queue **consume_q,
1187 u64 consume_size,
1188 u32 peer,
1189 u32 flags,
1190 u32 priv_flags)
1191{
1192 const u64 num_produce_pages =
Andy King42281d22013-01-10 15:41:39 -08001193 DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
George Zhang06164d22013-01-08 15:54:54 -08001194 const u64 num_consume_pages =
Andy King42281d22013-01-10 15:41:39 -08001195 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
George Zhang06164d22013-01-08 15:54:54 -08001196 void *my_produce_q = NULL;
1197 void *my_consume_q = NULL;
1198 int result;
1199 struct qp_guest_endpoint *queue_pair_entry = NULL;
1200
1201 if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
1202 return VMCI_ERROR_NO_ACCESS;
1203
1204 mutex_lock(&qp_guest_endpoints.mutex);
1205
1206 queue_pair_entry = qp_guest_handle_to_entry(*handle);
1207 if (queue_pair_entry) {
1208 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1209 /* Local attach case. */
1210 if (queue_pair_entry->qp.ref_count > 1) {
1211 pr_devel("Error attempting to attach more than once\n");
1212 result = VMCI_ERROR_UNAVAILABLE;
1213 goto error_keep_entry;
1214 }
1215
1216 if (queue_pair_entry->qp.produce_size != consume_size ||
1217 queue_pair_entry->qp.consume_size !=
1218 produce_size ||
1219 queue_pair_entry->qp.flags !=
1220 (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
1221 pr_devel("Error mismatched queue pair in local attach\n");
1222 result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
1223 goto error_keep_entry;
1224 }
1225
1226 /*
1227 * Do a local attach. We swap the consume and
1228 * produce queues for the attacher and deliver
1229 * an attach event.
1230 */
1231 result = qp_notify_peer_local(true, *handle);
1232 if (result < VMCI_SUCCESS)
1233 goto error_keep_entry;
1234
1235 my_produce_q = queue_pair_entry->consume_q;
1236 my_consume_q = queue_pair_entry->produce_q;
1237 goto out;
1238 }
1239
1240 result = VMCI_ERROR_ALREADY_EXISTS;
1241 goto error_keep_entry;
1242 }
1243
1244 my_produce_q = qp_alloc_queue(produce_size, flags);
1245 if (!my_produce_q) {
1246 pr_warn("Error allocating pages for produce queue\n");
1247 result = VMCI_ERROR_NO_MEM;
1248 goto error;
1249 }
1250
1251 my_consume_q = qp_alloc_queue(consume_size, flags);
1252 if (!my_consume_q) {
1253 pr_warn("Error allocating pages for consume queue\n");
1254 result = VMCI_ERROR_NO_MEM;
1255 goto error;
1256 }
1257
1258 queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
1259 produce_size, consume_size,
1260 my_produce_q, my_consume_q);
1261 if (!queue_pair_entry) {
1262 pr_warn("Error allocating memory in %s\n", __func__);
1263 result = VMCI_ERROR_NO_MEM;
1264 goto error;
1265 }
1266
1267 result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
1268 num_consume_pages,
1269 &queue_pair_entry->ppn_set);
1270 if (result < VMCI_SUCCESS) {
1271 pr_warn("qp_alloc_ppn_set failed\n");
1272 goto error;
1273 }
1274
1275 /*
1276 * It's only necessary to notify the host if this queue pair will be
1277 * attached to from another context.
1278 */
1279 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1280 /* Local create case. */
1281 u32 context_id = vmci_get_context_id();
1282
1283 /*
1284 * Enforce similar checks on local queue pairs as we
1285 * do for regular ones. The handle's context must
1286 * match the creator or attacher context id (here they
1287 * are both the current context id) and the
1288 * attach-only flag cannot exist during create. We
1289 * also ensure specified peer is this context or an
1290 * invalid one.
1291 */
1292 if (queue_pair_entry->qp.handle.context != context_id ||
1293 (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
1294 queue_pair_entry->qp.peer != context_id)) {
1295 result = VMCI_ERROR_NO_ACCESS;
1296 goto error;
1297 }
1298
1299 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
1300 result = VMCI_ERROR_NOT_FOUND;
1301 goto error;
1302 }
1303 } else {
1304 result = qp_alloc_hypercall(queue_pair_entry);
1305 if (result < VMCI_SUCCESS) {
1306 pr_warn("qp_alloc_hypercall result = %d\n", result);
1307 goto error;
1308 }
1309 }
1310
1311 qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
1312 (struct vmci_queue *)my_consume_q);
1313
1314 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
1315
1316 out:
1317 queue_pair_entry->qp.ref_count++;
1318 *handle = queue_pair_entry->qp.handle;
1319 *produce_q = (struct vmci_queue *)my_produce_q;
1320 *consume_q = (struct vmci_queue *)my_consume_q;
1321
1322 /*
1323 * We should initialize the queue pair header pages on a local
1324 * queue pair create. For non-local queue pairs, the
1325 * hypervisor initializes the header pages in the create step.
1326 */
1327 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
1328 queue_pair_entry->qp.ref_count == 1) {
1329 vmci_q_header_init((*produce_q)->q_header, *handle);
1330 vmci_q_header_init((*consume_q)->q_header, *handle);
1331 }
1332
1333 mutex_unlock(&qp_guest_endpoints.mutex);
1334
1335 return VMCI_SUCCESS;
1336
1337 error:
1338 mutex_unlock(&qp_guest_endpoints.mutex);
1339 if (queue_pair_entry) {
1340 /* The queues will be freed inside the destroy routine. */
1341 qp_guest_endpoint_destroy(queue_pair_entry);
1342 } else {
1343 qp_free_queue(my_produce_q, produce_size);
1344 qp_free_queue(my_consume_q, consume_size);
1345 }
1346 return result;
1347
1348 error_keep_entry:
1349 /* This path should only be used when an existing entry was found. */
1350 mutex_unlock(&qp_guest_endpoints.mutex);
1351 return result;
1352}
1353
1354/*
1355 * The first endpoint issuing a queue pair allocation will create the state
1356 * of the queue pair in the queue pair broker.
1357 *
1358 * If the creator is a guest, it will associate a VMX virtual address range
1359 * with the queue pair as specified by the page_store. For compatibility with
 1360 * older VMX'en that use a separate step to set the VMX virtual
1361 * address range, the virtual address range can be registered later using
1362 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
1363 * used.
1364 *
1365 * If the creator is the host, a page_store of NULL should be used as well,
1366 * since the host is not able to supply a page store for the queue pair.
1367 *
1368 * For older VMX and host callers, the queue pair will be created in the
1369 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 1370 * created in the VMCIQPB_CREATED_MEM state.
1371 */
1372static int qp_broker_create(struct vmci_handle handle,
1373 u32 peer,
1374 u32 flags,
1375 u32 priv_flags,
1376 u64 produce_size,
1377 u64 consume_size,
1378 struct vmci_qp_page_store *page_store,
1379 struct vmci_ctx *context,
1380 vmci_event_release_cb wakeup_cb,
1381 void *client_data, struct qp_broker_entry **ent)
1382{
1383 struct qp_broker_entry *entry = NULL;
1384 const u32 context_id = vmci_ctx_get_id(context);
1385 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1386 int result;
1387 u64 guest_produce_size;
1388 u64 guest_consume_size;
1389
1390 /* Do not create if the caller asked not to. */
1391 if (flags & VMCI_QPFLAG_ATTACH_ONLY)
1392 return VMCI_ERROR_NOT_FOUND;
1393
1394 /*
1395 * Creator's context ID should match handle's context ID or the creator
1396 * must allow the context in handle's context ID as the "peer".
1397 */
1398 if (handle.context != context_id && handle.context != peer)
1399 return VMCI_ERROR_NO_ACCESS;
1400
1401 if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
1402 return VMCI_ERROR_DST_UNREACHABLE;
1403
1404 /*
1405 * Creator's context ID for local queue pairs should match the
1406 * peer, if a peer is specified.
1407 */
1408 if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
1409 return VMCI_ERROR_NO_ACCESS;
1410
1411 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1412 if (!entry)
1413 return VMCI_ERROR_NO_MEM;
1414
1415 if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
1416 /*
1417 * The queue pair broker entry stores values from the guest
1418 * point of view, so a creating host side endpoint should swap
1419 * produce and consume values -- unless it is a local queue
1420 * pair, in which case no swapping is necessary, since the local
1421 * attacher will swap queues.
1422 */
1423
1424 guest_produce_size = consume_size;
1425 guest_consume_size = produce_size;
1426 } else {
1427 guest_produce_size = produce_size;
1428 guest_consume_size = consume_size;
1429 }
1430
1431 entry->qp.handle = handle;
1432 entry->qp.peer = peer;
1433 entry->qp.flags = flags;
1434 entry->qp.produce_size = guest_produce_size;
1435 entry->qp.consume_size = guest_consume_size;
1436 entry->qp.ref_count = 1;
1437 entry->create_id = context_id;
1438 entry->attach_id = VMCI_INVALID_ID;
1439 entry->state = VMCIQPB_NEW;
1440 entry->require_trusted_attach =
1441 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
1442 entry->created_by_trusted =
1443 !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
1444 entry->vmci_page_files = false;
1445 entry->wakeup_cb = wakeup_cb;
1446 entry->client_data = client_data;
1447 entry->produce_q = qp_host_alloc_queue(guest_produce_size);
1448 if (entry->produce_q == NULL) {
1449 result = VMCI_ERROR_NO_MEM;
1450 goto error;
1451 }
1452 entry->consume_q = qp_host_alloc_queue(guest_consume_size);
1453 if (entry->consume_q == NULL) {
1454 result = VMCI_ERROR_NO_MEM;
1455 goto error;
1456 }
1457
1458 qp_init_queue_mutex(entry->produce_q, entry->consume_q);
1459
1460 INIT_LIST_HEAD(&entry->qp.list_item);
1461
1462 if (is_local) {
1463 u8 *tmp;
1464
1465 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
1466 PAGE_SIZE, GFP_KERNEL);
1467 if (entry->local_mem == NULL) {
1468 result = VMCI_ERROR_NO_MEM;
1469 goto error;
1470 }
1471 entry->state = VMCIQPB_CREATED_MEM;
1472 entry->produce_q->q_header = entry->local_mem;
1473 tmp = (u8 *)entry->local_mem + PAGE_SIZE *
Andy King42281d22013-01-10 15:41:39 -08001474 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
George Zhang06164d22013-01-08 15:54:54 -08001475 entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
1476 } else if (page_store) {
1477 /*
1478 * The VMX already initialized the queue pair headers, so no
1479 * need for the kernel side to do that.
1480 */
1481 result = qp_host_register_user_memory(page_store,
1482 entry->produce_q,
1483 entry->consume_q);
1484 if (result < VMCI_SUCCESS)
1485 goto error;
1486
1487 entry->state = VMCIQPB_CREATED_MEM;
1488 } else {
1489 /*
1490 * A create without a page_store may be either a host
1491 * side create (in which case we are waiting for the
1492 * guest side to supply the memory) or an old style
1493 * queue pair create (in which case we will expect a
1494 * set page store call as the next step).
1495 */
1496 entry->state = VMCIQPB_CREATED_NO_MEM;
1497 }
1498
1499 qp_list_add_entry(&qp_broker_list, &entry->qp);
1500 if (ent != NULL)
1501 *ent = entry;
1502
1503 /* Add to resource obj */
1504 result = vmci_resource_add(&entry->resource,
1505 VMCI_RESOURCE_TYPE_QPAIR_HOST,
1506 handle);
1507 if (result != VMCI_SUCCESS) {
1508 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
1509 handle.context, handle.resource, result);
1510 goto error;
1511 }
1512
1513 entry->qp.handle = vmci_resource_handle(&entry->resource);
1514 if (is_local) {
1515 vmci_q_header_init(entry->produce_q->q_header,
1516 entry->qp.handle);
1517 vmci_q_header_init(entry->consume_q->q_header,
1518 entry->qp.handle);
1519 }
1520
1521 vmci_ctx_qp_create(context, entry->qp.handle);
1522
1523 return VMCI_SUCCESS;
1524
1525 error:
1526 if (entry != NULL) {
1527 qp_host_free_queue(entry->produce_q, guest_produce_size);
1528 qp_host_free_queue(entry->consume_q, guest_consume_size);
1529 kfree(entry);
1530 }
1531
1532 return result;
1533}
1534
1535/*
1536 * Enqueues an event datagram to notify the peer VM attached to
 1537 * the given queue pair handle about an attach/detach event by the
 1538 * given VM. Returns the payload size of the datagram enqueued on
1539 * success, error code otherwise.
1540 */
1541static int qp_notify_peer(bool attach,
1542 struct vmci_handle handle,
1543 u32 my_id,
1544 u32 peer_id)
1545{
1546 int rv;
1547 struct vmci_event_qp ev;
1548
1549 if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
1550 peer_id == VMCI_INVALID_ID)
1551 return VMCI_ERROR_INVALID_ARGS;
1552
1553 /*
1554 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
1555 * number of pending events from the hypervisor to a given VM
1556 * otherwise a rogue VM could do an arbitrary number of attach
1557 * and detach operations causing memory pressure in the host
1558 * kernel.
1559 */
1560
1561 ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
1562 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1563 VMCI_CONTEXT_RESOURCE_ID);
1564 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
1565 ev.msg.event_data.event = attach ?
1566 VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
1567 ev.payload.handle = handle;
1568 ev.payload.peer_id = my_id;
1569
1570 rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
1571 &ev.msg.hdr, false);
1572 if (rv < VMCI_SUCCESS)
1573 pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
1574 attach ? "ATTACH" : "DETACH", peer_id);
1575
1576 return rv;
1577}
1578
1579/*
1580 * The second endpoint issuing a queue pair allocation will attach to
1581 * the queue pair registered with the queue pair broker.
1582 *
1583 * If the attacher is a guest, it will associate a VMX virtual address
1584 * range with the queue pair as specified by the page_store. At this
 1585 * point, the already attached host endpoint may start using the queue
1586 * pair, and an attach event is sent to it. For compatibility with
 1587 * older VMX'en that used a separate step to set the VMX virtual
1588 * address range, the virtual address range can be registered later
1589 * using vmci_qp_broker_set_page_store. In that case, a page_store of
1590 * NULL should be used, and the attach event will be generated once
1591 * the actual page store has been set.
1592 *
1593 * If the attacher is the host, a page_store of NULL should be used as
1594 * well, since the page store information is already set by the guest.
1595 *
1596 * For new VMX and host callers, the queue pair will be moved to the
1597 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
1598 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
1599 */
1600static int qp_broker_attach(struct qp_broker_entry *entry,
1601 u32 peer,
1602 u32 flags,
1603 u32 priv_flags,
1604 u64 produce_size,
1605 u64 consume_size,
1606 struct vmci_qp_page_store *page_store,
1607 struct vmci_ctx *context,
1608 vmci_event_release_cb wakeup_cb,
1609 void *client_data,
1610 struct qp_broker_entry **ent)
1611{
1612 const u32 context_id = vmci_ctx_get_id(context);
1613 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1614 int result;
1615
1616 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
1617 entry->state != VMCIQPB_CREATED_MEM)
1618 return VMCI_ERROR_UNAVAILABLE;
1619
1620 if (is_local) {
1621 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
1622 context_id != entry->create_id) {
1623 return VMCI_ERROR_INVALID_ARGS;
1624 }
1625 } else if (context_id == entry->create_id ||
1626 context_id == entry->attach_id) {
1627 return VMCI_ERROR_ALREADY_EXISTS;
1628 }
1629
1630 if (VMCI_CONTEXT_IS_VM(context_id) &&
1631 VMCI_CONTEXT_IS_VM(entry->create_id))
1632 return VMCI_ERROR_DST_UNREACHABLE;
1633
1634 /*
1635 * If we are attaching from a restricted context then the queuepair
1636 * must have been created by a trusted endpoint.
1637 */
1638 if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
1639 !entry->created_by_trusted)
1640 return VMCI_ERROR_NO_ACCESS;
1641
1642 /*
1643 * If we are attaching to a queuepair that was created by a restricted
1644 * context then we must be trusted.
1645 */
1646 if (entry->require_trusted_attach &&
1647 (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
1648 return VMCI_ERROR_NO_ACCESS;
1649
1650 /*
1651 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
1652 * control check is not performed.
1653 */
1654 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
1655 return VMCI_ERROR_NO_ACCESS;
1656
1657 if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
1658 /*
1659 * Do not attach if the caller doesn't support Host Queue Pairs
1660 * and a host created this queue pair.
1661 */
1662
1663 if (!vmci_ctx_supports_host_qp(context))
1664 return VMCI_ERROR_INVALID_RESOURCE;
1665
1666 } else if (context_id == VMCI_HOST_CONTEXT_ID) {
1667 struct vmci_ctx *create_context;
1668 bool supports_host_qp;
1669
1670 /*
1671 * Do not attach a host to a user created queue pair if that
1672 * user doesn't support host queue pair end points.
1673 */
1674
1675 create_context = vmci_ctx_get(entry->create_id);
1676 supports_host_qp = vmci_ctx_supports_host_qp(create_context);
1677 vmci_ctx_put(create_context);
1678
1679 if (!supports_host_qp)
1680 return VMCI_ERROR_INVALID_RESOURCE;
1681 }
1682
1683 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
1684 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1685
1686 if (context_id != VMCI_HOST_CONTEXT_ID) {
1687 /*
1688 * The queue pair broker entry stores values from the guest
1689 * point of view, so an attaching guest should match the values
1690 * stored in the entry.
1691 */
1692
1693 if (entry->qp.produce_size != produce_size ||
1694 entry->qp.consume_size != consume_size) {
1695 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1696 }
1697 } else if (entry->qp.produce_size != consume_size ||
1698 entry->qp.consume_size != produce_size) {
1699 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1700 }
1701
1702 if (context_id != VMCI_HOST_CONTEXT_ID) {
1703 /*
1704		 * If a guest attaches to a queue pair, it will supply
1705 * the backing memory. If this is a pre NOVMVM vmx,
1706 * the backing memory will be supplied by calling
1707 * vmci_qp_broker_set_page_store() following the
1708 * return of the vmci_qp_broker_alloc() call. If it is
1709 * a vmx of version NOVMVM or later, the page store
1710 * must be supplied as part of the
1711		 * vmci_qp_broker_alloc call. Under all circumstances,
1712		 * the initially created queue pair must not have any
1713		 * memory associated with it already.
1714 */
1715
1716 if (entry->state != VMCIQPB_CREATED_NO_MEM)
1717 return VMCI_ERROR_INVALID_ARGS;
1718
1719 if (page_store != NULL) {
1720 /*
1721 * Patch up host state to point to guest
1722 * supplied memory. The VMX already
1723 * initialized the queue pair headers, so no
1724 * need for the kernel side to do that.
1725 */
1726
1727 result = qp_host_register_user_memory(page_store,
1728 entry->produce_q,
1729 entry->consume_q);
1730 if (result < VMCI_SUCCESS)
1731 return result;
1732
1733				entry->state = VMCIQPB_ATTACHED_MEM;
1734 } else {
1735 entry->state = VMCIQPB_ATTACHED_NO_MEM;
1736 }
1737 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
1738 /*
1739 * The host side is attempting to attach to a queue
1740 * pair that doesn't have any memory associated with
1741 * it. This must be a pre NOVMVM vmx that hasn't set
1742 * the page store information yet, or a quiesced VM.
1743 */
1744
1745 return VMCI_ERROR_UNAVAILABLE;
1746 } else {
1747		/* The host side has successfully attached to a queue pair. */
1748 entry->state = VMCIQPB_ATTACHED_MEM;
1749 }
1750
1751 if (entry->state == VMCIQPB_ATTACHED_MEM) {
1752 result =
1753 qp_notify_peer(true, entry->qp.handle, context_id,
1754 entry->create_id);
1755 if (result < VMCI_SUCCESS)
1756 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
1757 entry->create_id, entry->qp.handle.context,
1758 entry->qp.handle.resource);
1759 }
1760
1761 entry->attach_id = context_id;
1762 entry->qp.ref_count++;
1763 if (wakeup_cb) {
1764 entry->wakeup_cb = wakeup_cb;
1765 entry->client_data = client_data;
1766 }
1767
1768 /*
1769 * When attaching to local queue pairs, the context already has
1770 * an entry tracking the queue pair, so don't add another one.
1771 */
1772 if (!is_local)
1773 vmci_ctx_qp_create(context, entry->qp.handle);
1774
1775 if (ent != NULL)
1776 *ent = entry;
1777
1778 return VMCI_SUCCESS;
1779}
1780
1781/*
1782 * queue_pair_alloc for use when setting up queue pair endpoints
1783 * on the host.
1784 */
1785static int qp_broker_alloc(struct vmci_handle handle,
1786 u32 peer,
1787 u32 flags,
1788 u32 priv_flags,
1789 u64 produce_size,
1790 u64 consume_size,
1791 struct vmci_qp_page_store *page_store,
1792 struct vmci_ctx *context,
1793 vmci_event_release_cb wakeup_cb,
1794 void *client_data,
1795 struct qp_broker_entry **ent,
1796 bool *swap)
1797{
1798 const u32 context_id = vmci_ctx_get_id(context);
1799 bool create;
1800 struct qp_broker_entry *entry = NULL;
1801 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1802 int result;
1803
1804 if (vmci_handle_is_invalid(handle) ||
1805 (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
1806 !(produce_size || consume_size) ||
1807 !context || context_id == VMCI_INVALID_ID ||
1808 handle.context == VMCI_INVALID_ID) {
1809 return VMCI_ERROR_INVALID_ARGS;
1810 }
1811
1812 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
1813 return VMCI_ERROR_INVALID_ARGS;
1814
1815 /*
1816 * In the initial argument check, we ensure that non-vmkernel hosts
1817 * are not allowed to create local queue pairs.
1818 */
1819
1820 mutex_lock(&qp_broker_list.mutex);
1821
1822 if (!is_local && vmci_ctx_qp_exists(context, handle)) {
1823 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
1824 context_id, handle.context, handle.resource);
1825 mutex_unlock(&qp_broker_list.mutex);
1826 return VMCI_ERROR_ALREADY_EXISTS;
1827 }
1828
1829 if (handle.resource != VMCI_INVALID_ID)
1830 entry = qp_broker_handle_to_entry(handle);
1831
1832 if (!entry) {
1833 create = true;
1834 result =
1835 qp_broker_create(handle, peer, flags, priv_flags,
1836 produce_size, consume_size, page_store,
1837 context, wakeup_cb, client_data, ent);
1838 } else {
1839 create = false;
1840 result =
1841 qp_broker_attach(entry, peer, flags, priv_flags,
1842 produce_size, consume_size, page_store,
1843 context, wakeup_cb, client_data, ent);
1844 }
1845
1846 mutex_unlock(&qp_broker_list.mutex);
1847
1848 if (swap)
1849 *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
1850 !(create && is_local);
1851
1852 return result;
1853}
1854
1855/*
1856 * This function implements the kernel API for allocating a queue
1857 * pair.
1858 */
1859static int qp_alloc_host_work(struct vmci_handle *handle,
1860 struct vmci_queue **produce_q,
1861 u64 produce_size,
1862 struct vmci_queue **consume_q,
1863 u64 consume_size,
1864 u32 peer,
1865 u32 flags,
1866 u32 priv_flags,
1867 vmci_event_release_cb wakeup_cb,
1868 void *client_data)
1869{
1870 struct vmci_handle new_handle;
1871 struct vmci_ctx *context;
1872 struct qp_broker_entry *entry;
1873 int result;
1874 bool swap;
1875
1876 if (vmci_handle_is_invalid(*handle)) {
1877 new_handle = vmci_make_handle(
1878 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
1879 } else
1880 new_handle = *handle;
1881
1882 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1883 entry = NULL;
1884 result =
1885 qp_broker_alloc(new_handle, peer, flags, priv_flags,
1886 produce_size, consume_size, NULL, context,
1887 wakeup_cb, client_data, &entry, &swap);
1888 if (result == VMCI_SUCCESS) {
1889 if (swap) {
1890 /*
1891 * If this is a local queue pair, the attacher
1892 * will swap around produce and consume
1893 * queues.
1894 */
1895
1896 *produce_q = entry->consume_q;
1897 *consume_q = entry->produce_q;
1898 } else {
1899 *produce_q = entry->produce_q;
1900 *consume_q = entry->consume_q;
1901 }
1902
1903 *handle = vmci_resource_handle(&entry->resource);
1904 } else {
1905 *handle = VMCI_INVALID_HANDLE;
1906 pr_devel("queue pair broker failed to alloc (result=%d)\n",
1907 result);
1908 }
1909 vmci_ctx_put(context);
1910 return result;
1911}
1912
1913/*
1914 * Allocates a VMCI queue_pair. Only checks validity of input
1915 * arguments. The real work is done in the host or guest
1916 * specific function.
1917 */
1918int vmci_qp_alloc(struct vmci_handle *handle,
1919 struct vmci_queue **produce_q,
1920 u64 produce_size,
1921 struct vmci_queue **consume_q,
1922 u64 consume_size,
1923 u32 peer,
1924 u32 flags,
1925 u32 priv_flags,
1926 bool guest_endpoint,
1927 vmci_event_release_cb wakeup_cb,
1928 void *client_data)
1929{
1930 if (!handle || !produce_q || !consume_q ||
1931 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
1932 return VMCI_ERROR_INVALID_ARGS;
1933
1934 if (guest_endpoint) {
1935 return qp_alloc_guest_work(handle, produce_q,
1936 produce_size, consume_q,
1937 consume_size, peer,
1938 flags, priv_flags);
1939 } else {
1940 return qp_alloc_host_work(handle, produce_q,
1941 produce_size, consume_q,
1942 consume_size, peer, flags,
1943 priv_flags, wakeup_cb, client_data);
1944 }
1945}
1946
1947/*
1948 * This function implements the host kernel API for detaching from
1949 * a queue pair.
1950 */
1951static int qp_detatch_host_work(struct vmci_handle handle)
1952{
1953 int result;
1954 struct vmci_ctx *context;
1955
1956 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1957
1958 result = vmci_qp_broker_detach(handle, context);
1959
1960 vmci_ctx_put(context);
1961 return result;
1962}
1963
1964/*
1965 * Detaches from a VMCI queue_pair. Only checks validity of input argument.
1966 * Real work is done in the host or guest specific function.
1967 */
1968static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1969{
1970 if (vmci_handle_is_invalid(handle))
1971 return VMCI_ERROR_INVALID_ARGS;
1972
1973 if (guest_endpoint)
1974 return qp_detatch_guest_work(handle);
1975 else
1976 return qp_detatch_host_work(handle);
1977}
1978
1979/*
1980 * Returns the entry from the head of the list. Assumes that the list is
1981 * locked.
1982 */
1983static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
1984{
1985 if (!list_empty(&qp_list->head)) {
1986 struct qp_entry *entry =
1987 list_first_entry(&qp_list->head, struct qp_entry,
1988 list_item);
1989 return entry;
1990 }
1991
1992 return NULL;
1993}
1994
1995void vmci_qp_broker_exit(void)
1996{
1997 struct qp_entry *entry;
1998 struct qp_broker_entry *be;
1999
2000 mutex_lock(&qp_broker_list.mutex);
2001
2002 while ((entry = qp_list_get_head(&qp_broker_list))) {
2003 be = (struct qp_broker_entry *)entry;
2004
2005 qp_list_remove_entry(&qp_broker_list, entry);
2006 kfree(be);
2007 }
2008
2009 mutex_unlock(&qp_broker_list.mutex);
2010}
2011
2012/*
2013 * Requests that a queue pair be allocated with the VMCI queue
2014 * pair broker. Allocates a queue pair entry if one does not
2015 * exist. Attaches to one if it exists, and retrieves the page
2016 * files backing that queue_pair. Assumes that the queue pair
2017 * broker lock is held.
2018 */
2019int vmci_qp_broker_alloc(struct vmci_handle handle,
2020 u32 peer,
2021 u32 flags,
2022 u32 priv_flags,
2023 u64 produce_size,
2024 u64 consume_size,
2025 struct vmci_qp_page_store *page_store,
2026 struct vmci_ctx *context)
2027{
2028 return qp_broker_alloc(handle, peer, flags, priv_flags,
2029 produce_size, consume_size,
2030 page_store, context, NULL, NULL, NULL, NULL);
2031}
2032
2033/*
2034 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
2035 * step to add the UVAs of the VMX mapping of the queue pair. This function
2036 * provides backwards compatibility with such VMX'en, and takes care of
2037 * registering the page store for a queue pair previously allocated by the
2038 * VMX during create or attach. This function will move the queue pair state
2039 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
2040 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
2041 * attached state with memory, the queue pair is ready to be used by the
2042 * host peer, and an attached event will be generated.
2043 *
2044 * Assumes that the queue pair broker lock is held.
2045 *
2046 * This function is only used by the hosted platform, since there is no
2047 * issue with backwards compatibility for vmkernel.
2048 */
2049int vmci_qp_broker_set_page_store(struct vmci_handle handle,
2050 u64 produce_uva,
2051 u64 consume_uva,
2052 struct vmci_ctx *context)
2053{
2054 struct qp_broker_entry *entry;
2055 int result;
2056 const u32 context_id = vmci_ctx_get_id(context);
2057
2058 if (vmci_handle_is_invalid(handle) || !context ||
2059 context_id == VMCI_INVALID_ID)
2060 return VMCI_ERROR_INVALID_ARGS;
2061
2062 /*
2063 * We only support guest to host queue pairs, so the VMX must
2064 * supply UVAs for the mapped page files.
2065 */
2066
2067 if (produce_uva == 0 || consume_uva == 0)
2068 return VMCI_ERROR_INVALID_ARGS;
2069
2070 mutex_lock(&qp_broker_list.mutex);
2071
2072 if (!vmci_ctx_qp_exists(context, handle)) {
2073 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2074 context_id, handle.context, handle.resource);
2075 result = VMCI_ERROR_NOT_FOUND;
2076 goto out;
2077 }
2078
2079 entry = qp_broker_handle_to_entry(handle);
2080 if (!entry) {
2081 result = VMCI_ERROR_NOT_FOUND;
2082 goto out;
2083 }
2084
2085 /*
2086 * If I'm the owner then I can set the page store.
2087 *
2088 * Or, if a host created the queue_pair and I'm the attached peer
2089 * then I can set the page store.
2090 */
2091 if (entry->create_id != context_id &&
2092 (entry->create_id != VMCI_HOST_CONTEXT_ID ||
2093 entry->attach_id != context_id)) {
2094 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
2095 goto out;
2096 }
2097
2098 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
2099 entry->state != VMCIQPB_ATTACHED_NO_MEM) {
2100 result = VMCI_ERROR_UNAVAILABLE;
2101 goto out;
2102 }
2103
2104 result = qp_host_get_user_memory(produce_uva, consume_uva,
2105 entry->produce_q, entry->consume_q);
2106 if (result < VMCI_SUCCESS)
2107 goto out;
2108
2109 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2110 if (result < VMCI_SUCCESS) {
2111 qp_host_unregister_user_memory(entry->produce_q,
2112 entry->consume_q);
2113 goto out;
2114 }
2115
2116 if (entry->state == VMCIQPB_CREATED_NO_MEM)
2117 entry->state = VMCIQPB_CREATED_MEM;
2118 else
2119 entry->state = VMCIQPB_ATTACHED_MEM;
2120
2121 entry->vmci_page_files = true;
2122
2123 if (entry->state == VMCIQPB_ATTACHED_MEM) {
2124 result =
2125 qp_notify_peer(true, handle, context_id, entry->create_id);
2126 if (result < VMCI_SUCCESS) {
2127 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
2128 entry->create_id, entry->qp.handle.context,
2129 entry->qp.handle.resource);
2130 }
2131 }
2132
2133 result = VMCI_SUCCESS;
2134 out:
2135 mutex_unlock(&qp_broker_list.mutex);
2136 return result;
2137}
2138
2139/*
2140 * Resets saved queue headers for the given QP broker
2141 * entry. Should be used when guest memory becomes available
2142 * again, or the guest detaches.
2143 */
2144static void qp_reset_saved_headers(struct qp_broker_entry *entry)
2145{
2146 entry->produce_q->saved_header = NULL;
2147 entry->consume_q->saved_header = NULL;
2148}
2149
2150/*
2151 * The main entry point for detaching from a queue pair registered with the
2152 * queue pair broker. If more than one endpoint is attached to the queue
2153 * pair, the first endpoint will mainly decrement a reference count and
2154 * generate a notification to its peer. The last endpoint will clean up
2155 * the queue pair state registered with the broker.
2156 *
2157 * When a guest endpoint detaches, it will unmap and unregister the guest
2158 * memory backing the queue pair. If the host is still attached, it will
2159 * no longer be able to access the queue pair content.
2160 *
2161 * If the queue pair is already in a state where there is no memory
2162 * registered for the queue pair (any *_NO_MEM state), it will transition to
2163 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest
2164 * endpoint is the first of two endpoints to detach. If the host endpoint is
2165 * the first out of two to detach, the queue pair will move to the
2166 * VMCIQPB_SHUTDOWN_MEM state.
2167 */
2168int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
2169{
2170 struct qp_broker_entry *entry;
2171 const u32 context_id = vmci_ctx_get_id(context);
2172 u32 peer_id;
2173 bool is_local = false;
2174 int result;
2175
2176 if (vmci_handle_is_invalid(handle) || !context ||
2177 context_id == VMCI_INVALID_ID) {
2178 return VMCI_ERROR_INVALID_ARGS;
2179 }
2180
2181 mutex_lock(&qp_broker_list.mutex);
2182
2183 if (!vmci_ctx_qp_exists(context, handle)) {
2184 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2185 context_id, handle.context, handle.resource);
2186 result = VMCI_ERROR_NOT_FOUND;
2187 goto out;
2188 }
2189
2190 entry = qp_broker_handle_to_entry(handle);
2191 if (!entry) {
2192 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
2193 context_id, handle.context, handle.resource);
2194 result = VMCI_ERROR_NOT_FOUND;
2195 goto out;
2196 }
2197
2198 if (context_id != entry->create_id && context_id != entry->attach_id) {
2199 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2200 goto out;
2201 }
2202
2203 if (context_id == entry->create_id) {
2204 peer_id = entry->attach_id;
2205 entry->create_id = VMCI_INVALID_ID;
2206 } else {
2207 peer_id = entry->create_id;
2208 entry->attach_id = VMCI_INVALID_ID;
2209 }
2210 entry->qp.ref_count--;
2211
2212 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2213
2214 if (context_id != VMCI_HOST_CONTEXT_ID) {
2215 bool headers_mapped;
2216
2217 /*
2218 * Pre NOVMVM vmx'en may detach from a queue pair
2219 * before setting the page store, and in that case
2220 * there is no user memory to detach from. Also, more
2221 * recent VMX'en may detach from a queue pair in the
2222 * quiesced state.
2223 */
2224
2225 qp_acquire_queue_mutex(entry->produce_q);
2226 headers_mapped = entry->produce_q->q_header ||
2227 entry->consume_q->q_header;
2228 if (QPBROKERSTATE_HAS_MEM(entry)) {
2229 result =
2230 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
2231 entry->produce_q,
2232 entry->consume_q);
2233 if (result < VMCI_SUCCESS)
2234 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2235 handle.context, handle.resource,
2236 result);
2237
2238			qp_host_unregister_user_memory(entry->produce_q,
2239						       entry->consume_q);
2240
2247 }
2248
2249 if (!headers_mapped)
2250 qp_reset_saved_headers(entry);
2251
2252 qp_release_queue_mutex(entry->produce_q);
2253
2254 if (!headers_mapped && entry->wakeup_cb)
2255 entry->wakeup_cb(entry->client_data);
2256
2257 } else {
2258 if (entry->wakeup_cb) {
2259 entry->wakeup_cb = NULL;
2260 entry->client_data = NULL;
2261 }
2262 }
2263
2264 if (entry->qp.ref_count == 0) {
2265 qp_list_remove_entry(&qp_broker_list, &entry->qp);
2266
2267 if (is_local)
2268 kfree(entry->local_mem);
2269
2270 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
2271 qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
2272 qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
2273 /* Unlink from resource hash table and free callback */
2274 vmci_resource_remove(&entry->resource);
2275
2276 kfree(entry);
2277
2278 vmci_ctx_qp_destroy(context, handle);
2279 } else {
2280 qp_notify_peer(false, handle, context_id, peer_id);
2281 if (context_id == VMCI_HOST_CONTEXT_ID &&
2282 QPBROKERSTATE_HAS_MEM(entry)) {
2283 entry->state = VMCIQPB_SHUTDOWN_MEM;
2284 } else {
2285 entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
2286 }
2287
2288 if (!is_local)
2289 vmci_ctx_qp_destroy(context, handle);
2290
2291 }
2292 result = VMCI_SUCCESS;
2293 out:
2294 mutex_unlock(&qp_broker_list.mutex);
2295 return result;
2296}
2297
2298/*
2299 * Establishes the necessary mappings for a queue pair given a
2300 * reference to the queue pair guest memory. This is usually
2301 * called when a guest is unquiesced and the VMX is allowed to
2302 * map guest memory once again.
2303 */
2304int vmci_qp_broker_map(struct vmci_handle handle,
2305 struct vmci_ctx *context,
2306 u64 guest_mem)
2307{
2308 struct qp_broker_entry *entry;
2309 const u32 context_id = vmci_ctx_get_id(context);
2310 bool is_local = false;
2311 int result;
2312
2313 if (vmci_handle_is_invalid(handle) || !context ||
2314 context_id == VMCI_INVALID_ID)
2315 return VMCI_ERROR_INVALID_ARGS;
2316
2317 mutex_lock(&qp_broker_list.mutex);
2318
2319 if (!vmci_ctx_qp_exists(context, handle)) {
2320 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2321 context_id, handle.context, handle.resource);
2322 result = VMCI_ERROR_NOT_FOUND;
2323 goto out;
2324 }
2325
2326 entry = qp_broker_handle_to_entry(handle);
2327 if (!entry) {
2328 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2329 context_id, handle.context, handle.resource);
2330 result = VMCI_ERROR_NOT_FOUND;
2331 goto out;
2332 }
2333
2334 if (context_id != entry->create_id && context_id != entry->attach_id) {
2335 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2336 goto out;
2337 }
2338
2339 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2340 result = VMCI_SUCCESS;
2341
2342 if (context_id != VMCI_HOST_CONTEXT_ID) {
2343 struct vmci_qp_page_store page_store;
2344
2345 page_store.pages = guest_mem;
2346 page_store.len = QPE_NUM_PAGES(entry->qp);
2347
2348 qp_acquire_queue_mutex(entry->produce_q);
2349 qp_reset_saved_headers(entry);
2350 result =
2351 qp_host_register_user_memory(&page_store,
2352 entry->produce_q,
2353 entry->consume_q);
2354 qp_release_queue_mutex(entry->produce_q);
2355 if (result == VMCI_SUCCESS) {
2356 /* Move state from *_NO_MEM to *_MEM */
2357
2358 entry->state++;
2359
2360 if (entry->wakeup_cb)
2361 entry->wakeup_cb(entry->client_data);
2362 }
2363 }
2364
2365 out:
2366 mutex_unlock(&qp_broker_list.mutex);
2367 return result;
2368}
2369
2370/*
2371 * Saves a snapshot of the queue headers for the given QP broker
2372 * entry. Should be used when guest memory is unmapped.
2373 * Results:
2374 * VMCI_SUCCESS on success, appropriate error code if guest memory
2375 * can't be accessed.
2376 */
2377static int qp_save_headers(struct qp_broker_entry *entry)
2378{
2379 int result;
2380
2381 if (entry->produce_q->saved_header != NULL &&
2382 entry->consume_q->saved_header != NULL) {
2383 /*
2384 * If the headers have already been saved, we don't need to do
2385 * it again, and we don't want to map in the headers
2386 * unnecessarily.
2387 */
2388
2389 return VMCI_SUCCESS;
2390 }
2391
2392 if (NULL == entry->produce_q->q_header ||
2393 NULL == entry->consume_q->q_header) {
2394 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2395 if (result < VMCI_SUCCESS)
2396 return result;
2397 }
2398
2399 memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2400 sizeof(entry->saved_produce_q));
2401 entry->produce_q->saved_header = &entry->saved_produce_q;
2402 memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2403 sizeof(entry->saved_consume_q));
2404 entry->consume_q->saved_header = &entry->saved_consume_q;
2405
2406 return VMCI_SUCCESS;
2407}
2408
2409/*
2410 * Removes all references to the guest memory of a given queue pair, and
2411 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
2412 * called when a VM is being quiesced and access to guest memory should
2413 * be avoided.
2414 */
2415int vmci_qp_broker_unmap(struct vmci_handle handle,
2416 struct vmci_ctx *context,
2417 u32 gid)
2418{
2419 struct qp_broker_entry *entry;
2420 const u32 context_id = vmci_ctx_get_id(context);
2421 bool is_local = false;
2422 int result;
2423
2424 if (vmci_handle_is_invalid(handle) || !context ||
2425 context_id == VMCI_INVALID_ID)
2426 return VMCI_ERROR_INVALID_ARGS;
2427
2428 mutex_lock(&qp_broker_list.mutex);
2429
2430 if (!vmci_ctx_qp_exists(context, handle)) {
2431 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2432 context_id, handle.context, handle.resource);
2433 result = VMCI_ERROR_NOT_FOUND;
2434 goto out;
2435 }
2436
2437 entry = qp_broker_handle_to_entry(handle);
2438 if (!entry) {
2439 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2440 context_id, handle.context, handle.resource);
2441 result = VMCI_ERROR_NOT_FOUND;
2442 goto out;
2443 }
2444
2445 if (context_id != entry->create_id && context_id != entry->attach_id) {
2446 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2447 goto out;
2448 }
2449
2450 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2451
2452 if (context_id != VMCI_HOST_CONTEXT_ID) {
2453 qp_acquire_queue_mutex(entry->produce_q);
2454 result = qp_save_headers(entry);
2455 if (result < VMCI_SUCCESS)
2456 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2457 handle.context, handle.resource, result);
2458
2459 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
2460
2461 /*
2462 * On hosted, when we unmap queue pairs, the VMX will also
2463 * unmap the guest memory, so we invalidate the previously
2464 * registered memory. If the queue pair is mapped again at a
2465 * later point in time, we will need to reregister the user
2466 * memory with a possibly new user VA.
2467 */
2468 qp_host_unregister_user_memory(entry->produce_q,
2469 entry->consume_q);
2470
2471 /*
2472 * Move state from *_MEM to *_NO_MEM.
2473 */
2474 entry->state--;
2475
2476 qp_release_queue_mutex(entry->produce_q);
2477 }
2478
2479 result = VMCI_SUCCESS;
2480
2481 out:
2482 mutex_unlock(&qp_broker_list.mutex);
2483 return result;
2484}
2485
2486/*
2487 * Destroys all guest queue pair endpoints. If active guest queue
2488 * pairs still exist, hypercalls to attempt detach from these
2489 * queue pairs will be made. Any failure to detach is silently
2490 * ignored.
2491 */
2492void vmci_qp_guest_endpoints_exit(void)
2493{
2494 struct qp_entry *entry;
2495 struct qp_guest_endpoint *ep;
2496
2497 mutex_lock(&qp_guest_endpoints.mutex);
2498
2499 while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
2500 ep = (struct qp_guest_endpoint *)entry;
2501
2502 /* Don't make a hypercall for local queue_pairs. */
2503 if (!(entry->flags & VMCI_QPFLAG_LOCAL))
2504 qp_detatch_hypercall(entry->handle);
2505
2506 /* We cannot fail the exit, so let's reset ref_count. */
2507 entry->ref_count = 0;
2508 qp_list_remove_entry(&qp_guest_endpoints, entry);
2509
2510 qp_guest_endpoint_destroy(ep);
2511 }
2512
2513 mutex_unlock(&qp_guest_endpoints.mutex);
2514}
2515
2516/*
2517 * Helper routine that will lock the queue pair before subsequent
2518 * operations.
2519 * Note: Non-blocking on the host side is currently only implemented in ESX.
2520 * Since non-blocking isn't yet implemented on the host personality we
2521 * have no reason to acquire a spin lock. So to avoid the use of an
2522 * unnecessary lock only acquire the mutex if we can block.
2523 */
2524static void qp_lock(const struct vmci_qp *qpair)
2525{
2526	qp_acquire_queue_mutex(qpair->produce_q);
2527}
2528
2529/*
2530 * Helper routine that unlocks the queue pair after calling
2531 * qp_lock.
2532 */
2533static void qp_unlock(const struct vmci_qp *qpair)
2534{
2535	qp_release_queue_mutex(qpair->produce_q);
2536}
2537
2538/*
2539 * The queue headers may not be mapped at all times. If a queue is
2540 * currently not mapped, an attempt is made to map it.
2541 */
2542static int qp_map_queue_headers(struct vmci_queue *produce_q,
2543				struct vmci_queue *consume_q)
2544{
2545 int result;
2546
2547 if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
2548		result = qp_host_map_queues(produce_q, consume_q);
2549		if (result < VMCI_SUCCESS)
2550 return (produce_q->saved_header &&
2551 consume_q->saved_header) ?
2552 VMCI_ERROR_QUEUEPAIR_NOT_READY :
2553 VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2554 }
2555
2556 return VMCI_SUCCESS;
2557}
2558
2559/*
2560 * Helper routine that will retrieve the produce and consume
2561 * headers of a given queue pair. If the guest memory of the
2562 * queue pair is currently not available, the saved queue headers
2563 * will be returned, if these are available.
2564 */
2565static int qp_get_queue_headers(const struct vmci_qp *qpair,
2566 struct vmci_queue_header **produce_q_header,
2567 struct vmci_queue_header **consume_q_header)
2568{
2569 int result;
2570
2571	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
2572	if (result == VMCI_SUCCESS) {
2573 *produce_q_header = qpair->produce_q->q_header;
2574 *consume_q_header = qpair->consume_q->q_header;
2575 } else if (qpair->produce_q->saved_header &&
2576 qpair->consume_q->saved_header) {
2577 *produce_q_header = qpair->produce_q->saved_header;
2578 *consume_q_header = qpair->consume_q->saved_header;
2579 result = VMCI_SUCCESS;
2580 }
2581
2582 return result;
2583}
2584
2585/*
2586 * Callback from VMCI queue pair broker indicating that a queue
2587 * pair that was previously not ready, now either is ready or
2588 * gone forever.
2589 */
2590static int qp_wakeup_cb(void *client_data)
2591{
2592 struct vmci_qp *qpair = (struct vmci_qp *)client_data;
2593
2594 qp_lock(qpair);
2595 while (qpair->blocked > 0) {
2596 qpair->blocked--;
2597 qpair->generation++;
2598 wake_up(&qpair->event);
2599 }
2600 qp_unlock(qpair);
2601
2602 return VMCI_SUCCESS;
2603}
2604
2605/*
2606 * Makes the calling thread wait for the queue pair to become
2607 * ready for host side access. Returns true when thread is
2608 * woken up after queue pair state change, false otherwise.
2609 */
2610static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2611{
2612 unsigned int generation;
2613
2614	qpair->blocked++;
2615 generation = qpair->generation;
2616 qp_unlock(qpair);
2617 wait_event(qpair->event, generation != qpair->generation);
2618 qp_lock(qpair);
2619
2620 return true;
2621}
2622
2623/*
2624 * Enqueues a given buffer to the produce queue using the provided
2625 * function. As many bytes as possible (space available in the queue)
2626 * are enqueued. Assumes the queue->mutex has been acquired. Returns
2627 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
2628 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
2629 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
2630 * an error occurred when accessing the buffer,
2631 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
2632 * available. Otherwise, the number of bytes written to the queue is
2633 * returned. Updates the tail pointer of the produce queue.
2634 */
2635static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2636 struct vmci_queue *consume_q,
2637 const u64 produce_q_size,
2638 const void *buf,
2639 size_t buf_size,
2640				 vmci_memcpy_to_queue_func memcpy_to_queue)
2641{
2642 s64 free_space;
2643 u64 tail;
2644 size_t written;
2645 ssize_t result;
2646
2647	result = qp_map_queue_headers(produce_q, consume_q);
2648	if (unlikely(result != VMCI_SUCCESS))
2649 return result;
2650
2651 free_space = vmci_q_header_free_space(produce_q->q_header,
2652 consume_q->q_header,
2653 produce_q_size);
2654 if (free_space == 0)
2655 return VMCI_ERROR_QUEUEPAIR_NOSPACE;
2656
2657 if (free_space < VMCI_SUCCESS)
2658 return (ssize_t) free_space;
2659
2660 written = (size_t) (free_space > buf_size ? buf_size : free_space);
2661 tail = vmci_q_header_producer_tail(produce_q->q_header);
2662 if (likely(tail + written < produce_q_size)) {
2663 result = memcpy_to_queue(produce_q, tail, buf, 0, written);
2664 } else {
2665 /* Tail pointer wraps around. */
2666
2667 const size_t tmp = (size_t) (produce_q_size - tail);
2668
2669 result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
2670 if (result >= VMCI_SUCCESS)
2671 result = memcpy_to_queue(produce_q, 0, buf, tmp,
2672 written - tmp);
2673 }
2674
2675 if (result < VMCI_SUCCESS)
2676 return result;
2677
2678 vmci_q_header_add_producer_tail(produce_q->q_header, written,
2679 produce_q_size);
2680 return written;
2681}
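
/*
 * Worked example of the wrap-around case above (illustrative numbers
 * only): with produce_q_size == 1024, tail == 1000 and written == 100,
 * the first memcpy_to_queue() call copies the 24 bytes from offset 1000
 * up to the end of the queue, the second copies the remaining 76 bytes
 * at offset 0, and vmci_q_header_add_producer_tail() then advances the
 * producer tail to (1000 + 100) mod 1024 == 76.
 */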
2682
2683/*
2684 * Dequeues data (if available) from the given consume queue. Writes data
2685 * to the user provided buffer using the provided function.
2686 * Assumes the queue->mutex has been acquired.
2687 * Results:
2688 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
2689 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
2690 * (as defined by the queue size).
2691 * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
2692 * Otherwise the number of bytes dequeued is returned.
2693 * Side effects:
2694 * Updates the head pointer of the consume queue.
2695 */
2696static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2697 struct vmci_queue *consume_q,
2698 const u64 consume_q_size,
2699 void *buf,
2700 size_t buf_size,
2701 vmci_memcpy_from_queue_func memcpy_from_queue,
2702				 bool update_consumer)
2703{
2704 s64 buf_ready;
2705 u64 head;
2706 size_t read;
2707 ssize_t result;
2708
2709	result = qp_map_queue_headers(produce_q, consume_q);
2710	if (unlikely(result != VMCI_SUCCESS))
2711 return result;
2712
2713 buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
2714 produce_q->q_header,
2715 consume_q_size);
2716 if (buf_ready == 0)
2717 return VMCI_ERROR_QUEUEPAIR_NODATA;
2718
2719 if (buf_ready < VMCI_SUCCESS)
2720 return (ssize_t) buf_ready;
2721
2722 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
2723 head = vmci_q_header_consumer_head(produce_q->q_header);
2724 if (likely(head + read < consume_q_size)) {
2725 result = memcpy_from_queue(buf, 0, consume_q, head, read);
2726 } else {
2727 /* Head pointer wraps around. */
2728
2729 const size_t tmp = (size_t) (consume_q_size - head);
2730
2731 result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
2732 if (result >= VMCI_SUCCESS)
2733 result = memcpy_from_queue(buf, tmp, consume_q, 0,
2734 read - tmp);
2735
2736 }
2737
2738 if (result < VMCI_SUCCESS)
2739 return result;
2740
2741 if (update_consumer)
2742 vmci_q_header_add_consumer_head(produce_q->q_header,
2743 read, consume_q_size);
2744
2745 return read;
2746}
2747
2748/*
2749 * vmci_qpair_alloc() - Allocates a queue pair.
2750 * @qpair: Pointer for the new vmci_qp struct.
2751 * @handle: Handle to track the resource.
2752 * @produce_qsize: Desired size of the producer queue.
2753 * @consume_qsize: Desired size of the consumer queue.
2754 * @peer: ContextID of the peer.
2755 * @flags: VMCI flags.
2756 * @priv_flags: VMCI privilege flags.
2757 *
2758 * This is the client interface for allocating the memory for a
2759 * vmci_qp structure and then attaching to the underlying
2760 * queue. If an error occurs allocating the memory for the
2761 * vmci_qp structure no attempt is made to attach. If an
2762 * error occurs attaching, then the structure is freed.
2763 */
2764int vmci_qpair_alloc(struct vmci_qp **qpair,
2765 struct vmci_handle *handle,
2766 u64 produce_qsize,
2767 u64 consume_qsize,
2768 u32 peer,
2769 u32 flags,
2770 u32 priv_flags)
2771{
2772 struct vmci_qp *my_qpair;
2773 int retval;
2774 struct vmci_handle src = VMCI_INVALID_HANDLE;
2775 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
2776 enum vmci_route route;
2777 vmci_event_release_cb wakeup_cb;
2778 void *client_data;
2779
2780 /*
2781 * Restrict the size of a queuepair. The device already
2782 * enforces a limit on the total amount of memory that can be
2783 * allocated to queuepairs for a guest. However, we try to
2784 * allocate this memory before we make the queuepair
2785 * allocation hypercall. On Linux, we allocate each page
2786 * separately, which means rather than fail, the guest will
2787 * thrash while it tries to allocate, and will become
2788 * increasingly unresponsive to the point where it appears to
2789 * be hung. So we place a limit on the size of an individual
2790 * queuepair here, and leave the device to enforce the
2791 * restriction on total queuepair memory. (Note that this
2792 * doesn't prevent all cases; a user with only this much
2793 * physical memory could still get into trouble.) The error
2794 * used by the device is NO_RESOURCES, so use that here too.
2795 */
2796
2797 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
2798 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
2799 return VMCI_ERROR_NO_RESOURCES;
2800
2801 retval = vmci_route(&src, &dst, false, &route);
2802 if (retval < VMCI_SUCCESS)
2803 route = vmci_guest_code_active() ?
2804 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
2805
2806	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
2807 pr_devel("NONBLOCK OR PINNED set");
2808		return VMCI_ERROR_INVALID_ARGS;
2809 }
2810
2811	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
2812 if (!my_qpair)
2813 return VMCI_ERROR_NO_MEM;
2814
2815 my_qpair->produce_q_size = produce_qsize;
2816 my_qpair->consume_q_size = consume_qsize;
2817 my_qpair->peer = peer;
2818 my_qpair->flags = flags;
2819 my_qpair->priv_flags = priv_flags;
2820
2821 wakeup_cb = NULL;
2822 client_data = NULL;
2823
2824 if (VMCI_ROUTE_AS_HOST == route) {
2825 my_qpair->guest_endpoint = false;
2826 if (!(flags & VMCI_QPFLAG_LOCAL)) {
2827 my_qpair->blocked = 0;
2828 my_qpair->generation = 0;
2829 init_waitqueue_head(&my_qpair->event);
2830 wakeup_cb = qp_wakeup_cb;
2831 client_data = (void *)my_qpair;
2832 }
2833 } else {
2834 my_qpair->guest_endpoint = true;
2835 }
2836
2837 retval = vmci_qp_alloc(handle,
2838 &my_qpair->produce_q,
2839 my_qpair->produce_q_size,
2840 &my_qpair->consume_q,
2841 my_qpair->consume_q_size,
2842 my_qpair->peer,
2843 my_qpair->flags,
2844 my_qpair->priv_flags,
2845 my_qpair->guest_endpoint,
2846 wakeup_cb, client_data);
2847
2848 if (retval < VMCI_SUCCESS) {
2849 kfree(my_qpair);
2850 return retval;
2851 }
2852
2853 *qpair = my_qpair;
2854 my_qpair->handle = *handle;
2855
2856 return retval;
2857}
2858EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
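
/*
 * Illustrative sketch only, compiled out: how a kernel client might
 * pair vmci_qpair_alloc() with vmci_qpair_detach(). The function name,
 * the peer_cid parameter and the 4 KiB queue sizes are assumptions made
 * for this example and are not part of the driver.
 */
#if 0
static int example_qpair_setup(u32 peer_cid)
{
	struct vmci_qp *qpair;
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	int rv;

	/* Allocate or attach; on failure no vmci_qp structure is left over. */
	rv = vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
			      peer_cid, 0, VMCI_NO_PRIVILEGE_FLAGS);
	if (rv < VMCI_SUCCESS)
		return rv;

	/* ... exchange data with vmci_qpair_enqueue()/vmci_qpair_dequeue() ... */

	/* Detaching also frees the vmci_qp structure and clears the pointer. */
	return vmci_qpair_detach(&qpair);
}
#endif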
2859
2860/*
2861 * vmci_qpair_detach() - Detaches the client from a queue pair.
2862 * @qpair: Reference of a pointer to the qpair struct.
2863 *
2864 * This is the client interface for detaching from a VMCIQPair.
2865 * Note that this routine will free the memory allocated for the
2866 * vmci_qp structure too.
2867 */
2868int vmci_qpair_detach(struct vmci_qp **qpair)
2869{
2870 int result;
2871 struct vmci_qp *old_qpair;
2872
2873 if (!qpair || !(*qpair))
2874 return VMCI_ERROR_INVALID_ARGS;
2875
2876 old_qpair = *qpair;
2877 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
2878
2879 /*
2880 * The guest can fail to detach for a number of reasons, and
2881 * if it does so, it will clean up the entry (if there is one).
2882 * The host can fail too, but it won't clean up the entry
2883 * immediately; it will do that later when the context is
2884 * freed. Either way, we need to release the qpair struct
2885 * here; there isn't much the caller can do, and we don't want
2886 * to leak.
2887 */
2888
2889 memset(old_qpair, 0, sizeof(*old_qpair));
2890 old_qpair->handle = VMCI_INVALID_HANDLE;
2891 old_qpair->peer = VMCI_INVALID_ID;
2892 kfree(old_qpair);
2893 *qpair = NULL;
2894
2895 return result;
2896}
2897EXPORT_SYMBOL_GPL(vmci_qpair_detach);
2898
2899/*
2900 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
2901 * @qpair: Pointer to the queue pair struct.
2902 * @producer_tail: Reference used for storing producer tail index.
2903 * @consumer_head: Reference used for storing the consumer head index.
2904 *
2905 * This is the client interface for getting the current indexes of the
2906 * QPair from the point of the view of the caller as the producer.
2907 */
2908int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
2909 u64 *producer_tail,
2910 u64 *consumer_head)
2911{
2912 struct vmci_queue_header *produce_q_header;
2913 struct vmci_queue_header *consume_q_header;
2914 int result;
2915
2916 if (!qpair)
2917 return VMCI_ERROR_INVALID_ARGS;
2918
2919 qp_lock(qpair);
2920 result =
2921 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2922 if (result == VMCI_SUCCESS)
2923 vmci_q_header_get_pointers(produce_q_header, consume_q_header,
2924 producer_tail, consumer_head);
2925 qp_unlock(qpair);
2926
2927 if (result == VMCI_SUCCESS &&
2928 ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
2929 (consumer_head && *consumer_head >= qpair->produce_q_size)))
2930 return VMCI_ERROR_INVALID_SIZE;
2931
2932 return result;
2933}
2934EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
2935
2936/*
2937 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
2938 * @qpair: Pointer to the queue pair struct.
2939 * @consumer_tail: Reference used for storing consumer tail index.
2940 * @producer_head: Reference used for storing the producer head index.
2941 *
2942 * This is the client interface for getting the current indexes of the
2943 * QPair from the point of the view of the caller as the consumer.
2944 */
2945int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
2946 u64 *consumer_tail,
2947 u64 *producer_head)
2948{
2949 struct vmci_queue_header *produce_q_header;
2950 struct vmci_queue_header *consume_q_header;
2951 int result;
2952
2953 if (!qpair)
2954 return VMCI_ERROR_INVALID_ARGS;
2955
2956 qp_lock(qpair);
2957 result =
2958 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2959 if (result == VMCI_SUCCESS)
2960 vmci_q_header_get_pointers(consume_q_header, produce_q_header,
2961 consumer_tail, producer_head);
2962 qp_unlock(qpair);
2963
2964 if (result == VMCI_SUCCESS &&
2965 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
2966 (producer_head && *producer_head >= qpair->consume_q_size)))
2967 return VMCI_ERROR_INVALID_SIZE;
2968
2969 return result;
2970}
2971EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
2972
2973/*
2974 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
2975 * @qpair: Pointer to the queue pair struct.
2976 *
2977 * This is the client interface for getting the amount of free
2978 * space in the QPair from the point of the view of the caller as
2979 * the producer which is the common case. Returns < 0 if err, else
2980 * available bytes into which data can be enqueued if > 0.
2981 */
2982s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
2983{
2984 struct vmci_queue_header *produce_q_header;
2985 struct vmci_queue_header *consume_q_header;
2986 s64 result;
2987
2988 if (!qpair)
2989 return VMCI_ERROR_INVALID_ARGS;
2990
2991 qp_lock(qpair);
2992 result =
2993 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2994 if (result == VMCI_SUCCESS)
2995 result = vmci_q_header_free_space(produce_q_header,
2996 consume_q_header,
2997 qpair->produce_q_size);
2998 else
2999 result = 0;
3000
3001 qp_unlock(qpair);
3002
3003 return result;
3004}
3005EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
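
/*
 * Worked example (illustrative numbers): with a 4096 byte produce queue,
 * a producer tail of 3000 and a consumer head of 1000, 2000 bytes are
 * enqueued but not yet consumed, so roughly 2096 bytes can still be
 * written (less any byte the queue header helpers hold back to tell a
 * full queue from an empty one).
 */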
3006
3007/*
3008 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
3009 * @qpair: Pointer to the queue pair struct.
3010 *
3011 * This is the client interface for getting the amount of free
3012 * space in the QPair from the point of the view of the caller as
3013 * the consumer which is not the common case. Returns < 0 if err, else
3014 * available bytes into which data can be enqueued if > 0.
3015 */
3016s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
3017{
3018 struct vmci_queue_header *produce_q_header;
3019 struct vmci_queue_header *consume_q_header;
3020 s64 result;
3021
3022 if (!qpair)
3023 return VMCI_ERROR_INVALID_ARGS;
3024
3025 qp_lock(qpair);
3026 result =
3027 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3028 if (result == VMCI_SUCCESS)
3029 result = vmci_q_header_free_space(consume_q_header,
3030 produce_q_header,
3031 qpair->consume_q_size);
3032 else
3033 result = 0;
3034
3035 qp_unlock(qpair);
3036
3037 return result;
3038}
3039EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
3040
3041/*
3042 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
3043 * producer queue.
3044 * @qpair: Pointer to the queue pair struct.
3045 *
3046 * This is the client interface for getting the amount of
3047 * enqueued data in the QPair from the point of the view of the
3048 * caller as the producer which is not the common case. Returns < 0 if err,
3049 * else available bytes that may be read.
3050 */
3051s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
3052{
3053 struct vmci_queue_header *produce_q_header;
3054 struct vmci_queue_header *consume_q_header;
3055 s64 result;
3056
3057 if (!qpair)
3058 return VMCI_ERROR_INVALID_ARGS;
3059
3060 qp_lock(qpair);
3061 result =
3062 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3063 if (result == VMCI_SUCCESS)
3064 result = vmci_q_header_buf_ready(produce_q_header,
3065 consume_q_header,
3066 qpair->produce_q_size);
3067 else
3068 result = 0;
3069
3070 qp_unlock(qpair);
3071
3072 return result;
3073}
3074EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
3075
3076/*
3077 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
3078 * consumer queue.
3079 * @qpair: Pointer to the queue pair struct.
3080 *
3081 * This is the client interface for getting the amount of
3082 * enqueued data in the QPair from the point of the view of the
3083 * caller as the consumer which is the normal case. Returns < 0 if err,
3084 * else available bytes that may be read.
3085 */
3086s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
3087{
3088 struct vmci_queue_header *produce_q_header;
3089 struct vmci_queue_header *consume_q_header;
3090 s64 result;
3091
3092 if (!qpair)
3093 return VMCI_ERROR_INVALID_ARGS;
3094
3095 qp_lock(qpair);
3096 result =
3097 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3098 if (result == VMCI_SUCCESS)
3099 result = vmci_q_header_buf_ready(consume_q_header,
3100 produce_q_header,
3101 qpair->consume_q_size);
3102 else
3103 result = 0;
3104
3105 qp_unlock(qpair);
3106
3107 return result;
3108}
3109EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
3110
3111/*
3112 * vmci_qpair_enqueue() - Throw data on the queue.
3113 * @qpair: Pointer to the queue pair struct.
3114 * @buf: Pointer to buffer containing data
3115 * @buf_size: Length of buffer.
3116 * @buf_type: Buffer type (Unused).
3117 *
3118 * This is the client interface for enqueueing data into the queue.
3119 * Returns number of bytes enqueued or < 0 on error.
3120 */
3121ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3122 const void *buf,
3123 size_t buf_size,
3124 int buf_type)
3125{
3126 ssize_t result;
3127
3128 if (!qpair || !buf)
3129 return VMCI_ERROR_INVALID_ARGS;
3130
3131 qp_lock(qpair);
3132
3133 do {
3134 result = qp_enqueue_locked(qpair->produce_q,
3135 qpair->consume_q,
3136 qpair->produce_q_size,
3137 buf, buf_size,
3138					   qp_memcpy_to_queue);
3139
3140 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3141 !qp_wait_for_ready_queue(qpair))
3142 result = VMCI_ERROR_WOULD_BLOCK;
3143
3144 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3145
3146 qp_unlock(qpair);
3147
3148 return result;
3149}
3150EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
3151
3152/*
3153 * vmci_qpair_dequeue() - Get data from the queue.
3154 * @qpair: Pointer to the queue pair struct.
3155 * @buf: Pointer to buffer for the data
3156 * @buf_size: Length of buffer.
3157 * @buf_type: Buffer type (Unused).
3158 *
3159 * This is the client interface for dequeueing data from the queue.
3160 * Returns number of bytes dequeued or < 0 on error.
3161 */
3162ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
3163 void *buf,
3164 size_t buf_size,
3165 int buf_type)
3166{
3167 ssize_t result;
3168
3169 if (!qpair || !buf)
3170 return VMCI_ERROR_INVALID_ARGS;
3171
3172 qp_lock(qpair);
3173
3174 do {
3175 result = qp_dequeue_locked(qpair->produce_q,
3176 qpair->consume_q,
3177 qpair->consume_q_size,
3178 buf, buf_size,
3179					   qp_memcpy_from_queue, true);
3180
3181 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3182 !qp_wait_for_ready_queue(qpair))
3183 result = VMCI_ERROR_WOULD_BLOCK;
3184
3185 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3186
3187 qp_unlock(qpair);
3188
3189 return result;
3190}
3191EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
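
/*
 * Illustrative sketch only, compiled out: a simple request/response
 * exchange over an already allocated qpair. The message contents, the
 * reply buffer size and the assumption that the peer echoes data back
 * are made up for this example. Note that vmci_qpair_dequeue() returns
 * VMCI_ERROR_QUEUEPAIR_NODATA rather than blocking when the consume
 * queue is empty.
 */
#if 0
static void example_qpair_roundtrip(struct vmci_qp *qpair)
{
	static const char ping[] = "ping";
	char reply[16];
	ssize_t n;

	/* Copy the request into the produce queue. */
	n = vmci_qpair_enqueue(qpair, ping, sizeof(ping), 0);
	if (n < VMCI_SUCCESS)
		return;

	/* Pull whatever the peer has placed on the consume queue. */
	n = vmci_qpair_dequeue(qpair, reply, sizeof(reply), 0);
	if (n >= VMCI_SUCCESS)
		pr_info("qpair example: received %zd bytes\n", n);
}
#endif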
3192
3193/*
3194 * vmci_qpair_peek() - Peek at the data in the queue.
3195 * @qpair: Pointer to the queue pair struct.
3196 * @buf: Pointer to buffer for the data
3197 * @buf_size: Length of buffer.
3198 * @buf_type: Buffer type (Unused on Linux).
3199 *
3200 * This is the client interface for peeking into a queue. (I.e.,
3201 * copy data from the queue without updating the head pointer.)
3202 * Returns number of bytes dequeued or < 0 on error.
3203 */
3204ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
3205 void *buf,
3206 size_t buf_size,
3207 int buf_type)
3208{
3209 ssize_t result;
3210
3211 if (!qpair || !buf)
3212 return VMCI_ERROR_INVALID_ARGS;
3213
3214 qp_lock(qpair);
3215
3216 do {
3217 result = qp_dequeue_locked(qpair->produce_q,
3218 qpair->consume_q,
3219 qpair->consume_q_size,
3220 buf, buf_size,
3221					   qp_memcpy_from_queue, false);
3222
3223 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3224 !qp_wait_for_ready_queue(qpair))
3225 result = VMCI_ERROR_WOULD_BLOCK;
3226
3227 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3228
3229 qp_unlock(qpair);
3230
3231 return result;
3232}
3233EXPORT_SYMBOL_GPL(vmci_qpair_peek);
3234
3235/*
3236 * vmci_qpair_enquev() - Throw data on the queue using iov.
3237 * @qpair: Pointer to the queue pair struct.
3238 * @msg: Pointer to the msghdr whose iovec holds the data to enqueue.
3239 * @iov_size: Length of buffer.
3240 * @buf_type: Buffer type (Unused).
3241 *
3242 * This is the client interface for enqueueing data into the queue.
3243 * This function uses IO vectors to handle the work. Returns number
3244 * of bytes enqueued or < 0 on error.
3245 */
3246ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
3247			  struct msghdr *msg,
3248			  size_t iov_size,
3249 int buf_type)
3250{
3251 ssize_t result;
3252
3253	if (!qpair)
3254		return VMCI_ERROR_INVALID_ARGS;
3255
3256 qp_lock(qpair);
3257
3258 do {
3259 result = qp_enqueue_locked(qpair->produce_q,
3260 qpair->consume_q,
3261 qpair->produce_q_size,
3262					   msg, iov_size,
3263					   qp_memcpy_to_queue_iov);
3264
3265 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3266 !qp_wait_for_ready_queue(qpair))
3267 result = VMCI_ERROR_WOULD_BLOCK;
3268
3269 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3270
3271 qp_unlock(qpair);
3272
3273 return result;
3274}
3275EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
3276
3277/*
3278 * vmci_qpair_dequev() - Get data from the queue using iov.
3279 * @qpair: Pointer to the queue pair struct.
3280 * @msg: Pointer to the msghdr whose iovec receives the data.
3281 * @iov_size: Length of buffer.
3282 * @buf_type: Buffer type (Unused).
3283 *
3284 * This is the client interface for dequeueing data from the queue.
3285 * This function uses IO vectors to handle the work. Returns number
3286 * of bytes dequeued or < 0 on error.
3287 */
3288ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
3289			  struct msghdr *msg,
3290			  size_t iov_size,
3291 int buf_type)
3292{
3293 ssize_t result;
3294
3295	if (!qpair)
3296		return VMCI_ERROR_INVALID_ARGS;
3297
3298	qp_lock(qpair);
3299
3300	do {
3301 result = qp_dequeue_locked(qpair->produce_q,
3302 qpair->consume_q,
3303 qpair->consume_q_size,
3304					   msg, iov_size,
3305					   qp_memcpy_from_queue_iov,
3306					   true);
3307
3308 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3309 !qp_wait_for_ready_queue(qpair))
3310 result = VMCI_ERROR_WOULD_BLOCK;
3311
3312 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3313
3314 qp_unlock(qpair);
3315
3316 return result;
3317}
3318EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
3319
3320/*
3321 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
3322 * @qpair: Pointer to the queue pair struct.
3323 * @msg: Pointer to the msghdr whose iovec receives the peeked data.
3324 * @iov_size: Length of buffer.
3325 * @buf_type: Buffer type (Unused on Linux).
3326 *
3327 * This is the client interface for peeking into a queue. (I.e.,
3328 * copy data from the queue without updating the head pointer.)
3329 * This function uses IO vectors to handle the work. Returns number
3330 * of bytes peeked or < 0 on error.
3331 */
3332ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
3333			 struct msghdr *msg,
3334			 size_t iov_size,
3335 int buf_type)
3336{
3337 ssize_t result;
3338
3339	if (!qpair)
3340		return VMCI_ERROR_INVALID_ARGS;
3341
3342 qp_lock(qpair);
3343
3344 do {
3345 result = qp_dequeue_locked(qpair->produce_q,
3346 qpair->consume_q,
3347 qpair->consume_q_size,
3348					   msg, iov_size,
3349					   qp_memcpy_from_queue_iov,
3350					   false);
3351
3352 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3353 !qp_wait_for_ready_queue(qpair))
3354 result = VMCI_ERROR_WOULD_BLOCK;
3355
3356 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3357
3358 qp_unlock(qpair);
3359 return result;
3360}
3361EXPORT_SYMBOL_GPL(vmci_qpair_peekv);