/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#ifndef _VMW_VMCI_DEF_H_
#define _VMW_VMCI_DEF_H_

#include <linux/atomic.h>

/* Register offsets. */
#define VMCI_STATUS_ADDR 0x00
#define VMCI_CONTROL_ADDR 0x04
#define VMCI_ICR_ADDR 0x08
#define VMCI_IMR_ADDR 0x0c
#define VMCI_DATA_OUT_ADDR 0x10
#define VMCI_DATA_IN_ADDR 0x14
#define VMCI_CAPS_ADDR 0x18
#define VMCI_RESULT_LOW_ADDR 0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20

/* Max number of devices. */
#define VMCI_MAX_DEVICES 1

/* Status register bits. */
#define VMCI_STATUS_INT_ON 0x1

/* Control register bits. */
#define VMCI_CONTROL_RESET 0x1
#define VMCI_CONTROL_INT_ENABLE 0x2
#define VMCI_CONTROL_INT_DISABLE 0x4

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL 0x1
#define VMCI_CAPS_GUESTCALL 0x2
#define VMCI_CAPS_DATAGRAM 0x4
#define VMCI_CAPS_NOTIFICATIONS 0x8

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM 0x1
#define VMCI_ICR_NOTIFICATION 0x2

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM 0x1
#define VMCI_IMR_NOTIFICATION 0x2

/* Maximum MSI/MSI-X interrupt vectors in the device. */
#define VMCI_MAX_INTRS 2

/*
 * Supported interrupt vectors. There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
enum {
	VMCI_INTR_DATAGRAM = 0,
	VMCI_INTR_NOTIFICATION = 1,
};
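
/*
 * Illustrative sketch (an editor's example, not part of this header):
 * how a guest driver might test the Interrupt Cause register when its
 * vectors are shared, dispatching on the ICR bits above. "iobase" is
 * assumed to be an ioremap()ed mapping of the device register window
 * (ioread32 comes from <linux/io.h>); whether the read alone
 * acknowledges the causes is device behavior, not specified here.
 */
static inline bool vmci_example_icr_pending(void __iomem *iobase, u32 cause)
{
	u32 icr = ioread32(iobase + VMCI_ICR_ADDR);

	/* 0 means nothing is pending; all-ones suggests the device is gone. */
	if (icr == 0 || icr == ~0U)
		return false;

	return (icr & cause) != 0;	/* e.g. cause == VMCI_ICR_DATAGRAM */
}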

/*
 * A single VMCI device has an upper limit of 128MB on the amount of
 * memory that can be used for queue pairs.
 */
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)

/*
 * Queues with pre-mapped data pages must be small, so that we don't pin
 * too much kernel memory (especially on vmkernel). We limit a queuepair to
 * 32 KB, or 16 KB per queue for symmetrical pairs.
 */
#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)

/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we
 * statically know how many of them will create datagram handles. If a
 * new caller arrives and we have run out of slots we can manually
 * increment the maximum size of available resource IDs.
 *
 * VMCI reserved hypervisor datagram resource IDs.
 */
enum {
	VMCI_RESOURCES_QUERY = 0,
	VMCI_GET_CONTEXT_ID = 1,
	VMCI_SET_NOTIFY_BITMAP = 2,
	VMCI_DOORBELL_LINK = 3,
	VMCI_DOORBELL_UNLINK = 4,
	VMCI_DOORBELL_NOTIFY = 5,
	/*
	 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
	 * obsoleted by the removal of VM to VM communication.
	 */
	VMCI_DATAGRAM_REQUEST_MAP = 6,
	VMCI_DATAGRAM_REMOVE_MAP = 7,
	VMCI_EVENT_SUBSCRIBE = 8,
	VMCI_EVENT_UNSUBSCRIBE = 9,
	VMCI_QUEUEPAIR_ALLOC = 10,
	VMCI_QUEUEPAIR_DETACH = 11,

	/*
	 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
	 * WS 7.0/7.1 and ESX 4.1
	 */
	VMCI_HGFS_TRANSPORT = 13,
	VMCI_UNITY_PBRPC_REGISTER = 14,
	VMCI_RPC_PRIVILEGED = 15,
	VMCI_RPC_UNPRIVILEGED = 16,
	VMCI_RESOURCE_MAX = 17,
};

/*
 * struct vmci_handle - Ownership information structure
 * @context: The VMX context ID.
 * @resource: The resource ID (used for locating in resource hash).
 *
 * The vmci_handle structure is used to track resources used within
 * vmw_vmci.
 */
struct vmci_handle {
	u32 context;
	u32 resource;
};

#define vmci_make_handle(_cid, _rid) \
	(struct vmci_handle){ .context = _cid, .resource = _rid }

static inline bool vmci_handle_is_equal(struct vmci_handle h1,
					struct vmci_handle h2)
{
	return h1.context == h2.context && h1.resource == h2.resource;
}

#define VMCI_INVALID_ID ~0
static const struct vmci_handle VMCI_INVALID_HANDLE = {
	.context = VMCI_INVALID_ID,
	.resource = VMCI_INVALID_ID
};

static inline bool vmci_handle_is_invalid(struct vmci_handle h)
{
	return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
}

/*
 * The defines below can be used to send anonymous requests.
 * Sending from the anonymous source also indicates that no response
 * is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
	.context = VMCI_ANON_SRC_CONTEXT_ID,
	.resource = VMCI_ANON_SRC_RESOURCE_ID
};

/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT ((u32) 16)

/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID 0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services. This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID 1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID 2

#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
				  (_cid) > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID 0
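
/*
 * Illustrative sketch (an editor's example): combining vmci_make_handle
 * with the reserved IDs above. One handle names the hypervisor's queue
 * pair allocation service, the other refers to the host context itself.
 */
static inline bool vmci_example_handles_distinct(void)
{
	struct vmci_handle qp_alloc =
		vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
				 VMCI_QUEUEPAIR_ALLOC);
	struct vmci_handle host_ctx =
		vmci_make_handle(VMCI_HOST_CONTEXT_ID,
				 VMCI_CONTEXT_RESOURCE_ID);

	return !vmci_handle_is_invalid(qp_alloc) &&
	       !vmci_handle_is_equal(qp_alloc, host_ctx);
}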

/*
 * VMCI error codes.
 */
enum {
	VMCI_SUCCESS_QUEUEPAIR_ATTACH = 5,
	VMCI_SUCCESS_QUEUEPAIR_CREATE = 4,
	VMCI_SUCCESS_LAST_DETACH = 3,
	VMCI_SUCCESS_ACCESS_GRANTED = 2,
	VMCI_SUCCESS_ENTRY_DEAD = 1,
	VMCI_SUCCESS = 0,
	VMCI_ERROR_INVALID_RESOURCE = (-1),
	VMCI_ERROR_INVALID_ARGS = (-2),
	VMCI_ERROR_NO_MEM = (-3),
	VMCI_ERROR_DATAGRAM_FAILED = (-4),
	VMCI_ERROR_MORE_DATA = (-5),
	VMCI_ERROR_NO_MORE_DATAGRAMS = (-6),
	VMCI_ERROR_NO_ACCESS = (-7),
	VMCI_ERROR_NO_HANDLE = (-8),
	VMCI_ERROR_DUPLICATE_ENTRY = (-9),
	VMCI_ERROR_DST_UNREACHABLE = (-10),
	VMCI_ERROR_PAYLOAD_TOO_LARGE = (-11),
	VMCI_ERROR_INVALID_PRIV = (-12),
	VMCI_ERROR_GENERIC = (-13),
	VMCI_ERROR_PAGE_ALREADY_SHARED = (-14),
	VMCI_ERROR_CANNOT_SHARE_PAGE = (-15),
	VMCI_ERROR_CANNOT_UNSHARE_PAGE = (-16),
	VMCI_ERROR_NO_PROCESS = (-17),
	VMCI_ERROR_NO_DATAGRAM = (-18),
	VMCI_ERROR_NO_RESOURCES = (-19),
	VMCI_ERROR_UNAVAILABLE = (-20),
	VMCI_ERROR_NOT_FOUND = (-21),
	VMCI_ERROR_ALREADY_EXISTS = (-22),
	VMCI_ERROR_NOT_PAGE_ALIGNED = (-23),
	VMCI_ERROR_INVALID_SIZE = (-24),
	VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
	VMCI_ERROR_TIMEOUT = (-26),
	VMCI_ERROR_DATAGRAM_INCOMPLETE = (-27),
	VMCI_ERROR_INCORRECT_IRQL = (-28),
	VMCI_ERROR_EVENT_UNKNOWN = (-29),
	VMCI_ERROR_OBSOLETE = (-30),
	VMCI_ERROR_QUEUEPAIR_MISMATCH = (-31),
	VMCI_ERROR_QUEUEPAIR_NOTSET = (-32),
	VMCI_ERROR_QUEUEPAIR_NOTOWNER = (-33),
	VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
	VMCI_ERROR_QUEUEPAIR_NOSPACE = (-35),
	VMCI_ERROR_QUEUEPAIR_NODATA = (-36),
	VMCI_ERROR_BUSMEM_INVALIDATION = (-37),
	VMCI_ERROR_MODULE_NOT_LOADED = (-38),
	VMCI_ERROR_DEVICE_NOT_FOUND = (-39),
	VMCI_ERROR_QUEUEPAIR_NOT_READY = (-40),
	VMCI_ERROR_WOULD_BLOCK = (-41),

	/* VMCI clients should return error codes within this range. */
	VMCI_ERROR_CLIENT_MIN = (-500),
	VMCI_ERROR_CLIENT_MAX = (-550),

	/* Internal error codes. */
	VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
};

/* VMCI reserved events. */
enum {
	/* Only applicable to guest endpoints */
	VMCI_EVENT_CTX_ID_UPDATE = 0,

	/* Applicable to guest and host */
	VMCI_EVENT_CTX_REMOVED = 1,

	/* Only applicable to guest endpoints */
	VMCI_EVENT_QP_RESUMED = 2,

	/* Applicable to guest and host */
	VMCI_EVENT_QP_PEER_ATTACH = 3,

	/* Applicable to guest and host */
	VMCI_EVENT_QP_PEER_DETACH = 4,

	/*
	 * Applicable to VMX and vmk. On vmk,
	 * this event has the Context payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_ON = 5,

	/*
	 * Applicable to VMX and vmk. Same as
	 * above for the payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_OFF = 6,
	VMCI_EVENT_MAX = 7,
};

/*
 * Of the above events, a few are reserved for use in the VMX, and
 * other endpoints (guest and host kernel) should not use them. For
 * the rest of the events, we allow both host and guest endpoints to
 * subscribe to them, to maintain the same API for host and guest
 * endpoints.
 */
#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
				      (_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
				  !VMCI_EVENT_VALID_VMX(_event))

/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0

/*
 * VMCI coarse-grained privileges (per context or host
 * process/endpoint). An entity with the restricted flag is only
 * allowed to interact with the hypervisor and trusted entities.
 */
enum {
	VMCI_NO_PRIVILEGE_FLAGS = 0,
	VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
	VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
	VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
				    VMCI_PRIVILEGE_FLAG_TRUSTED),
	VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
	VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
	VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
};

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023

/*
 * Driver version.
 *
 * Increment major version when you make an incompatible change.
 * Compatibility goes both ways (old driver with new executable
 * as well as new driver with old executable).
 */

/* Never change VMCI_VERSION_SHIFT_WIDTH */
#define VMCI_VERSION_SHIFT_WIDTH 16
#define VMCI_MAKE_VERSION(_major, _minor) \
	((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))

#define VMCI_VERSION_MAJOR(v) ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
#define VMCI_VERSION_MINOR(v) ((u16) (v))

/*
 * VMCI_VERSION is always the current version. Subsequently listed
 * versions are ways of detecting previous versions of the connecting
 * application (i.e., VMX).
 *
 * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
 * communication.
 *
 * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
 * support.
 *
 * VMCI_VERSION_HOSTQP: This version introduced host end point support
 * for hosted products.
 *
 * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
 * support for host end-points.
 *
 * VMCI_VERSION_PREVERS2: This fictional version number is intended to
 * represent the version of a VMX which doesn't call into the driver
 * with ioctl VERSION2 and thus doesn't establish its version with the
 * driver.
 */

#define VMCI_VERSION VMCI_VERSION_NOVMVM
#define VMCI_VERSION_NOVMVM VMCI_MAKE_VERSION(11, 0)
#define VMCI_VERSION_NOTIFY VMCI_MAKE_VERSION(10, 0)
#define VMCI_VERSION_HOSTQP VMCI_MAKE_VERSION(9, 0)
#define VMCI_VERSION_PREHOSTQP VMCI_MAKE_VERSION(8, 0)
#define VMCI_VERSION_PREVERS2 VMCI_MAKE_VERSION(1, 0)
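
/*
 * Illustrative sketch (an editor's example): the version macros
 * round-trip. VMCI_MAKE_VERSION(11, 0) encodes 11 << 16, from which
 * VMCI_VERSION_MAJOR() and VMCI_VERSION_MINOR() recover 11 and 0.
 */
static inline bool vmci_example_version_roundtrip(void)
{
	u32 v = VMCI_MAKE_VERSION(11, 0);

	return v == VMCI_VERSION_NOVMVM &&
	       VMCI_VERSION_MAJOR(v) == 11 && VMCI_VERSION_MINOR(v) == 0;
}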

#define VMCI_SOCKETS_MAKE_VERSION(_p) \
	((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))

/*
 * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and
 * we start at sequence 9f. This gives us the same values that our shipping
 * products use, starting at 1951, provided we leave out the direction and
 * structure size. Note that VMMon occupies the block following us, starting
 * at 2001.
 */
#define IOCTL_VMCI_VERSION _IO(7, 0x9f)	/* 1951 */
#define IOCTL_VMCI_INIT_CONTEXT _IO(7, 0xa0)
#define IOCTL_VMCI_QUEUEPAIR_SETVA _IO(7, 0xa4)
#define IOCTL_VMCI_NOTIFY_RESOURCE _IO(7, 0xa5)
#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE _IO(7, 0xa6)
#define IOCTL_VMCI_VERSION2 _IO(7, 0xa7)
#define IOCTL_VMCI_QUEUEPAIR_ALLOC _IO(7, 0xa8)
#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE _IO(7, 0xa9)
#define IOCTL_VMCI_QUEUEPAIR_DETACH _IO(7, 0xaa)
#define IOCTL_VMCI_DATAGRAM_SEND _IO(7, 0xab)
#define IOCTL_VMCI_DATAGRAM_RECEIVE _IO(7, 0xac)
#define IOCTL_VMCI_CTX_ADD_NOTIFICATION _IO(7, 0xaf)
#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION _IO(7, 0xb0)
#define IOCTL_VMCI_CTX_GET_CPT_STATE _IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE _IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID _IO(7, 0xb3)
#define IOCTL_VMCI_SOCKETS_VERSION _IO(7, 0xb4)
#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE _IO(7, 0xb8)
#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
#define IOCTL_VMCI_SET_NOTIFY _IO(7, 0xcb)	/* 1995 */
/*IOCTL_VMMON_START _IO(7, 0xd1)*/	/* 2001 */

/*
 * struct vmci_queue_header - VMCI Queue Header information.
 *
 * A Queue cannot stand by itself as designed. Each Queue's header
 * contains a pointer into itself (the producer_tail) and into its peer
 * (consumer_head). The reason for the separation is one of
 * accessibility: Each end-point can modify two things: where the next
 * location to enqueue is within its produce_q (producer_tail); and
 * where the next dequeue location is in its consume_q (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to
 * guest; NOTE that in the host both queue headers are mapped r/w).
 * But, each end-point needs read access to both Queue header
 * structures in order to determine how much space is used (or left)
 * in the Queue. This is because for an end-point to know how full
 * its produce_q is, it needs to use the consumer_head that points into
 * the produce_q, but -that- consumer_head is in the Queue header for
 * that end-point's consume_q.
 *
 * Thoroughly confused? Sorry.
 *
 * producer_tail: the point to enqueue new entrants. When you approach
 * a line in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued. In other words, whoever is at the head of the line is
 * served next.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head, in which case consumer_head does not point to a valid
 * byte of data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be
 * in the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head
 * then the produce_q is empty.
 */
struct vmci_queue_header {
	/* All fields are 64bit and aligned. */
	struct vmci_handle handle;	/* Identifier. */
	atomic64_t producer_tail;	/* Offset in this queue. */
	atomic64_t consumer_head;	/* Offset in peer queue. */
};
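
/*
 * Illustrative sketch (an editor's example): the emptiness rule stated
 * above, expressed in code. An endpoint's produce_q is empty when its
 * own producer_tail equals the consumer_head kept in the peer's queue
 * header. The casts mirror the const handling used by the helpers
 * further down in this file.
 */
static inline bool
vmci_example_produce_q_empty(const struct vmci_queue_header *produce_q_header,
			     const struct vmci_queue_header *consume_q_header)
{
	struct vmci_queue_header *pq =
		(struct vmci_queue_header *)produce_q_header;
	struct vmci_queue_header *cq =
		(struct vmci_queue_header *)consume_q_header;

	return atomic64_read(&pq->producer_tail) ==
	       atomic64_read(&cq->consumer_head);
}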

/*
 * struct vmci_datagram - Base struct for vmci datagrams.
 * @dst: A vmci_handle that tracks the destination of the datagram.
 * @src: A vmci_handle that tracks the source of the datagram.
 * @payload_size: The size of the payload.
 *
 * vmci_datagram structs are used when sending vmci datagrams. They include
 * the necessary source and destination information to properly route
 * the information along with the size of the package.
 */
struct vmci_datagram {
	struct vmci_handle dst;
	struct vmci_handle src;
	u64 payload_size;
};

/*
 * Datagram flags. VMCI_FLAG_WELLKNOWN_DG_HND creates a well-known
 * handle instead of a per context handle. VMCI_FLAG_DG_DELAYED_CB
 * defers datagram delivery, so that the datagram callback is invoked
 * in a delayed context (not interrupt context).
 */
#define VMCI_FLAG_DG_NONE 0
#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
#define VMCI_FLAG_ANYCID_DG_HND 0x2
#define VMCI_FLAG_DG_DELAYED_CB 0x4

/*
 * Maximum supported size of a VMCI datagram for routable datagrams.
 * Datagrams going to the hypervisor are allowed to be larger.
 */
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
				  sizeof(struct vmci_datagram))
#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
				      sizeof(struct vmci_datagram))
#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
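
/*
 * Illustrative sketch (an editor's example): filling in a datagram
 * header and payload with the macros above. The caller is assumed to
 * supply a buffer of at least VMCI_DG_SIZE() bytes; memcpy is assumed
 * available via <linux/string.h>.
 */
static inline int vmci_example_init_dg(struct vmci_datagram *dg,
				       struct vmci_handle dst,
				       struct vmci_handle src,
				       const void *payload, u64 payload_size)
{
	if (payload_size > VMCI_MAX_DG_PAYLOAD_SIZE)
		return VMCI_ERROR_PAYLOAD_TOO_LARGE;

	dg->dst = dst;
	dg->src = src;
	dg->payload_size = payload_size;
	memcpy(VMCI_DG_PAYLOAD(dg), payload, payload_size);
	return VMCI_SUCCESS;
}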

struct vmci_event_payload_qp {
	struct vmci_handle handle;	/* queue_pair handle. */
	u32 peer_id;	/* Context id of attaching/detaching VM. */
	u32 _pad;
};

/* Flags for VMCI queue_pair API. */
enum {
	/* Fail alloc if QP not created by peer. */
	VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,

	/* Only allow attaches from local context. */
	VMCI_QPFLAG_LOCAL = 1 << 1,

	/* Host won't block when guest is quiesced. */
	VMCI_QPFLAG_NONBLOCK = 1 << 2,

	/* Pin data pages in ESX. Used with NONBLOCK */
	VMCI_QPFLAG_PINNED = 1 << 3,

	/* Update the following flag when adding new flags. */
	VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
			     VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),

	/* Convenience flags */
	VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
	VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
};

/*
 * We allow at least 1024 more event datagrams from the hypervisor past the
 * normally allowed datagrams pending for a given context. We define this
 * limit on event datagrams from the hypervisor to guard against a DoS
 * attack from a malicious VM which could repeatedly attach to and detach
 * from a queue pair, causing events to be queued at the destination VM.
 * However, the rate at which such events can be generated is small, since
 * it requires a VM exit and handling of a queue pair attach/detach call at
 * the hypervisor. Event datagrams may be queued up at the destination VM
 * if it has interrupts disabled or if it is not draining events for some
 * other reason. 1024 datagrams is a grossly conservative estimate of the
 * time for which interrupts may be disabled in the destination VM, but at
 * the same time does not exacerbate the memory pressure problem on the
 * host by much (the size of each event datagram is small).
 */
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
	(VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
	 1024 * (sizeof(struct vmci_datagram) + \
		 sizeof(struct vmci_event_data_max)))

/*
 * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
 * hypervisor resources. Struct size is 32 bytes. All fields in struct are
 * aligned to their natural alignment.
 */
struct vmci_resource_query_hdr {
	struct vmci_datagram hdr;
	u32 num_resources;
	u32 _padding;
};

/*
 * Convenience struct for negotiating vectors. Must match layout of
 * VMCIResourceQueryHdr minus the struct vmci_datagram header.
 */
struct vmci_resource_query_msg {
	u32 num_resources;
	u32 _padding;
	u32 resources[1];
};

/*
 * The maximum number of resources that can be queried using
 * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
 * bits of a positive return value. Negative values are reserved for
 * errors.
 */
#define VMCI_RESOURCE_QUERY_MAX_NUM 31

/* Maximum size for the VMCI_RESOURCE_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE \
	(sizeof(struct vmci_resource_query_hdr) + \
	 sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)

/*
 * Struct used for setting the notification bitmap. All fields in
 * struct are aligned to their natural alignment.
 */
struct vmci_notify_bm_set_msg {
	struct vmci_datagram hdr;
	u32 bitmap_ppn;
	u32 _pad;
};

/*
 * Struct used for linking a doorbell handle with an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_link_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
	u64 notify_idx;
};

/*
 * Struct used for unlinking a doorbell handle from an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_unlink_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};

/*
 * Struct used for generating a notification on a doorbell handle. All
 * fields in struct are aligned to their natural alignment.
 */
struct vmci_doorbell_notify_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};

/*
 * This struct is used to contain data for events. Size of this struct is a
 * multiple of 8 bytes, and all fields are aligned to their natural alignment.
 */
struct vmci_event_data {
	u32 event;	/* 4 bytes. */
	u32 _pad;
	/* Event payload is put here. */
};

/*
 * Define the different VMCI_EVENT payload data types here. All structs must
 * be a multiple of 8 bytes, and fields must be aligned to their natural
 * alignment.
 */
struct vmci_event_payld_ctx {
	u32 context_id;	/* 4 bytes. */
	u32 _pad;
};

struct vmci_event_payld_qp {
	struct vmci_handle handle;	/* queue_pair handle. */
	u32 peer_id;	/* Context id of attaching/detaching VM. */
	u32 _pad;
};

/*
 * We define the following struct to get the size of the maximum event
 * data the hypervisor may send to the guest. If adding a new event
 * payload type above, add it to the following struct too (inside the
 * union).
 */
struct vmci_event_data_max {
	struct vmci_event_data event_data;
	union {
		struct vmci_event_payld_ctx context_payload;
		struct vmci_event_payld_qp qp_payload;
	} ev_data_payload;
};

/*
 * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
 * VMCI_EVENT_HANDLER messages. Struct size is 32 bytes. All fields
 * in struct are aligned to their natural alignment.
 */
struct vmci_event_msg {
	struct vmci_datagram hdr;

	/* Has event type and payload. */
	struct vmci_event_data event_data;

	/* Payload gets put here. */
};

/* Event with context payload. */
struct vmci_event_ctx {
	struct vmci_event_msg msg;
	struct vmci_event_payld_ctx payload;
};

/* Event with QP payload. */
struct vmci_event_qp {
	struct vmci_event_msg msg;
	struct vmci_event_payld_qp payload;
};

/*
 * Structs used for queue_pair alloc and detach messages. We align fields of
 * these structs to 64bit boundaries.
 */
struct vmci_qp_alloc_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 num_ppns;

	/* List of PPNs placed here. */
};

struct vmci_qp_detach_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};

/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB 0x01

typedef void (*vmci_callback) (void *client_data);

/*
 * struct vmci_qp - A vmw_vmci queue pair handle.
 *
 * This structure is used as a handle to a queue pair created by
 * VMCI. It is intentionally left opaque to clients.
 */
struct vmci_qp;

/* Callback needed for correctly waiting on events. */
typedef int (*vmci_datagram_recv_cb) (void *client_data,
				      struct vmci_datagram *msg);

/* VMCI Event API. */
typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
			       void *client_data);

/*
 * We use the following inline function to access the payload data
 * associated with an event data.
 */
static inline const void *
vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
{
	return (const char *)ev_data + sizeof(*ev_data);
}

static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
{
	return (void *)vmci_event_data_const_payload(ev_data);
}
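
/*
 * Illustrative sketch (an editor's example): decoding a queue pair peer
 * event received as a datagram. The caller is assumed to have already
 * verified that the datagram is at least sizeof(struct vmci_event_qp)
 * bytes long.
 */
static inline u32 vmci_example_qp_event_peer(struct vmci_datagram *msg)
{
	struct vmci_event_msg *e_msg = (struct vmci_event_msg *)msg;
	const struct vmci_event_payld_qp *e_payload =
		vmci_event_data_const_payload(&e_msg->event_data);

	/* Only the QP peer events carry a vmci_event_payld_qp payload. */
	if (e_msg->event_data.event != VMCI_EVENT_QP_PEER_ATTACH &&
	    e_msg->event_data.event != VMCI_EVENT_QP_PEER_DETACH)
		return VMCI_INVALID_ID;

	return e_payload->peer_id;
}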

/*
 * Helper to read a value from a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. Also, doing an
 * atomic64_read on X86_32 uniprocessor systems may be implemented
 * as a non-locked cmpxchg8b, which may end up overwriting updates done
 * by the VMCI device to the memory location. On 32bit SMP, the lock
 * prefix will be used, so correctness isn't an issue, but using a
 * 64bit operation still adds unnecessary overhead.
 */
static inline u64 vmci_q_read_pointer(atomic64_t *var)
{
#if defined(CONFIG_X86_32)
	return atomic_read((atomic_t *)var);
#else
	return atomic64_read(var);
#endif
}

/*
 * Helper to set the value of a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. On 32bit SMP, using a
 * locked cmpxchg8b adds unnecessary overhead.
 */
static inline void vmci_q_set_pointer(atomic64_t *var,
				      u64 new_val)
{
#if defined(CONFIG_X86_32)
	atomic_set((atomic_t *)var, (u32)new_val);
#else
	atomic64_set(var, new_val);
#endif
}

/*
 * Helper to add a given offset to a head or tail pointer. Wraps the
 * value of the pointer around the max size of the queue.
 */
static inline void vmci_qp_add_pointer(atomic64_t *var,
				       size_t add,
				       u64 size)
{
	u64 new_val = vmci_q_read_pointer(var);

	if (new_val >= size - add)
		new_val -= size;

	new_val += add;

	vmci_q_set_pointer(var, new_val);
}
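
/*
 * Worked example for vmci_qp_add_pointer(): with size = 1024 and the
 * pointer at 1000, adding 100 takes the branch above (1000 >= 1024 - 100),
 * leaving 1000 - 1024 + 100 = 76, i.e. (1000 + 100) mod 1024.
 */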

/*
 * Helper routine to get the Producer Tail from the supplied queue.
 */
static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return vmci_q_read_pointer(&qh->producer_tail);
}

/*
 * Helper routine to get the Consumer Head from the supplied queue.
 */
static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return vmci_q_read_pointer(&qh->consumer_head);
}

/*
 * Helper routine to increment the Producer Tail. Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the tail itself.
 */
static inline void
vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
				size_t add,
				u64 queue_size)
{
	vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 * Helper routine to increment the Consumer Head. Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the head itself.
 */
static inline void
vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
				size_t add,
				u64 queue_size)
{
	vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
}

/*
 * Helper routine for getting the head and tail pointers for a queue.
 * Both VMCI queue headers are needed to get both pointers for one queue.
 */
static inline void
vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
			   const struct vmci_queue_header *consume_q_header,
			   u64 *producer_tail,
			   u64 *consumer_head)
{
	if (producer_tail)
		*producer_tail = vmci_q_header_producer_tail(produce_q_header);

	if (consumer_head)
		*consumer_head = vmci_q_header_consumer_head(consume_q_header);
}

static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
				      const struct vmci_handle handle)
{
	q_header->handle = handle;
	atomic64_set(&q_header->producer_tail, 0);
	atomic64_set(&q_header->consumer_head, 0);
}

/*
 * Finds available free space in a produce queue to enqueue more
 * data or reports an error if queue pair corruption is detected.
 */
static inline s64
vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
			 const struct vmci_queue_header *consume_q_header,
			 const u64 produce_q_size)
{
	u64 tail;
	u64 head;
	u64 free_space;

	tail = vmci_q_header_producer_tail(produce_q_header);
	head = vmci_q_header_consumer_head(consume_q_header);

	if (tail >= produce_q_size || head >= produce_q_size)
		return VMCI_ERROR_INVALID_SIZE;

	/*
	 * Deduct 1 to avoid tail becoming equal to head which causes
	 * ambiguity. If head and tail are equal it means that the
	 * queue is empty.
	 */
	if (tail >= head)
		free_space = produce_q_size - (tail - head) - 1;
	else
		free_space = head - tail - 1;

	return free_space;
}

/*
 * vmci_q_header_free_space() does all the heavy lifting of
 * determining the number of free bytes in a Queue. This routine then
 * subtracts that size from the full size of the Queue so
 * the caller knows how many bytes are ready to be dequeued.
 * Results:
 * On success, available data size in bytes (up to MAX_INT64).
 * On failure, appropriate error code.
 */
static inline s64
vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
			const struct vmci_queue_header *produce_q_header,
			const u64 consume_q_size)
{
	s64 free_space;

	free_space = vmci_q_header_free_space(consume_q_header,
					      produce_q_header, consume_q_size);
	if (free_space < VMCI_SUCCESS)
		return free_space;

	return consume_q_size - free_space - 1;
}
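
/*
 * Illustrative sketch (an editor's example): a producer-side check
 * before enqueueing, built on the helpers above. The queue headers and
 * the produce queue size come from the client's queue pair state.
 */
static inline bool
vmci_example_can_enqueue(const struct vmci_queue_header *produce_q_header,
			 const struct vmci_queue_header *consume_q_header,
			 u64 produce_q_size, size_t bytes)
{
	s64 free_space = vmci_q_header_free_space(produce_q_header,
						  consume_q_header,
						  produce_q_size);

	return free_space >= 0 && (u64)free_space >= bytes;
}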

#endif /* _VMW_VMCI_DEF_H_ */