/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#ifndef __KERNEL__
#include <drm/drm.h>
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM 4
#define DRM_VMW_CLAIM_STREAM 5
#define DRM_VMW_UNREF_STREAM 6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT 7
#define DRM_VMW_UNREF_CONTEXT 8
#define DRM_VMW_CREATE_SURFACE 9
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_GET_3D_CAP 13
#define DRM_VMW_FENCE_WAIT 14
#define DRM_VMW_FENCE_SIGNALED 15
#define DRM_VMW_FENCE_UNREF 16
#define DRM_VMW_FENCE_EVENT 17
#define DRM_VMW_PRESENT 18
#define DRM_VMW_PRESENT_READBACK 19
#define DRM_VMW_UPDATE_LAYOUT 20
#define DRM_VMW_CREATE_SHADER 21
#define DRM_VMW_UNREF_SHADER 22
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 */

#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_HW_CAPS 3
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	uint64_t value;
	uint32_t param;
	uint32_t pad64;
};
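
/*
 * Example (illustrative sketch only, not part of the ABI): querying a
 * parameter from user space, assuming a vmwgfx DRM file descriptor in @fd
 * and libdrm's drmCommandWriteRead() helper:
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) == 0)
 *		have_3d = (arg.value != 0);	// value is written by the kernel
 */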

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	int32_t cid;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an uint64_t for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	uint64_t size_addr;
	int32_t shareable;
	int32_t scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	int32_t sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	uint32_t width;
	uint32_t height;
	uint32_t depth;
	uint32_t pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
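
/*
 * Example (illustrative sketch only, not part of the ABI): creating a simple
 * legacy surface with a single face and one mip level, assuming libdrm's
 * drmCommandWriteRead() and an SVGA3D format value (@format) known to the
 * host:
 *
 *	struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = format;		// e.g. an SVGA3D_* value
 *	arg.req.mip_levels[0] = 1;		// one face, one mip level
 *	arg.req.size_addr = (uint64_t)(unsigned long)&size;
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg, sizeof(arg)) == 0)
 *		sid = arg.rep.sid;		// rep overwrites req on return
 */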

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an uint64_t.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * uint64_t.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags. None currently.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 1

struct drm_vmw_execbuf_arg {
	uint64_t commands;
	uint32_t command_size;
	uint32_t throttle_us;
	uint64_t fence_rep;
	uint32_t version;
	uint32_t flags;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	uint32_t handle;
	uint32_t mask;
	uint32_t seqno;
	uint32_t passed_seqno;
	uint32_t pad64;
	int32_t error;
};
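
/*
 * Example (illustrative sketch only, not part of the ABI): submitting a raw
 * SVGA command buffer and asking for a fence back, assuming libdrm's
 * drmCommandWrite() and a command buffer already built at @cmd of
 * @cmd_size bytes:
 *
 *	struct drm_vmw_fence_rep fence_rep;
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.commands = (uint64_t)(unsigned long)cmd;
 *	arg.command_size = cmd_size;
 *	arg.throttle_us = 0;			// no throttling
 *	arg.fence_rep = (uint64_t)(unsigned long)&fence_rep;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */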

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_dmabuf_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_alloc_dmabuf_req {
	uint32_t size;
	uint32_t pad64;
};

/**
 * struct drm_vmw_dmabuf_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_dmabuf_rep {
	uint64_t map_handle;
	uint32_t handle;
	uint32_t cur_gmr_id;
	uint32_t cur_gmr_offset;
	uint32_t pad64;
};

/**
 * union drm_vmw_alloc_dmabuf_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

union drm_vmw_alloc_dmabuf_arg {
	struct drm_vmw_alloc_dmabuf_req req;
	struct drm_vmw_dmabuf_rep rep;
};
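
/*
 * Example (illustrative sketch only, not part of the ABI): allocating a DMA
 * buffer and mapping it into the process, assuming libdrm's
 * drmCommandWriteRead() and mmap() from <sys/mman.h>:
 *
 *	union drm_vmw_alloc_dmabuf_arg arg;
 *	size_t size = 64 * 1024;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.size = size;			// keep size; rep overwrites req
 *	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF, &arg, sizeof(arg)) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.rep.map_handle);
 */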

/*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 *
 */

/**
 * struct drm_vmw_unref_dmabuf_arg
 *
 * @handle: Handle indicating what buffer to free. Obtained from the
 * DRM_VMW_ALLOC_DMABUF Ioctl.
 *
 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
 */

struct drm_vmw_unref_dmabuf_arg {
	uint32_t handle;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	int32_t x;
	int32_t y;
	uint32_t w;
	uint32_t h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	uint32_t stream_id;
	uint32_t enabled;

	uint32_t flags;
	uint32_t color_key;

	uint32_t handle;
	uint32_t offset;
	int32_t format;
	uint32_t size;
	uint32_t width;
	uint32_t height;
	uint32_t pitch[3];

	uint32_t pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	uint32_t flags;
	uint32_t crtc_id;
	int32_t xpos;
	int32_t ypos;
	int32_t xhot;
	int32_t yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	uint32_t stream_id;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 *
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an uint64_t
 * @max_size: Maximum number of bytes to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	uint64_t buffer;
	uint32_t max_size;
	uint32_t pad64;
};
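
/*
 * Example (illustrative sketch only, not part of the ABI): reading the 3D
 * capability block, sizing the destination buffer with
 * DRM_VMW_PARAM_3D_CAPS_SIZE, assuming libdrm's drmCommandWriteRead() and
 * drmCommandWrite():
 *
 *	struct drm_vmw_getparam_arg gp = { .param = DRM_VMW_PARAM_3D_CAPS_SIZE };
 *	struct drm_vmw_get_3d_cap_arg cap;
 *	void *caps;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp, sizeof(gp));
 *	caps = calloc(1, gp.value);
 *	memset(&cap, 0, sizeof(cap));
 *	cap.buffer = (uint64_t)(unsigned long)caps;
 *	cap.max_size = gp.value;
 *	ret = drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap, sizeof(cap));
 */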

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	uint32_t handle;
	int32_t cookie_valid;
	uint64_t kernel_cookie;
	uint64_t timeout_us;
	int32_t lazy;
	int32_t flags;
	int32_t wait_options;
	int32_t pad64;
};
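
/*
 * Example (illustrative sketch only, not part of the ABI): waiting for the
 * fence returned by an earlier DRM_VMW_EXECBUF and dropping the reference in
 * the same call, assuming libdrm's drmCommandWriteRead():
 *
 *	struct drm_vmw_fence_wait_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.handle = fence_rep.handle;
 *	arg.timeout_us = 0;			// wait indefinitely
 *	arg.lazy = 1;				// timing not critical
 *	arg.flags = DRM_VMW_FENCE_FLAG_EXEC;
 *	arg.wait_options = DRM_VMW_WAIT_OPTION_UNREF;
 *	ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &arg, sizeof(arg));
 */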

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Flags signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
	uint32_t handle;
	uint32_t flags;
	int32_t signaled;
	uint32_t passed_seqno;
	uint32_t signaled_flags;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	uint32_t handle;
	uint32_t pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	uint64_t user_data;
	uint32_t tv_sec;
	uint32_t tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	uint64_t fence_rep;
	uint64_t user_data;
	uint32_t handle;
	uint32_t flags;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	uint32_t fb_id;
	uint32_t sid;
	int32_t dest_x;
	int32_t dest_y;
	uint64_t clips_ptr;
	uint32_t num_clips;
	uint32_t pad64;
};
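
/*
 * Example (illustrative sketch only, not part of the ABI): presenting a
 * surface onto a KMS framebuffer with a single full-surface clip rect,
 * assuming libdrm's drmCommandWrite() and an @fb_id / @sid obtained
 * elsewhere:
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = width, .h = height };
 *	struct drm_vmw_present_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.fb_id = fb_id;			// e.g. from DRM_IOCTL_MODE_ADDFB
 *	arg.sid = sid;				// surface to present
 *	arg.clips_ptr = (uint64_t)(unsigned long)&clip;
 *	arg.num_clips = 1;
 *	ret = drmCommandWrite(fd, DRM_VMW_PRESENT, &arg, sizeof(arg));
 */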


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	uint32_t fb_id;
	uint32_t num_clips;
	uint64_t clips_ptr;
	uint64_t fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an uint64_t
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	uint32_t num_outputs;
	uint32_t pad64;
	uint64_t rects;
};


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
	drm_vmw_shader_type_gs
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	uint32_t size;
	uint32_t buffer_handle;
	uint32_t shader_handle;
	uint64_t offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	uint32_t handle;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	uint32_t svga3d_flags;
	uint32_t format;
	uint32_t mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	uint32_t multisample_count;
	uint32_t autogen_filter;
	uint32_t buffer_handle;
	uint32_t pad64;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	uint32_t handle;
	uint32_t backup_size;
	uint32_t buffer_handle;
	uint32_t buffer_size;
	uint64_t buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
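
/*
 * Example (illustrative sketch only, not part of the ABI): creating a
 * guest-backed surface and letting the kernel allocate the backup buffer,
 * assuming libdrm's drmCommandWriteRead(), an SVGA3D format value (@format)
 * and the SVGA3D_INVALID_ID constant from the SVGA device headers:
 *
 *	union drm_vmw_gb_surface_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = format;		// e.g. an SVGA3D_* value
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;
 *	arg.req.base_size.width = 64;
 *	arg.req.base_size.height = 64;
 *	arg.req.base_size.depth = 1;
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE, &arg, sizeof(arg)) == 0)
 *		handle = arg.rep.handle;	// rep overwrites req on return
 */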

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	uint32_t handle;
	uint32_t pad64;
};
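
/*
 * Example (illustrative sketch only, not part of the ABI): grabbing a buffer
 * for CPU writes before filling it, then releasing it, assuming libdrm's
 * drmCommandWrite() and a buffer handle from DRM_VMW_ALLOC_DMABUF:
 *
 *	struct drm_vmw_synccpu_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.op = drm_vmw_synccpu_grab;
 *	arg.handle = handle;
 *	arg.flags = drm_vmw_synccpu_write;
 *	ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *
 *	// ... write to the mmap()ed buffer ...
 *
 *	arg.op = drm_vmw_synccpu_release;
 *	ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */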

#endif