/**************************************************************************
 *
 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20150626"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 7
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info on top of
 * the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_compat_shader
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_ctx_binding_type - abstract resource to context binding types
 */
enum vmw_ctx_binding_type {
	vmw_ctx_binding_shader,
	vmw_ctx_binding_rt,
	vmw_ctx_binding_tex,
	vmw_ctx_binding_max
};

/**
 * struct vmw_ctx_bindinfo - structure representing a single context binding
 *
 * @ctx: Pointer to the context structure. NULL means the binding is not
 * active.
 * @res: Non ref-counted pointer to the bound resource.
 * @bt: The binding type.
 * @i1: Union of information needed to unbind.
 */
struct vmw_ctx_bindinfo {
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	enum vmw_ctx_binding_type bt;
	bool scrubbed;
	union {
		SVGA3dShaderType shader_type;
		SVGA3dRenderTargetType rt_type;
		uint32 texture_stage;
	} i1;
};

/**
 * struct vmw_ctx_binding - structure representing a single context binding
 * - suitable for tracking in a context
 *
 * @ctx_list: List head for context.
 * @res_list: List head for bound resource.
 * @bi: Binding info
 */
struct vmw_ctx_binding {
	struct list_head ctx_list;
	struct list_head res_list;
	struct vmw_ctx_bindinfo bi;
};


/**
 * struct vmw_ctx_binding_state - context binding state
 *
 * @list: linked list of individual bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units/samplers bindings.
 * @shaders: Shader bindings.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 *
 */
struct vmw_ctx_binding_state {
	struct list_head list;
	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
};


/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};


struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state staged_bindings;
	struct list_head staged_cmd_res;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by irq_lock */
	int error_waiters; /* Protected by irq_lock */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;
	bool refuse_hibernation;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct ttm_buffer_object *otable_bo;
	struct vmw_otable *otables;

	struct vmw_cmdbuf_man *cman;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}
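
/*
 * Illustrative usage sketch, kept in a comment rather than compiled code:
 * both accessors take an SVGA register index; the SVGA_REG_* / SVGA_ID_*
 * names are assumed to come from the device headers (svga_reg.h).
 *
 *	uint32_t svga_id;
 *
 *	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 *	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
 */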

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       struct ttm_placement *placement,
				       bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
					 struct vmw_dma_buffer *buf,
					 bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
					   struct vmw_dma_buffer *bo,
					   bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
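
/*
 * Typical pinning sequence, shown as an illustrative comment only
 * (error unwinding omitted): pin a buffer while the device needs it
 * resident in VRAM, then unpin it when the device is done with it.
 *
 *	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... let the device use the buffer ...
 *	vmw_dmabuf_unpin(dev_priv, buf, false);
 */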

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);
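
/*
 * FIFO commands are normally submitted with a reserve/commit pair.
 * Illustrative comment-only sketch; the layout of *cmd is assumed and
 * error handling is reduced to a simple bail-out:
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	... fill in *cmd ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */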

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
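
/*
 * Illustrative iteration sketch (comment only, not compiled): advance
 * with vmw_piter_next() and read the current DMA address or page. The
 * vsgt pointer is assumed to come from vmw_bo_sg_table() above.
 *
 *	struct vmw_piter iter;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (likely(vmw_piter_next(&iter))) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		... hand addr to the device ...
 *	}
 */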

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);


/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * MemoryOBject management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
				   const struct vmw_ctx_bindinfo *ci);
extern void
vmw_context_binding_state_transfer(struct vmw_resource *res,
				   struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
				    u32 user_key, SVGA3dShaderType shader_type,
				    struct list_head *list);
extern struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
			 u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list);


/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);

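/*
 * Reserve/commit sketch for the command buffer manager, as an
 * illustrative comment only. It assumes a manager obtained from
 * vmw_cmdbuf_man_create() and that SVGA3D_INVALID_ID means "no context";
 * the error convention shown should be verified against vmwgfx_cmdbuf.c.
 *
 *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, NULL);
 *	if (IS_ERR_OR_NULL(cmd))
 *		return cmd ? PTR_ERR(cmd) : -ENOMEM;
 *	... fill in size bytes of commands at cmd ...
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */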

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}
#endif