/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20140325"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 6
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_compat_shader_manager;

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
	bool gb_aware;
	struct vmw_compat_shader_manager *shman;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};

enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_max
};

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @node: Pointer to the validation-list node for this resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};
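
/*
 * A minimal, illustrative sketch of the lookup-avoidance pattern this
 * cache enables; the helper below is hypothetical and not part of the
 * driver, and struct vmw_sw_context is defined further down:
 *
 *	static struct vmw_resource *
 *	example_cached_res(struct vmw_sw_context *sw_context,
 *			   enum vmw_res_type type, uint32_t handle)
 *	{
 *		struct vmw_res_cache_entry *rcache =
 *			&sw_context->res_cache[type];
 *
 *		if (rcache->valid && rcache->handle == handle)
 *			return rcache->res;
 *		return NULL;
 *	}
 *
 * On a miss, the execbuf code would fall back to a full
 * vmw_user_resource_lookup_handle() and refill the entry.
 */
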
/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page DMA mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode used for this table.
 * @pages: Array of pointers to the backing pages.
 * @addrs: DMA addresses of the pages if coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 * @num_pages: Total number of backing pages.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_ctx_binding_type - abstract resource to context binding types
 */
enum vmw_ctx_binding_type {
	vmw_ctx_binding_shader,
	vmw_ctx_binding_rt,
	vmw_ctx_binding_tex,
	vmw_ctx_binding_max
};

/**
 * struct vmw_ctx_bindinfo - structure representing a single context binding
 *
 * @ctx: Pointer to the context structure. NULL means the binding is not
 * active.
 * @res: Non ref-counted pointer to the bound resource.
 * @bt: The binding type.
 * @scrubbed: Whether the binding has been scrubbed from the context.
 * @i1: Union of information needed to unbind.
 */
struct vmw_ctx_bindinfo {
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	enum vmw_ctx_binding_type bt;
	bool scrubbed;
	union {
		SVGA3dShaderType shader_type;
		SVGA3dRenderTargetType rt_type;
		uint32 texture_stage;
	} i1;
};

/**
 * struct vmw_ctx_binding - structure representing a single context binding
 * - suitable for tracking in a context
 *
 * @ctx_list: List head for context.
 * @res_list: List head for bound resource.
 * @bi: Binding info
 */
struct vmw_ctx_binding {
	struct list_head ctx_list;
	struct list_head res_list;
	struct vmw_ctx_bindinfo bi;
};

/**
 * struct vmw_ctx_binding_state - context binding state
 *
 * @list: linked list of individual bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units/samplers bindings.
 * @shaders: Shader bindings.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 */
struct vmw_ctx_binding_state {
	struct list_head list;
	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
};
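
/*
 * Illustrative only: how a shader binding might be described in a
 * struct vmw_ctx_bindinfo before being tracked with
 * vmw_context_binding_add() (declared below). The variable names are
 * made up for the example:
 *
 *	struct vmw_ctx_bindinfo bi;
 *
 *	bi.ctx = ctx_res;
 *	bi.res = shader_res;
 *	bi.bt = vmw_ctx_binding_shader;
 *	bi.i1.shader_type = SVGA3D_SHADERTYPE_VS;
 *	ret = vmw_context_binding_add(cbs, &bi);
 */
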
struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< whether the call was made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	uint32_t fence_flags;
	struct ttm_buffer_object *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state staged_bindings;
	struct list_head staged_shaders;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	int fence_queue_waiters; /* Protected by hw_mutex */
	int goal_queue_waiters; /* Protected by hw_mutex */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state.
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/*
	 * Execbuf state. Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/*
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;

	/*
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;

	struct mutex release_mutex;
	uint32_t num_3d_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff.
	 */
	struct ttm_buffer_object *otable_bo;
	struct vmw_otable *otables;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}
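
/*
 * vmw_write() and vmw_read() implement the indexed SVGA register
 * protocol: the register index is written to the index port, then the
 * value is transferred through the value port. A sketch of typical use,
 * assuming the SVGA_REG_ENABLE register index from the register headers
 * and a caller that serializes access via hw_mutex:
 *
 *	mutex_lock(&dev_priv->hw_mutex);
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 *	enabled = vmw_read(dev_priv, SVGA_REG_ENABLE);
 *	mutex_unlock(&dev_priv->hw_mutex);
 *
 * Without such serialization, a concurrent index write could be
 * interleaved between the index and value accesses.
 */
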
int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);

/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
				   struct vmw_dma_buffer *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible);
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     bool pin, bool interruptible);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       bool pin, bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);

/**
 * Misc ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * FIFO utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
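
/*
 * A minimal sketch of walking a buffer's backing pages with the iterator
 * above, assuming @vsgt was obtained from vmw_bo_sg_table():
 *
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *
 *		consume(addr);
 *	}
 *
 * vmw_piter_start() positions the iterator one step before the first
 * page, so vmw_piter_next() must be called before the first access;
 * consume() is a stand-in for real per-page work.
 */
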
/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);
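
/*
 * Illustrative throttling flow, assuming a submission path that wants to
 * bound the marker lag to @throttle_us microseconds:
 *
 *	ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
 *			   throttle_us);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 * A marker is pushed with the seqno of each submission, and markers are
 * pulled as the device signals seqnos, which is what keeps the measured
 * lag up to date.
 */
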
/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *clips,
		     uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR ID manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
				   const struct vmw_ctx_bindinfo *ci);
extern void
vmw_context_binding_state_transfer(struct vmw_resource *res,
				   struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);

/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
				    SVGA3dShaderType shader_type,
				    u32 *user_key);
extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
				      struct list_head *list);
extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
				      struct list_head *list);
extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
				    u32 user_key,
				    SVGA3dShaderType shader_type,
				    struct list_head *list);
extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct ttm_object_file *tfile,
				 struct list_head *list);
extern struct vmw_compat_shader_manager *
vmw_compat_shader_man_create(struct vmw_private *dev_priv);
extern void
vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}
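
/*
 * Typical reference handling with the helpers above; base_srf here is
 * illustrative:
 *
 *	struct vmw_surface *srf = vmw_surface_reference(base_srf);
 *
 *	use(srf);
 *	vmw_surface_unreference(&srf);
 *
 * The unreference helpers take a pointer-to-pointer and clear the
 * caller's pointer to NULL, so a stale reference cannot be reused by
 * mistake. use() is a stand-in for real work.
 */
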
1071
Thomas Hellstromae2a1042011-09-01 20:18:44 +00001072static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
1073{
1074 return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
1075}
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001076#endif