/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20121114"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
                        VMWGFX_NUM_GB_SHADER +\
                        VMWGFX_NUM_GB_SURFACE)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
        struct drm_master *locked_master;
        struct ttm_object_file *tfile;
        struct list_head fence_events;
};

struct vmw_dma_buffer {
        struct ttm_buffer_object base;
        struct list_head res_list;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Validate the buffer for placement as a MOB
 * (guest-backed object) buffer.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
        struct ttm_validate_buffer base;
        struct drm_hash_item hash;
        bool validate_as_mob;
};

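/*
 * Illustrative sketch, not part of the driver API: the @hash member lets the
 * execbuf code map a TTM buffer object back to its struct vmw_validate_buffer
 * through a drm_open_hash table keyed on the buffer object pointer, along the
 * lines of the snippet below. The variable names are hypothetical.
 *
 *	struct drm_hash_item *hash;
 *	struct vmw_validate_buffer *vbuf = NULL;
 *
 *	if (likely(drm_ht_find_item(&ht, (unsigned long) bo, &hash) == 0))
 *		vbuf = container_of(hash, struct vmw_validate_buffer, hash);
 */
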
struct vmw_res_func;
struct vmw_resource {
        struct kref kref;
        struct vmw_private *dev_priv;
        int id;
        bool avail;
        unsigned long backup_size;
        bool res_dirty; /* Protected by backup buffer reserved */
        bool backup_dirty; /* Protected by backup buffer reserved */
        struct vmw_dma_buffer *backup;
        unsigned long backup_offset;
        const struct vmw_res_func *func;
        struct list_head lru_head; /* Protected by the resource lock */
        struct list_head mob_head; /* Protected by @backup reserved */
        void (*res_free) (struct vmw_resource *res);
        void (*hw_destroy) (struct vmw_resource *res);
};

enum vmw_res_type {
        vmw_res_context,
        vmw_res_surface,
        vmw_res_stream,
        vmw_res_shader,
        vmw_res_max
};

struct vmw_cursor_snooper {
        struct drm_crtc *crtc;
        size_t age;
        uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
        struct vmw_resource res;
        uint32_t flags;
        uint32_t format;
        uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        struct drm_vmw_size base_size;
        struct drm_vmw_size *sizes;
        uint32_t num_sizes;
        bool scanout;
        /* TODO: so far just an extra pointer */
        struct vmw_cursor_snooper snooper;
        struct vmw_surface_offset *offsets;
        SVGA3dTextureFilter autogen_filter;
        uint32_t multisample_count;
};

struct vmw_marker_queue {
        struct list_head head;
        struct timespec lag;
        struct timespec lag_time;
        spinlock_t lock;
};

struct vmw_fifo_state {
        unsigned long reserved_size;
        __le32 *dynamic_buffer;
        __le32 *static_buffer;
        unsigned long static_buffer_size;
        bool using_bounce_buffer;
        uint32_t capabilities;
        struct mutex fifo_mutex;
        struct rw_semaphore rwsem;
        struct vmw_marker_queue marker_queue;
};

struct vmw_relocation {
        SVGAMobId *mob_loc;
        SVGAGuestPtr *location;
        uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @node: Validation-list metadata node for the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
        bool valid;
        uint32_t handle;
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
};

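/*
 * Illustrative sketch, not driver code: the intended cache-hit pattern.
 * A hit avoids a full user-space handle lookup; only the struct members
 * are real, the surrounding names are hypothetical.
 *
 *	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
 *
 *	if (rcache->valid && rcache->handle == handle)
 *		res = rcache->res;	// cached, no handle lookup needed
 *	else
 *		;			// slow path: look up, refresh cache
 */
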
/**
 * enum vmw_dma_map_mode - indicates how TTM pages are DMA-mapped.
 */
enum vmw_dma_map_mode {
        vmw_dma_phys,           /* Use physical page addresses */
        vmw_dma_alloc_coherent, /* Use TTM coherent pages */
        vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
        vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
        vmw_dma_map_max
};

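/*
 * Illustrative sketch of how a mapping mode might be chosen at device-init
 * time; the real, more involved policy lives in the driver's init code
 * (cf. the mode selection in vmwgfx_drv.c). The helper name and the exact
 * decision order here are hypothetical.
 *
 *	static enum vmw_dma_map_mode example_select_mode(bool iommu_present,
 *							 bool coherent_ok)
 *	{
 *		if (iommu_present)
 *			return vmw_dma_map_populate;
 *		if (coherent_ok)
 *			return vmw_dma_alloc_coherent;
 *		return vmw_dma_phys;
 *	}
 */
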
/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode in use.
 * @pages: Array of page pointers backing the table.
 * @addrs: Array of DMA addresses, used when coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 * @num_pages: Total number of pages.
 */
struct vmw_sg_table {
        enum vmw_dma_map_mode mode;
        struct page **pages;
        const dma_addr_t *addrs;
        struct sg_table *sgt;
        unsigned long num_regions;
        unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
        struct page **pages;
        const dma_addr_t *addrs;
        struct sg_page_iter iter;
        unsigned long i;
        unsigned long num_pages;
        bool (*next)(struct vmw_piter *);
        dma_addr_t (*dma_address)(struct vmw_piter *);
        struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_ctx_binding_type - abstract resource to context binding types
 */
enum vmw_ctx_binding_type {
        vmw_ctx_binding_shader,
        vmw_ctx_binding_rt,
        vmw_ctx_binding_tex,
        vmw_ctx_binding_max
};

/**
 * struct vmw_ctx_bindinfo - structure representing a single context binding
 *
 * @ctx: Pointer to the context structure. NULL means the binding is not
 * active.
 * @bt: The binding type.
 * @i1: Union of information needed to unbind.
 */
struct vmw_ctx_bindinfo {
        struct vmw_resource *ctx;
        enum vmw_ctx_binding_type bt;
        union {
                SVGA3dShaderType shader_type;
                SVGA3dRenderTargetType rt_type;
                uint32 texture_stage;
        } i1;
};

/**
 * struct vmw_ctx_binding - structure representing a single context binding,
 * suitable for tracking in a context
 *
 * @ctx_list: List head for context.
 * @bi: Binding info
 */
struct vmw_ctx_binding {
        struct list_head ctx_list;
        struct vmw_ctx_bindinfo bi;
};

/**
 * struct vmw_ctx_binding_state - context binding state
 *
 * @list: linked list of individual bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units/samplers bindings.
 * @shaders: Shader bindings.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 */
struct vmw_ctx_binding_state {
        struct list_head list;
        struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
        struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
        struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
};

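/*
 * Illustrative sketch, not part of the driver: staging a render-target
 * binding. Because struct vmw_ctx_binding_state embeds one struct
 * vmw_ctx_binding per bindable slot, vmw_context_binding_add() (declared
 * further down) can track the binding without allocating memory. The
 * specific values below are hypothetical.
 *
 *	struct vmw_ctx_bindinfo bi;
 *	int ret;
 *
 *	bi.ctx = ctx_res;
 *	bi.bt = vmw_ctx_binding_rt;
 *	bi.i1.rt_type = SVGA3D_RT_COLOR0;
 *	ret = vmw_context_binding_add(&sw_context->staged_bindings, &bi);
 */
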
struct vmw_sw_context {
        struct drm_open_hash res_ht;
        bool res_ht_initialized;
        bool kernel; /**< is the call made from the kernel */
        struct ttm_object_file *tfile;
        struct list_head validate_nodes;
        struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
        uint32_t cur_reloc;
        struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
        uint32_t cur_val_buf;
        uint32_t *cmd_bounce;
        uint32_t cmd_bounce_size;
        struct list_head resource_list;
        uint32_t fence_flags;
        struct ttm_buffer_object *cur_query_bo;
        struct list_head res_relocations;
        uint32_t *buf_start;
        struct vmw_res_cache_entry res_cache[vmw_res_max];
        struct vmw_resource *last_query_ctx;
        bool needs_post_query_barrier;
        struct vmw_resource *error_resource;
        struct vmw_ctx_binding_state staged_bindings;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
        struct ttm_lock lock;
        struct mutex fb_surf_mutex;
        struct list_head fb_surf;
};

struct vmw_vga_topology_state {
        uint32_t width;
        uint32_t height;
        uint32_t primary;
        uint32_t pos_x;
        uint32_t pos_y;
};

struct vmw_private {
        struct ttm_bo_device bdev;
        struct ttm_bo_global_ref bo_global_ref;
        struct drm_global_reference mem_global_ref;

        struct vmw_fifo_state fifo;

        struct drm_device *dev;
        unsigned long vmw_chipset;
        unsigned int io_start;
        uint32_t vram_start;
        uint32_t vram_size;
        uint32_t prim_bb_mem;
        uint32_t mmio_start;
        uint32_t mmio_size;
        uint32_t fb_max_width;
        uint32_t fb_max_height;
        uint32_t initial_width;
        uint32_t initial_height;
        __le32 __iomem *mmio_virt;
        int mmio_mtrr;
        uint32_t capabilities;
        uint32_t max_gmr_ids;
        uint32_t max_gmr_pages;
        uint32_t max_mob_pages;
        uint32_t memory_size;
        bool has_gmr;
        bool has_mob;
        struct mutex hw_mutex;

        /*
         * VGA registers.
         */

        struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
        uint32_t vga_width;
        uint32_t vga_height;
        uint32_t vga_bpp;
        uint32_t vga_bpl;
        uint32_t vga_pitchlock;

        uint32_t num_displays;

        /*
         * Framebuffer info.
         */

        void *fb_info;
        struct vmw_legacy_display *ldu_priv;
        struct vmw_screen_object_display *sou_priv;
        struct vmw_overlay *overlay_priv;

        /*
         * Context and surface management.
         */

        rwlock_t resource_lock;
        struct idr res_idr[vmw_res_max];
        /*
         * Block lastclose from racing with firstopen.
         */

        struct mutex init_mutex;

        /*
         * A resource manager for kernel-only surfaces and
         * contexts.
         */

        struct ttm_object_device *tdev;

        /*
         * Fencing and IRQs.
         */

        atomic_t marker_seq;
        wait_queue_head_t fence_queue;
        wait_queue_head_t fifo_queue;
        int fence_queue_waiters; /* Protected by hw_mutex */
        int goal_queue_waiters; /* Protected by hw_mutex */
        atomic_t fifo_queue_waiters;
        uint32_t last_read_seqno;
        spinlock_t irq_lock;
        struct vmw_fence_manager *fman;
        uint32_t irq_mask;

        /*
         * Device state
         */

        uint32_t traces_state;
        uint32_t enable_state;
        uint32_t config_done_state;

        /**
         * Execbuf
         */
        /**
         * Protected by the cmdbuf mutex.
         */

        struct vmw_sw_context ctx;
        struct mutex cmdbuf_mutex;

        /**
         * Operating mode.
         */

        bool stealth;
        bool enable_fb;

        /**
         * Master management.
         */

        struct vmw_master *active_master;
        struct vmw_master fbdev_master;
        struct notifier_block pm_nb;
        bool suspended;

        struct mutex release_mutex;
        uint32_t num_3d_resources;

        /*
         * Query processing. These members
         * are protected by the cmdbuf mutex.
         */

        struct ttm_buffer_object *dummy_query_bo;
        struct ttm_buffer_object *pinned_bo;
        uint32_t query_cid;
        uint32_t query_cid_valid;
        bool dummy_query_bo_pinned;

        /*
         * Surface swapping. The "surface_lru" list is protected by the
         * resource lock in order to be able to destroy a surface and take
         * it off the lru atomically. "used_memory_size" is currently
         * protected by the cmdbuf mutex for simplicity.
         */

        struct list_head res_lru[vmw_res_max];
        uint32_t used_memory_size;

        /*
         * DMA mapping stuff.
         */
        enum vmw_dma_map_mode map_mode;

        /*
         * Guest Backed stuff
         */
        struct ttm_buffer_object *otable_bo;
        struct vmw_otable *otables;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
        return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
        return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
        return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
        return (struct vmw_master *) master->driver_priv;
}

static inline void vmw_write(struct vmw_private *dev_priv,
                             unsigned int offset, uint32_t value)
{
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
                                unsigned int offset)
{
        uint32_t val;

        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
        return val;
}

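/*
 * Illustrative note: vmw_write()/vmw_read() implement the SVGA index/value
 * register protocol. Each access is a non-atomic two-port sequence, so
 * callers typically serialize with dev_priv->hw_mutex. A minimal sketch
 * (the register chosen is one the SVGA device defines; the surrounding
 * code is hypothetical):
 *
 *	mutex_lock(&dev_priv->hw_mutex);
 *	vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 *	traces = vmw_read(dev_priv, SVGA_REG_TRACES);
 *	mutex_unlock(&dev_priv->hw_mutex);
 */
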
int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);

/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
                        const struct vmw_sg_table *vsgt,
                        unsigned long num_pages,
                        int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t handle,
                                  struct vmw_surface **out_surf,
                                  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
        struct vmw_private *dev_priv,
        struct ttm_object_file *tfile,
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                           struct vmw_dma_buffer *vmw_bo,
                           size_t size, struct ttm_placement *placement,
                           bool interruptible,
                           void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                         struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                                 struct ttm_object_file *tfile,
                                 uint32_t size,
                                 bool shareable,
                                 uint32_t *handle,
                                 struct vmw_dma_buffer **p_dma_buf);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                                     struct vmw_dma_buffer *dma_buf,
                                     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                         struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                         uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                                  uint32_t id, struct vmw_dma_buffer **out);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t *inout_id,
                                  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
                                   struct vmw_dma_buffer *new_backup,
                                   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                                     struct ttm_mem_reg *mem);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                                struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
                                   struct vmw_dma_buffer *bo,
                                   struct ttm_placement *placement,
                                   bool interruptible);
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
                              struct vmw_dma_buffer *buf,
                              bool pin, bool interruptible);
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
                                     struct vmw_dma_buffer *buf,
                                     bool pin, bool interruptible);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
                                       struct vmw_dma_buffer *bo,
                                       bool pin, bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
                            struct vmw_dma_buffer *bo,
                            bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
                                 SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
                                  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
                             size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
                         struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
                             struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
                               uint32_t *seqno);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
                                     uint32_t cid);

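/*
 * Illustrative sketch of the reserve/fill/commit pattern used with the
 * command FIFO: vmw_fifo_reserve() returns a pointer into the FIFO (or a
 * bounce buffer), the caller fills in the command, and vmw_fifo_commit()
 * makes it visible to the device. The command chosen below is just an
 * example; x/y/width/height are hypothetical.
 *
 *	struct {
 *		uint32_t header;
 *		SVGAFifoCmdUpdate body;
 *	} *cmd;
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	cmd->header = SVGA_CMD_UPDATE;
 *	cmd->body.x = x;
 *	cmd->body.y = y;
 *	cmd->body.width = width;
 *	cmd->body.height = height;
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */
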
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern void vmw_piter_start(struct vmw_piter *viter,
                            const struct vmw_sg_table *vsgt,
                            unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
        return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
        return viter->page(viter);
}

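/*
 * Illustrative sketch (the walker function is hypothetical): iterating over
 * all pages of a vmw_sg_table. vmw_piter_start() positions the iterator just
 * before page @p_offs, so vmw_piter_next() must be called before the first
 * access.
 *
 *	static void example_walk(const struct vmw_sg_table *vsgt)
 *	{
 *		struct vmw_piter viter;
 *
 *		vmw_piter_start(&viter, vsgt, 0);
 *		while (vmw_piter_next(&viter)) {
 *			dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *
 *			// program addr into the device ...
 *		}
 *	}
 */
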
/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
                               struct vmw_private *dev_priv,
                               void __user *user_commands,
                               void *kernel_commands,
                               uint32_t command_size,
                               uint64_t throttle_us,
                               struct drm_vmw_fence_rep __user
                               *user_fence_rep,
                               struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                            struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                                      struct vmw_private *dev_priv,
                                      struct vmw_fence_obj **p_fence,
                                      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                                        struct vmw_fpriv *vmw_fp,
                                        int ret,
                                        struct drm_vmw_fence_rep __user
                                        *user_fence_rep,
                                        struct vmw_fence_obj *fence,
                                        uint32_t fence_handle);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
                          uint32_t seqno, bool interruptible,
                          unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
                             uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
                             bool lazy,
                             bool fifo_idle,
                             uint32_t seqno,
                             bool interruptible,
                             unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
                             struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
                           uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
                           uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
                        struct vmw_marker_queue *queue, uint32_t us);

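/*
 * Illustrative note: markers are pushed as seqnos are emitted and pulled as
 * they signal, giving a running estimate of device lag; vmw_wait_lag() then
 * blocks a submitter until the lag drops below @us microseconds. A minimal
 * throttling sketch (the surrounding code is hypothetical):
 *
 *	if (throttle_us)
 *		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
 *				   throttle_us);
 */
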
/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                          struct ttm_object_file *tfile,
                          struct ttm_buffer_object *bo,
                          SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
                       unsigned width, unsigned height, unsigned pitch,
                       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                uint32_t pitch,
                                uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
                    struct drm_file *file_priv,
                    struct vmw_framebuffer *vfb,
                    struct vmw_surface *surface,
                    uint32_t sid, int32_t destX, int32_t destY,
                    struct drm_vmw_rect *clips,
                    uint32_t num_clips);
int vmw_kms_readback(struct vmw_private *dev_priv,
                     struct drm_file *file_priv,
                     struct vmw_framebuffer *vfb,
                     struct drm_vmw_fence_rep __user *user_fence_rep,
                     struct drm_vmw_rect *clips,
                     uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);

int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle);
/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  uint32_t handle, uint32_t flags,
                                  int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
                        struct page **data_pages, unsigned long num_data_pages,
                        int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
                           struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);

extern int vmw_context_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             int id,
                             struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
                                   const struct vmw_ctx_bindinfo *ci);
extern void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);

/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
                                struct vmw_surface *srf);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
        struct vmw_surface *tmp_srf = *srf;
        struct vmw_resource *res = &tmp_srf->res;
        *srf = NULL;

        vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
        (void) vmw_resource_reference(&srf->res);
        return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
        struct vmw_dma_buffer *tmp_buf = *buf;

        *buf = NULL;
        if (tmp_buf != NULL) {
                struct ttm_buffer_object *bo = &tmp_buf->base;

                ttm_bo_unref(&bo);
        }
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
        if (ttm_bo_reference(&buf->base))
                return buf;
        return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
        return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
#endif