/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20120209"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};

enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_max
};

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
};

struct vmw_marker_queue {
	struct list_head head;
	struct timespec lag;
	struct timespec lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

struct vmw_sw_context{
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< whether the call was made from the kernel */
	struct ttm_object_file *tfile;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	uint32_t fence_flags;
	struct ttm_buffer_object *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	int fence_queue_waiters; /* Protected by hw_mutex */
	int goal_queue_waiters; /* Protected by hw_mutex */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;

	struct mutex release_mutex;
	uint32_t num_3d_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct ttm_buffer_object *otable_bo;
	struct vmw_otable *otables;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}
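
/*
 * Example only, added for illustration; not part of the original header.
 * A minimal sketch of how the index/value port pair above is typically
 * used. The register name SVGA_REG_VRAM_SIZE comes from the SVGA register
 * headers and is assumed to be in scope here; access to the port pair is
 * assumed to be serialized by dev_priv->hw_mutex, as elsewhere in the
 * driver.
 */
static inline uint32_t vmw_read_vram_size_example(struct vmw_private *dev_priv)
{
	uint32_t size;

	mutex_lock(&dev_priv->hw_mutex);	/* serialize the index/value pair */
	size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	mutex_unlock(&dev_priv->hw_mutex);

	return size;
}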

int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);

/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;
extern const struct vmw_user_resource_conv *user_surface_converter;
extern const struct vmw_user_resource_conv *user_context_converter;
extern const struct vmw_user_resource_conv *user_shader_converter;

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
				   struct vmw_dma_buffer *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible);
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     bool pin, bool interruptible);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       bool pin, bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
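
/*
 * Example only, added for illustration; not part of the original header.
 * A sketch that walks all DMA addresses of a populated struct vmw_sg_table
 * with the iterator above. Following the @next contract documented for
 * struct vmw_piter, vmw_piter_start() leaves the iterator just before the
 * first page, so vmw_piter_next() must return true before each access.
 */
static inline void vmw_piter_walk_example(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter viter;

	vmw_piter_start(&viter, vsgt, 0);
	while (vmw_piter_next(&viter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&viter);

		(void) addr;	/* a real caller would hand addr to the device */
	}
}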

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *clips,
		     uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			struct page **data_pages, unsigned long num_data_pages,
			int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Shader management - vmwgfx_shader.c
 */

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}
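
/*
 * Example only, added for illustration; not part of the original header.
 * A sketch of the intended pairing of the reference helpers above:
 * vmw_dmabuf_unreference() clears the caller's pointer before dropping
 * the underlying TTM reference, so the pointer is never left dangling.
 */
static inline void vmw_dmabuf_ref_example(struct vmw_dma_buffer *buf)
{
	struct vmw_dma_buffer *ref = vmw_dmabuf_reference(buf);

	if (ref != NULL) {
		/* ... use ref while the reference is held ... */
		vmw_dmabuf_unreference(&ref);	/* ref is NULL afterwards */
	}
}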

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
#endif