/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20150810"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 9
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Whether the buffer must be validated as a MOB.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @node: The validation-list node for this resource, if any.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page DMA mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address-contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays.
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};


struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	unsigned num_implicit;
	struct vmw_framebuffer *implicit_fb;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;
	bool refuse_hibernation;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}
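
/*
 * Illustrative sketch (not part of the driver API): a read-modify-write
 * of an SVGA register using the accessors above. Each call takes and
 * releases hw_lock internally, so the two accesses below are
 * individually, but not jointly, atomic:
 *
 *	u32 traces = vmw_read(dev_priv, SVGA_REG_TRACES);
 *
 *	vmw_write(dev_priv, SVGA_REG_TRACES, traces | 1);
 */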

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf,
				 struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out,
				  struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       struct ttm_placement *placement,
				       bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
					 struct vmw_dma_buffer *buf,
					 bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
					   struct vmw_dma_buffer *bo,
					   bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);

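/*
 * Illustrative sketch (not part of the driver): the usual
 * reserve/fill/commit pattern for emitting a command to the FIFO,
 * with the command layout itself elided:
 *
 *	void *cmd = vmw_fifo_reserve(dev_priv, size);
 *
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	(fill in the command at cmd, exactly size bytes)
 *	vmw_fifo_commit(dev_priv, size);
 */
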
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
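
/*
 * Illustrative sketch (not part of the driver): walking all pages of a
 * buffer object's backing storage, independently of whether that
 * storage is a page array, a coherent-page array or a scatter-gather
 * list. Assumes @bo has been populated:
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *		(bind addr to the device)
 *	}
 */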

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
			     struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);


/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000886
887/**
888 * Kernel framebuffer - vmwgfx_fb.c
889 */
890
891int vmw_fb_init(struct vmw_private *vmw_priv);
892int vmw_fb_close(struct vmw_private *dev_priv);
893int vmw_fb_off(struct vmw_private *vmw_priv);
894int vmw_fb_on(struct vmw_private *vmw_priv);
895
896/**
897 * Kernel modesetting - vmwgfx_kms.c
898 */
899
900int vmw_kms_init(struct vmw_private *dev_priv);
901int vmw_kms_close(struct vmw_private *dev_priv);
902int vmw_kms_save_vga(struct vmw_private *vmw_priv);
903int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
904int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
905 struct drm_file *file_priv);
906void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
907void vmw_kms_cursor_snoop(struct vmw_surface *srf,
908 struct ttm_object_file *tfile,
909 struct ttm_buffer_object *bo,
910 SVGA3dCmdHeader *header);
Michel Dänzer0bef23f2011-08-31 07:42:50 +0000911int vmw_kms_write_svga(struct vmw_private *vmw_priv,
912 unsigned width, unsigned height, unsigned pitch,
913 unsigned bpp, unsigned depth);
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200914void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
Thomas Hellstrome133e732010-10-05 12:43:04 +0200915bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
916 uint32_t pitch,
917 uint32_t height);
Thierry Reding88e72712015-09-24 18:35:31 +0200918u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
919int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
920void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
Jakob Bornecrantz2fcd5a72011-10-04 20:13:26 +0200921int vmw_kms_present(struct vmw_private *dev_priv,
922 struct drm_file *file_priv,
923 struct vmw_framebuffer *vfb,
924 struct vmw_surface *surface,
925 uint32_t sid, int32_t destX, int32_t destY,
926 struct drm_vmw_rect *clips,
927 uint32_t num_clips);
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +0200928int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
929 struct drm_file *file_priv);
Thomas Hellstrom8fbf9d92015-11-26 19:45:16 +0100930void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000931
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory OBject (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);


/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}
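
/*
 * A minimal sketch (not part of the driver) of how the reference
 * helpers above pair up. Unreference clears the caller's pointer so a
 * stale reference cannot be reused afterwards:
 *
 *	struct vmw_dma_buffer *ref = vmw_dmabuf_reference(buf);
 *
 *	(use ref; buf may be dropped independently)
 *	vmw_dmabuf_unreference(&ref);	(ref is now NULL)
 */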

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform an MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform an MMIO write to volatile memory
 *
 * @value: The value to write
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32() on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}
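
/*
 * Illustrative sketch (not part of the driver): reading a FIFO register
 * through the memremap'd MMIO area. SVGA_FIFO_MIN is a real FIFO
 * register index; the surrounding setup is assumed:
 *
 *	u32 *fifo_mem = dev_priv->mmio_virt;
 *	u32 min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 */
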
#endif