/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_auth.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 10
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted.  Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
};
/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Whether the buffer should be validated as a MOB.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};
struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};
/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @node: Pointer to the validation metadata of the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};
/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};
/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode used when the table was populated.
 * @pages: Array of page pointers, used when pages are tracked as an array.
 * @addrs: Array of DMA addresses, used when coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 * @num_pages: Total number of pages described by the table.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};
/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses of the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};
/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};


struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;
	bool assume_16bpp;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	unsigned num_implicit;
	struct vmw_framebuffer *implicit_fb;
	struct mutex global_kms_state_mutex;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;
	bool refuse_hibernation;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing-critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}
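
/*
 * Example (illustrative sketch only, not part of the driver): because the
 * device exposes an index/value port pair, every register access is a
 * two-step operation, which is why both helpers take hw_lock. Callers
 * never touch the ports directly; they simply do something like the
 * following (SVGA_REG_ID and SVGA_REG_ENABLE are assumed to come from the
 * SVGA register headers):
 *
 *	u32 id = vmw_read(dev_priv, SVGA_REG_ID);
 *
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 */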

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf,
				 struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out,
				  struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
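
/*
 * Example (illustrative sketch only): resources are reference-counted,
 * and vmw_resource_unreference() takes a double pointer so it can clear
 * the caller's pointer when the reference is dropped. A hypothetical
 * caller that needs to hold on to a resource across an operation that
 * may drop other references might do:
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *	... sleeping operation ...
 *
 *	vmw_resource_unreference(&tmp);
 */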

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       struct ttm_placement *placement,
				       bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
					 struct vmw_dma_buffer *buf,
					 bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
					   struct vmw_dma_buffer *bo,
					   bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
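
/*
 * Example (illustrative sketch only): the pin helpers pair with
 * vmw_dmabuf_unpin(), so a hypothetical caller that needs a buffer to
 * stay resident in VRAM for the duration of an operation might do:
 *
 *	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	... use the buffer at a fixed VRAM location ...
 *
 *	vmw_dmabuf_unpin(dev_priv, buf, false);
 */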

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);
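
/*
 * Example (illustrative sketch only, with a hypothetical command struct):
 * command submission throughout the driver follows a reserve/fill/commit
 * pattern, where vmw_fifo_reserve() returns NULL if FIFO space cannot be
 * made available:
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	... fill in *cmd ...
 *
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */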

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}
/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the struct page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
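
/*
 * Example (illustrative sketch only): the iterator starts one step before
 * the first page, so vmw_piter_next() must be called before the first
 * access, making a plain while-loop the natural usage:
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *
 *		... program one page address into the device ...
 *	}
 */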

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
			     struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);


/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);
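
/*
 * Example (illustrative sketch only, with an arbitrary timeout): a caller
 * that has emitted a fence with vmw_fifo_send_fence() can block until the
 * device has passed that sequence number:
 *
 *	uint32_t seqno;
 *
 *	ret = vmw_fifo_send_fence(dev_priv, &seqno);
 *	if (likely(ret == 0) && !vmw_seqno_passed(dev_priv, seqno))
 *		ret = vmw_wait_seqno(dev_priv, false, seqno, true, 3 * HZ);
 */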
Thomas Hellstrom | 1925d45 | 2010-05-28 11:21:57 +0200 | [diff] [blame] | 875 | |
| 876 | /** |
Thomas Hellstrom | 6bcd8d3c | 2011-09-01 20:18:42 +0000 | [diff] [blame] | 877 | * Rudimentary fence-like objects currently used only for throttling - |
| 878 | * vmwgfx_marker.c |
Thomas Hellstrom | 1925d45 | 2010-05-28 11:21:57 +0200 | [diff] [blame] | 879 | */ |
| 880 | |
Thomas Hellstrom | 6bcd8d3c | 2011-09-01 20:18:42 +0000 | [diff] [blame] | 881 | extern void vmw_marker_queue_init(struct vmw_marker_queue *queue); |
| 882 | extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue); |
| 883 | extern int vmw_marker_push(struct vmw_marker_queue *queue, |
Sinclair Yeh | c8261a9 | 2015-06-26 01:23:42 -0700 | [diff] [blame] | 884 | uint32_t seqno); |
Thomas Hellstrom | 6bcd8d3c | 2011-09-01 20:18:42 +0000 | [diff] [blame] | 885 | extern int vmw_marker_pull(struct vmw_marker_queue *queue, |
Sinclair Yeh | c8261a9 | 2015-06-26 01:23:42 -0700 | [diff] [blame] | 886 | uint32_t signaled_seqno); |
Thomas Hellstrom | 1925d45 | 2010-05-28 11:21:57 +0200 | [diff] [blame] | 887 | extern int vmw_wait_lag(struct vmw_private *dev_priv, |
Thomas Hellstrom | 6bcd8d3c | 2011-09-01 20:18:42 +0000 | [diff] [blame] | 888 | struct vmw_marker_queue *queue, uint32_t us); |
Jakob Bornecrantz | fb1d973 | 2009-12-10 00:19:58 +0000 | [diff] [blame] | 889 | |
| 890 | /** |
| 891 | * Kernel framebuffer - vmwgfx_fb.c |
| 892 | */ |
| 893 | |
| 894 | int vmw_fb_init(struct vmw_private *vmw_priv); |
| 895 | int vmw_fb_close(struct vmw_private *dev_priv); |
| 896 | int vmw_fb_off(struct vmw_private *vmw_priv); |
| 897 | int vmw_fb_on(struct vmw_private *vmw_priv); |

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
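/*
 * Example (sketch; the 32-bpp pitch computation is an assumption for
 * argument's sake): a connector's mode_valid hook can reject modes
 * whose scanout buffer would not fit in the VRAM the device exposes:
 *
 *	u32 pitch = mode->hdisplay * 4;	// assuming 32 bpp
 *	if (!vmw_kms_validate_mode_vram(dev_priv, pitch, mode->vdisplay))
 *		return MODE_MEM;
 */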

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
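/*
 * Example (hedged sketch of the generic DRM dumb-buffer flow these three
 * entry points implement; the argument values are hypothetical):
 *
 *	struct drm_mode_create_dumb args = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	uint64_t offset;
 *
 *	ret = vmw_dumb_create(file_priv, dev, &args);
 *	ret = vmw_dumb_map_offset(file_priv, dev, args.handle, &offset);
 *	// ... user space mmaps at offset, draws, then ...
 *	ret = vmw_dumb_destroy(file_priv, dev, args.handle);
 */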
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

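/*
 * Example (illustrative): pinning keeps a resource's backing storage
 * resident, e.g. while it is being scanned out; every successful pin
 * must be balanced by an unpin:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... use the resource ...
 *	vmw_resource_unpin(res);
 */
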
/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
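/*
 * Example (sketch): user-visible stream ids are claimed from and
 * returned to the overlay pool:
 *
 *	uint32_t stream_id;
 *
 *	ret = vmw_overlay_claim(dev_priv, &stream_id);
 *	if (ret)
 *		return ret;
 *	... program the stream via the overlay ioctl path ...
 *	vmw_overlay_unref(dev_priv, stream_id);
 */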

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);
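/*
 * Example (hedged sketch of the typical MOB lifecycle; vsgt, num_pages
 * and mob_id come from the backing buffer and are assumed here):
 *
 *	struct vmw_mob *mob = vmw_mob_create(num_pages);
 *
 *	if (unlikely(mob == NULL))
 *		return -ENOMEM;
 *	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_pages, mob_id);
 *	...
 *	vmw_mob_unbind(dev_priv, mob);
 *	vmw_mob_destroy(mob);
 */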

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);

/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);
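/*
 * Example (illustrative; the res_type enum value and failure condition
 * are assumptions): additions are staged on a local list so they can be
 * committed only if the enclosing command submission succeeds, and
 * reverted otherwise - the shader and context code follow this pattern:
 *
 *	struct list_head staged;
 *
 *	INIT_LIST_HEAD(&staged);
 *	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, user_key,
 *				 res, &staged);
 *	if (submission_failed)		// hypothetical condition
 *		vmw_cmdbuf_res_revert(&staged);
 *	else
 *		vmw_cmdbuf_res_commit(&staged);
 */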

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
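/*
 * Example (hedged sketch; SVGA3D_INVALID_ID is used here on the
 * assumption that the command is not context-specific): device commands
 * are written into space reserved from the manager and then committed,
 * which queues them for submission:
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
 *				       true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... fill in the command body ...
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */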


/**
 * Inline helper functions
 */

/* Release a surface reference, clearing the caller's pointer. */
static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

/* Take a reference on the surface's embedded resource and return @srf. */
static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

/* Release a DMA buffer reference; NULL-safe, clears the caller's pointer. */
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

/* Take a reference on the underlying TTM buffer object. */
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

/* Bookkeeping for resources that depend on the FIFO being up. */
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform an MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform an MMIO write to volatile memory
 *
 * @value: The value to write
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32() on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}
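/*
 * Example (hedged; field and register names are taken on the assumption
 * that they match the rest of this driver): reading the current device
 * seqno from the memremap'd FIFO:
 *
 *	u32 *fifo_mem = dev_priv->mmio_virt;
 *	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 */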

/**
 * Host messaging - vmwgfx_msg.c
 */
extern int vmw_host_log(const char *log);
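/*
 * Example (trivial usage; the message text is made up): forward a
 * driver message to the hypervisor log:
 *
 *	vmw_host_log("vmwgfx: display topology changed");
 */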

#endif /* _VMWGFX_DRV_H_ */