/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include "drmP.h"
#include "vmwgfx_drm.h"
#include "drm_hashtab.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_lock.h"
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_DATE "20090724"
#define VMWGFX_DRIVER_MAJOR 0
#define VMWGFX_DRIVER_MINOR 1
#define VMWGFX_DRIVER_PATCHLEVEL 2
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head validate_list;
	struct list_head gmr_lru;
	uint32_t gmr_id;
	bool gmr_bound;
	uint32_t cur_validate_node;
	bool on_validate_list;
};

struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	struct idr *idr;
	int id;
	enum ttm_object_type res_type;
	bool avail;
	void (*hw_destroy) (struct vmw_resource *res);
	void (*res_free) (struct vmw_resource *res);

	/* TODO is a generic snooper needed? */
#if 0
	void (*snoop)(struct vmw_resource *res,
		      struct ttm_object_file *tfile,
		      SVGA3dCmdHeader *header);
	void *snoop_priv;
#endif
};

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;

	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	__le32 *last_buffer;
	uint32_t last_data_size;
	uint32_t last_buffer_size;
	bool last_buffer_add;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct rw_semaphore rwsem;
};

struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};

struct vmw_sw_context {
	struct ida bo_list;
	uint32_t last_cid;
	bool cid_valid;
	uint32_t last_sid;
	bool sid_valid;
	struct ttm_object_file *tfile;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
	uint32_t cur_val_buf;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct ttm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_depth;
	uint32_t vga_bpp;
	uint32_t vga_pseudo;
	uint32_t vga_red_mask;
	uint32_t vga_blue_mask;
	uint32_t vga_green_mask;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr context_idr;
	struct idr surface_idr;
	struct idr stream_idr;

	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	uint32_t fence_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	atomic_t fence_queue_waiters;
	atomic_t fifo_queue_waiters;
	uint32_t last_read_sequence;
	spinlock_t irq_lock;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	uint32_t val_seq;
	struct mutex cmdbuf_mutex;

	/**
	 * GMR management. Protected by the lru spinlock.
	 */

	struct ida gmr_ida;
	struct list_head gmr_lru;


	/**
	 * Operating mode.
	 */

	bool stealth;
	bool is_opened;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
};

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}
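
/*
 * Added usage note (not part of the original header): the three accessors
 * above are typically combined at the top of an ioctl handler to recover
 * the per-device and per-file private data. The handler below is purely
 * hypothetical and shown only to illustrate the pattern:
 *
 *	int vmw_example_ioctl(struct drm_device *dev, void *data,
 *			      struct drm_file *file_priv)
 *	{
 *		struct vmw_private *dev_priv = vmw_priv(dev);
 *		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 *
 *		... look up objects owned by tfile, program dev_priv ...
 *		return 0;
 *	}
 */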

static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}
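
/*
 * Added note: vmw_write() and vmw_read() implement the SVGA device's
 * indexed register protocol: the register index is written to the index
 * port and the value is then written to or read from the value port.
 * Since the two port accesses must not be interleaved with another
 * register access, callers are expected to hold dev_priv->hw_mutex, e.g.:
 *
 *	mutex_lock(&dev_priv->hw_mutex);
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 *	vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
 *	mutex_unlock(&dev_priv->hw_mutex);
 *
 * The register names above come from the SVGA register definitions pulled
 * in through vmwgfx_reg.h and are shown for illustration only.
 */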

/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
		struct ttm_buffer_object *bo);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		int id);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
		struct vmw_surface *srf,
		void (*res_free) (struct vmw_resource *res));
extern int vmw_user_surface_lookup(struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		int sid, struct vmw_surface **out);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		int id);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
		struct vmw_dma_buffer *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
		uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
		uint32_t id, struct vmw_dma_buffer **out);
extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
		struct vmw_dma_buffer *bo);
extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
		struct vmw_dma_buffer *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		uint32_t *inout_id,
		struct vmw_resource **out);
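
/*
 * Added usage note: the *_lookup helpers above translate a user-space
 * handle into a referenced kernel object; the caller owns that reference
 * and must drop it again with the matching unreference helper (declared
 * further down in this file), e.g.:
 *
 *	struct vmw_surface *srf;
 *	int ret;
 *
 *	ret = vmw_user_surface_lookup(dev_priv, tfile, sid, &srf);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use srf ...
 *	vmw_surface_unreference(&srf);
 *
 * This is only a sketch; the surrounding function is assumed to already
 * have dev_priv, tfile and sid in scope.
 */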


/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
		struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
		struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
		uint32_t *sequence);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
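
/*
 * Added usage note: command submission follows a reserve/commit pattern.
 * vmw_fifo_reserve() returns a pointer to the requested number of bytes
 * of command space (either directly in the FIFO or in the bounce buffer
 * kept in struct vmw_fifo_state), the caller builds the SVGA command in
 * place, and vmw_fifo_commit() makes it visible to the host, e.g.:
 *
 *	void *cmd = vmw_fifo_reserve(dev_priv, bytes);
 *
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	... write the SVGA command into cmd ...
 *	vmw_fifo_commit(dev_priv, bytes);
 *
 * This is only a sketch; the error code returned on a failed reservation
 * is up to the caller.
 */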

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
		uint32_t sequence, bool interruptible,
		unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
		uint32_t sequence);
extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
		bool lazy,
		bool fifo_idle,
		uint32_t sequence,
		bool interruptible,
		unsigned long timeout);
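
/*
 * Added usage note: a fence is emitted into the command stream with
 * vmw_fifo_send_fence(), which returns the sequence number it used.
 * Completion can then be detected by polling vmw_fence_signaled() or by
 * sleeping in vmw_wait_fence(), e.g.:
 *
 *	uint32_t sequence;
 *	int ret;
 *
 *	ret = vmw_fifo_send_fence(dev_priv, &sequence);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = vmw_wait_fence(dev_priv, false, sequence, true, 3 * HZ);
 *
 * The lazy/interruptible arguments and the timeout above are illustrative
 * only.
 */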

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
		struct ttm_object_file *tfile,
		struct ttm_buffer_object *bo,
		SVGA3dCmdHeader *header);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;
	struct ttm_buffer_object *bo = &tmp_buf->base;
	*buf = NULL;

	ttm_bo_unref(&bo);
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}
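
/*
 * Added note: as with vmw_surface_unreference() above, the unreference
 * helper takes the address of the caller's pointer and clears it, so a
 * stale pointer cannot be used after what may have been the last
 * reference was dropped, e.g.:
 *
 *	struct vmw_dma_buffer *buf = vmw_dmabuf_reference(some_buf);
 *
 *	... use buf ...
 *	vmw_dmabuf_unreference(&buf);
 *
 * after which buf is NULL. (some_buf is a placeholder for an existing,
 * referenced buffer.)
 */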

#endif