/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

struct vmw_temp_set_context {
	SVGA3dCmdHeader header;
	SVGA3dCmdDXTempSetContext body;
};

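/**
 * vmw_fifo_have_3d - Check whether the device supports 3D commands.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * On guest-backed-object devices this queries SVGA3D_DEVCAP_3D through
 * the device capability registers; on older devices it inspects the
 * extended fifo's 3D hardware version. Returns true if 3D is supported
 * and usable with the active display unit.
 */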
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_mmio_read(fifo_mem +
				  ((fifo->capabilities &
				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				   SVGA_FIFO_3D_HWVERSION_REVISED :
				   SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

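/**
 * vmw_fifo_have_pitchlock - Check whether the device supports fifo
 * pitchlock.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns true if the extended fifo advertises SVGA_FIFO_CAP_PITCHLOCK.
 */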
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

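/**
 * vmw_fifo_init - Initialize the fifo and start the device.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the fifo state to set up.
 *
 * Allocates the static bounce buffer, saves the register state to be
 * restored on release, enables the device, programs the fifo
 * min/max/next-cmd/stop registers and reads back the fifo capabilities.
 *
 * Returns 0 on success, -ENOMEM if the bounce buffer allocation fails.
 */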
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;

	fifo->dx = false;
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);
	vmw_write(dev_priv, SVGA_REG_TRACES, 0);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	return 0;
}

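/**
 * vmw_fifo_ping_host - Wake up device fifo command processing.
 *
 * @dev_priv: Pointer to device private structure.
 * @reason: The SVGA_SYNC_* reason to write to the sync register.
 *
 * Writes the sync register only if the fifo busy flag was previously
 * clear, so repeated pings do not cause redundant device syncs.
 */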
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	preempt_disable();
	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	preempt_enable();
}

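/**
 * vmw_fifo_release - Tear down the fifo.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the fifo state to tear down.
 *
 * Waits for the device to go idle, saves the last read seqno, restores
 * the register state saved at init time and frees the bounce buffers.
 */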
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

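/**
 * vmw_fifo_is_full - Check whether @bytes would overfill the fifo.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Requested number of bytes.
 *
 * Returns true if the free space, taking the circular layout into
 * account, is less than or equal to @bytes.
 */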
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

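/**
 * vmw_fifo_wait_noirq - Wait for fifo space by polling.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait can be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Fallback used when the device lacks SVGA_CAP_IRQMASK. Returns 0 on
 * success, -EBUSY on timeout and -ERESTARTSYS if interrupted.
 */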
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

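/**
 * vmw_fifo_wait - Wait for fifo space to become available.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait can be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL and sleeps on the fifo
 * progress irq when available, falling back to polling otherwise.
 * Returns 0 on success, -EBUSY on timeout and -ERESTARTSYS if
 * interrupted.
 */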
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}

/**
 * vmw_local_fifo_reserve - Reserve @bytes of space in the fifo.
 *
 * This function returns NULL (error) on two conditions:
 * If it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_mmio_write(bytes, fifo_mem +
						       SVGA_FIFO_RESERVED);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

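/**
 * vmw_fifo_reserve_dx - Reserve @bytes of command space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: DX context id, or SVGA3D_INVALID_ID for none.
 *
 * Reserves from the command buffer manager when one is present,
 * otherwise from the fifo. Returns a pointer to the reserved space,
 * or NULL on failure.
 */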
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret)) {
		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
			  (unsigned) bytes);
		dump_stack();
		return NULL;
	}

	return ret;
}

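/**
 * vmw_fifo_res_copy - Copy a bounce buffer into a reserveable fifo.
 *
 * @fifo_state: The fifo state.
 * @fifo_mem: Pointer to the mapped fifo memory.
 * @next_cmd: Current fifo next-command offset.
 * @max: Fifo max offset.
 * @min: Fifo min offset.
 * @bytes: Number of bytes to copy.
 *
 * Marks @bytes as reserved and memcpy()s the bounced commands into the
 * fifo, wrapping around to @min if they straddle the fifo end.
 */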
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      u32 *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

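/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into a non-reserveable fifo.
 *
 * Takes the same parameters as vmw_fifo_res_copy(). Copies the bounced
 * commands one 32-bit word at a time, updating SVGA_FIFO_NEXT_CMD with
 * a barrier after each word so the device sees a consistent stream.
 */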
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       u32 *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

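/**
 * vmw_local_fifo_commit - Commit previously reserved fifo space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Flushes any bounce buffer, advances the next-command offset, clears
 * the reservation and pings the host. Also drops the fifo mutex taken
 * by vmw_local_fifo_reserve().
 */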
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	if (fifo_state->dx)
		bytes += sizeof(struct vmw_temp_set_context);

	fifo_state->dx = false;
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

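/**
 * vmw_fifo_commit - Commit fifo space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Commits to the command buffer manager when one is present, otherwise
 * to the fifo. Unlike vmw_fifo_commit_flush(), it does not force an
 * immediate flush of buffered commands.
 */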
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptible if function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

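/**
 * vmw_fifo_send_fence - Emit a fence command to the fifo.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Returns the new nonzero sequence number used for the fence.
 *
 * Emits an SVGA_CMD_FENCE and flushes it. If the fifo lacks
 * SVGA_FIFO_CAP_FENCE, no command is emitted and the fence is instead
 * emulated by the waiting code in vmwgfx_irq.c. Returns -ENOMEM if
 * fifo space could not be reserved.
 */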
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_fifo_commit_flush(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * the appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}

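/**
 * vmw_fifo_reserve - Reserve @bytes of command space without a DX context.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 *
 * Convenience wrapper around vmw_fifo_reserve_dx() with
 * SVGA3D_INVALID_ID as the context id. Typical usage is
 * vmw_fifo_reserve(), fill in the commands, then vmw_fifo_commit().
 */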
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
}