/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)

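/**
 * vmw_irq_handler - Top level interrupt handler for the vmwgfx device.
 *
 * @irq: The irq number.
 * @arg: The struct drm_device the irq was installed for, passed as an
 * opaque pointer.
 *
 * Reads the pending interrupt status from the device status port, acks
 * all pending interrupts, and then dispatches only the bits also enabled
 * in the irq mask: fence and fence-goal interrupts wake the fence queue,
 * fifo-progress interrupts wake the fifo queue, and command-buffer and
 * error interrupts schedule the command-buffer manager tasklet.
 */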
irqreturn_t vmw_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status, masked_status;

        spin_lock(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        masked_status = status & dev_priv->irq_mask;
        spin_unlock(&dev_priv->irq_lock);

        if (likely(status))
                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

        if (!masked_status)
                return IRQ_NONE;

        if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
                             SVGA_IRQFLAG_FENCE_GOAL)) {
                vmw_fences_update(dev_priv->fman);
                wake_up_all(&dev_priv->fence_queue);
        }

        if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                wake_up_all(&dev_priv->fifo_queue);

        if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
                             SVGA_IRQFLAG_ERROR))
                vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

        return IRQ_HANDLED;
}

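/**
 * vmw_fifo_idle - Check whether the device has finished processing all
 * submitted commands.
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno: Unused; present so the signature matches the wait-condition
 * callback used by vmw_fallback_wait().
 *
 * Returns true if the SVGA_REG_BUSY register reads back zero.
 */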
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
        return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

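/**
 * vmw_update_seqno - Refresh the cached last-read fence seqno.
 *
 * @dev_priv: Pointer to the device private structure.
 * @fifo_state: Pointer to the device fifo state.
 *
 * Reads the current fence seqno from fifo memory. If it has advanced,
 * caches it, pulls any passed markers off the marker queue and signals
 * fence objects that have now passed.
 */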
void vmw_update_seqno(struct vmw_private *dev_priv,
                      struct vmw_fifo_state *fifo_state)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

        if (dev_priv->last_read_seqno != seqno) {
                dev_priv->last_read_seqno = seqno;
                vmw_marker_pull(&fifo_state->marker_queue, seqno);
                vmw_fences_update(dev_priv->fman);
        }
}

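/**
 * vmw_seqno_passed - Check whether a fence seqno has signaled.
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno: The seqno to check.
 *
 * Returns true if @seqno is at most VMW_FENCE_WRAP behind the last
 * seqno read back from the device, if the device cannot emit fences
 * and is idle, or if @seqno is ahead of everything actually emitted,
 * in which case the fence is stale.
 */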
bool vmw_seqno_passed(struct vmw_private *dev_priv,
                      uint32_t seqno)
{
        struct vmw_fifo_state *fifo_state;
        bool ret;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        fifo_state = &dev_priv->fifo;
        vmw_update_seqno(dev_priv, fifo_state);
        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
            vmw_fifo_idle(dev_priv, seqno))
                return true;

        /*
         * Then check whether the seqno is higher than what we've
         * actually emitted; in that case the fence is stale and
         * signaled.
         */
        ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
               > VMW_FENCE_WRAP);

        return ret;
}

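/**
 * vmw_fallback_wait - Wait for a seqno or for fifo idle by polling.
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Sleep for a jiffy between polls instead of busy-waiting.
 * @fifo_idle: Wait for the fifo to go idle rather than for @seqno.
 * @seqno: The seqno to wait for when @fifo_idle is false.
 * @interruptible: Whether the wait can be interrupted by signals.
 * @timeout: Maximum time to wait, in jiffies.
 *
 * Used when the device cannot signal fence completion through
 * interrupts. Returns 0 on success or on device lockup (after logging
 * an error), -ERESTARTSYS if interrupted by a signal, or an error
 * returned by vmw_cmdbuf_idle().
 */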
int vmw_fallback_wait(struct vmw_private *dev_priv,
                      bool lazy,
                      bool fifo_idle,
                      uint32_t seqno,
                      bool interruptible,
                      unsigned long timeout)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

        uint32_t count = 0;
        uint32_t signal_seq;
        int ret;
        unsigned long end_jiffies = jiffies + timeout;
        bool (*wait_condition)(struct vmw_private *, uint32_t);
        DEFINE_WAIT(__wait);

        wait_condition = (fifo_idle) ? &vmw_fifo_idle :
                &vmw_seqno_passed;

        /*
         * Block command submission while waiting for idle.
         */
        if (fifo_idle) {
                down_read(&fifo_state->rwsem);
                if (dev_priv->cman) {
                        ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
                                              10*HZ);
                        if (ret)
                                goto out_err;
                }
        }

        signal_seq = atomic_read(&dev_priv->marker_seq);
        ret = 0;

        for (;;) {
                prepare_to_wait(&dev_priv->fence_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (wait_condition(dev_priv, seqno))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                if (lazy)
                        schedule_timeout(1);
                else if ((++count & 0x0F) == 0) {
                        /*
                         * FIXME: Use schedule_hr_timeout here for
                         * newer kernels and lower CPU utilization.
                         */
                        __set_current_state(TASK_RUNNING);
                        schedule();
                        __set_current_state((interruptible) ?
                                            TASK_INTERRUPTIBLE :
                                            TASK_UNINTERRUPTIBLE);
                }
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fence_queue, &__wait);
        if (ret == 0 && fifo_idle) {
                u32 *fifo_mem = dev_priv->mmio_virt;

                vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
        }
        wake_up_all(&dev_priv->fence_queue);
out_err:
        if (fifo_idle)
                up_read(&fifo_state->rwsem);

        return ret;
}

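/**
 * vmw_seqno_waiter_add - Register interest in fence seqno interrupts.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * When the first waiter registers, any stale pending ANY_FENCE
 * interrupt is acked and ANY_FENCE is enabled in the device irq mask.
 */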
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->fence_queue_waiters++ == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_ANY_FENCE,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

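/**
 * vmw_seqno_waiter_remove - Deregister interest in fence seqno
 * interrupts.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * When the last waiter deregisters, ANY_FENCE interrupts are masked
 * off again.
 */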
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->fence_queue_waiters == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

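/**
 * vmw_goal_waiter_add - Register interest in fence goal interrupts.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * When the first waiter registers, any stale pending FENCE_GOAL
 * interrupt is acked and FENCE_GOAL is enabled in the device irq mask.
 */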
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->goal_queue_waiters++ == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_FENCE_GOAL,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

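/**
 * vmw_goal_waiter_remove - Deregister interest in fence goal
 * interrupts.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * When the last waiter deregisters, FENCE_GOAL interrupts are masked
 * off again.
 */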
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->goal_queue_waiters == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

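/**
 * vmw_wait_seqno - Wait for a fence seqno to signal.
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: In the polling fallback, sleep between polls rather than
 * busy-wait.
 * @seqno: The seqno to wait for.
 * @interruptible: Whether the wait can be interrupted by signals.
 * @timeout: Maximum time to wait, in jiffies.
 *
 * Returns immediately if the seqno has already passed. Otherwise falls
 * back to polling if the device lacks fence or irq-mask support, and
 * sleeps on the fence queue with fence interrupts enabled in all other
 * cases.
 *
 * Returns 0 on success, -EBUSY on timeout, or -ERESTARTSYS if
 * interrupted by a signal.
 */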
int vmw_wait_seqno(struct vmw_private *dev_priv,
                   bool lazy, uint32_t seqno,
                   bool interruptible, unsigned long timeout)
{
        long ret;
        struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return 0;

        if (likely(vmw_seqno_passed(dev_priv, seqno)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
                return vmw_fallback_wait(dev_priv, lazy, true, seqno,
                                         interruptible, timeout);

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fallback_wait(dev_priv, lazy, false, seqno,
                                         interruptible, timeout);

        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                        (dev_priv->fence_queue,
                         vmw_seqno_passed(dev_priv, seqno),
                         timeout);
        else
                ret = wait_event_timeout
                        (dev_priv->fence_queue,
                         vmw_seqno_passed(dev_priv, seqno),
                         timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}

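/**
 * vmw_irq_preinstall - Prepare the device for irq installation.
 *
 * @dev: Pointer to the drm device.
 *
 * Initializes the irq spinlock and acks any interrupt already pending
 * in the status port, so the handler starts from a clean state.
 */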
void vmw_irq_preinstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        spin_lock_init(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

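/**
 * vmw_irq_postinstall - Finish irq installation.
 *
 * @dev: Pointer to the drm device.
 *
 * Nothing to do here; individual interrupt flags are enabled on demand
 * by the waiter helpers in this file. Always returns 0.
 */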
int vmw_irq_postinstall(struct drm_device *dev)
{
        return 0;
}

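/**
 * vmw_irq_uninstall - Mask off and ack all device interrupts.
 *
 * @dev: Pointer to the drm device.
 *
 * Clears the device irq mask and acks any interrupt still pending in
 * the status port.
 */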
void vmw_irq_uninstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

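/**
 * vmw_generic_waiter_add - Take a reference on an arbitrary irq flag.
 *
 * @dev_priv: Pointer to the device private structure.
 * @flag: The SVGA_IRQFLAG_* bit to enable.
 * @waiter_count: Reference count for @flag, shared by all its users.
 *
 * When the count goes from zero to one, any stale pending instance of
 * @flag is acked and @flag is enabled in the device irq mask. A
 * minimal usage sketch, with an illustrative (not necessarily
 * existing) counter field:
 *
 *	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
 *			       &dev_priv->error_waiters);
 *	...wait for the error interrupt...
 *	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ERROR,
 *				  &dev_priv->error_waiters);
 */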
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
                            u32 flag, int *waiter_count)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        if ((*waiter_count)++ == 0) {
                outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}

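/**
 * vmw_generic_waiter_remove - Release a reference on an arbitrary irq
 * flag.
 *
 * @dev_priv: Pointer to the device private structure.
 * @flag: The SVGA_IRQFLAG_* bit to disable.
 * @waiter_count: Reference count for @flag, shared by all its users.
 *
 * When the count drops back to zero, @flag is masked off in the device
 * irq mask.
 */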
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
                               u32 flag, int *waiter_count)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        if (--(*waiter_count) == 0) {
                dev_priv->irq_mask &= ~flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}