/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

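/*
 * Seqnos are 32-bit values that wrap. Throughout this file a seqno is
 * taken to have passed a reference value when it trails it by less
 * than VMW_FENCE_WRAP (2^24), which keeps the comparisons wrap-safe.
 */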
#define VMW_FENCE_WRAP (1 << 24)

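/*
 * Top half of the SVGA device interrupt: reads the pending status from
 * the device's irq status port, acks it by writing the raw status back,
 * and wakes waiters on the fence and fifo queues for whichever unmasked
 * bits fired. Returns IRQ_NONE if no bit in the current irq_mask was
 * pending.
 */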
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;

	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	masked_status = status & dev_priv->irq_mask;
	spin_unlock(&dev_priv->irq_lock);

	if (likely(status))
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	if (!masked_status)
		return IRQ_NONE;

	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			     SVGA_IRQFLAG_FENCE_GOAL)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
	}

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	return IRQ_HANDLED;
}

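/*
 * Check whether the device has gone idle. @seqno is unused here; the
 * parameter exists only so that this function matches the
 * wait_condition function-pointer signature used by vmw_fallback_wait().
 */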
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	uint32_t busy;

	mutex_lock(&dev_priv->hw_mutex);
	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
	mutex_unlock(&dev_priv->hw_mutex);

	return (busy == 0);
}

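/*
 * Re-read the current fence seqno from FIFO memory. If it has moved
 * on, cache it in last_read_seqno, retire the corresponding markers
 * and let the fence manager signal any completed fences.
 */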
void vmw_update_seqno(struct vmw_private *dev_priv,
		      struct vmw_fifo_state *fifo_state)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_marker_pull(&fifo_state->marker_queue, seqno);
		vmw_fences_update(dev_priv->fman);
	}
}

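/*
 * Check whether @seqno has passed, cheapest test first: the cached
 * last_read_seqno, then a fresh read from the device, then an idle
 * check for devices without fence support. As a final fallback, a
 * seqno more than VMW_FENCE_WRAP ahead of the last one emitted is
 * considered stale and therefore already signaled.
 */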
bool vmw_seqno_passed(struct vmw_private *dev_priv,
		      uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Finally, check whether the seqno is ahead of what we've
	 * actually emitted; in that case the fence is stale and counts
	 * as signaled.
	 */
	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}

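/*
 * Polling wait used when fence irqs are unavailable or unusable. Polls
 * either vmw_fifo_idle() or vmw_seqno_passed() until the condition
 * holds, the timeout expires or, if @interruptible, a signal arrives.
 * While waiting for idle it holds the fifo rwsem for reading to block
 * command submission, and on success writes the marker seqno sampled
 * at entry back to the FIFO fence register.
 */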
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle.
	 */
	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hrtimeout() here for
			 * newer kernels and lower CPU utilization.
			 */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}

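/*
 * Account for a new fence-seqno waiter. On the transition from zero
 * waiters, clear any stale SVGA_IRQFLAG_ANY_FENCE status and unmask
 * the fence irq in SVGA_REG_IRQMASK.
 */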
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (dev_priv->fence_queue_waiters++ == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_ANY_FENCE,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}

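/*
 * Drop a fence-seqno waiter; when the last one goes away, mask the
 * fence irq again.
 */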
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (--dev_priv->fence_queue_waiters == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}

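/*
 * Same accounting as for seqno waiters, but for the fence-goal irq
 * (SVGA_IRQFLAG_FENCE_GOAL).
 */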
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (dev_priv->goal_queue_waiters++ == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FENCE_GOAL,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}

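/*
 * Drop a fence-goal waiter; the last one out masks the fence-goal irq.
 */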
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (--dev_priv->goal_queue_waiters == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}

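/*
 * Sleep until @seqno has passed. Fast-paths seqnos that have already
 * signaled, pings the host so the FIFO gets processed, and then either
 * sleeps on the fence queue under the fence irq or, when the device
 * lacks fence or irq-mask support, falls back to polling. Returns 0 on
 * success, -EBUSY on timeout, or -ERESTARTSYS when interrupted.
 */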
int vmw_wait_seqno(struct vmw_private *dev_priv,
		   bool lazy, uint32_t seqno,
		   bool interruptible, unsigned long timeout)
{
	long ret;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_seqno_passed(dev_priv, seqno)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
					 interruptible, timeout);

	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
			(dev_priv->fence_queue,
			 vmw_seqno_passed(dev_priv, seqno),
			 timeout);
	else
		ret = wait_event_timeout
			(dev_priv->fence_queue,
			 vmw_seqno_passed(dev_priv, seqno),
			 timeout);

	vmw_seqno_waiter_remove(dev_priv);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}

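/*
 * Prepare for irq installation: initialize the irq spinlock and clear
 * any interrupt status left over in the device.
 */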
void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	spin_lock_init(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

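/* Nothing to do after installing the handler. */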
int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}

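/*
 * Mask all device irqs and clear any status still pending.
 */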
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
	mutex_unlock(&dev_priv->hw_mutex);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}