/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)

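/**
 * vmw_irq_handler - Top-level interrupt handler for the vmwgfx device.
 * @irq: The irq number.
 * @arg: The &struct drm_device cookie passed to request_irq().
 *
 * Reads and acks the pending interrupt status, then wakes fence and fifo
 * waiters and schedules the command-buffer tasklet according to which
 * masked status bits are set.
 */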
irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	masked_status = status & READ_ONCE(dev_priv->irq_mask);

	if (likely(status))
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	if (!status)
		return IRQ_NONE;

	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			     SVGA_IRQFLAG_FENCE_GOAL)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
	}

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			     SVGA_IRQFLAG_ERROR))
		vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

	return IRQ_HANDLED;
}

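/**
 * vmw_fifo_idle - Check whether the device has gone idle.
 * @dev_priv: Pointer to the device private structure.
 * @seqno: Unused; present so the signature matches the wait-condition
 * function pointer used by vmw_fallback_wait().
 *
 * Returns true when the SVGA busy register reads zero.
 */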
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

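/**
 * vmw_update_seqno - Refresh the cached last-read seqno from fifo memory.
 * @dev_priv: Pointer to the device private structure.
 * @fifo_state: Fifo state holding the marker queue.
 *
 * If the seqno read from fifo MMIO differs from the cached value, the
 * cache is updated, completed markers are pulled and fence objects that
 * have now passed are signaled.
 */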
void vmw_update_seqno(struct vmw_private *dev_priv,
		      struct vmw_fifo_state *fifo_state)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_marker_pull(&fifo_state->marker_queue, seqno);
		vmw_fences_update(dev_priv->fman);
	}
}

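/**
 * vmw_seqno_passed - Check whether a fence seqno has signaled.
 * @dev_priv: Pointer to the device private structure.
 * @seqno: The seqno to check.
 *
 * The comparisons use unsigned 32-bit wraparound arithmetic: @seqno
 * counts as passed when the last seqno read back from the device is no
 * more than VMW_FENCE_WRAP ahead of it. A seqno beyond what has been
 * emitted is treated as stale and therefore signaled.
 */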
bool vmw_seqno_passed(struct vmw_private *dev_priv,
		      uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Finally, check whether the seqno is higher than what we've
	 * actually emitted. In that case the fence is stale and signaled.
	 */
	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}

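/**
 * vmw_fallback_wait - Wait for a seqno by polling, without relying on irqs.
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Sleep between polls instead of busy-waiting.
 * @fifo_idle: Wait for the whole fifo to go idle rather than for @seqno.
 * @seqno: The seqno to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Used when the device lacks fence or irq-mask support. Returns 0 on
 * success or after a logged device-lockup timeout, -ERESTARTSYS when
 * interrupted by a signal, or an error from vmw_cmdbuf_idle().
 */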
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle.
	 */
	if (fifo_idle) {
		down_read(&fifo_state->rwsem);
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
		}
	}

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hrtimeout here for
			 * newer kernels and lower CPU utilization.
			 */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		u32 *fifo_mem = dev_priv->mmio_virt;

		vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}

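/**
 * vmw_generic_waiter_add - Unmask an irq flag for the first waiter.
 * @dev_priv: Pointer to the device private structure.
 * @flag: The SVGA_IRQFLAG_* bit to enable.
 * @waiter_count: Reference count of waiters for this flag.
 *
 * When the count goes from zero to one, any stale pending instance of
 * the flag is acked before the flag is added to the device irq mask.
 */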
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

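/**
 * vmw_generic_waiter_remove - Mask an irq flag when the last waiter leaves.
 * @dev_priv: Pointer to the device private structure.
 * @flag: The SVGA_IRQFLAG_* bit to disable.
 * @waiter_count: Reference count of waiters for this flag.
 */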
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

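/* Enable fence irqs while at least one seqno waiter exists. */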
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
			       &dev_priv->fence_queue_waiters);
}

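/* Disable fence irqs again when the last seqno waiter is gone. */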
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
				  &dev_priv->fence_queue_waiters);
}

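/* Enable fence-goal irqs while at least one goal waiter exists. */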
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
			       &dev_priv->goal_queue_waiters);
}

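/* Disable fence-goal irqs again when the last goal waiter is gone. */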
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
				  &dev_priv->goal_queue_waiters);
}

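/**
 * vmw_wait_seqno - Wait for a fence seqno to signal.
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Sleep between polls if the polling fallback is taken.
 * @seqno: The seqno to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Prefers an irq-driven wait on the fence queue and falls back to
 * vmw_fallback_wait() when the device lacks fence or irq-mask support.
 * Returns 0 on success, -EBUSY on timeout or -ERESTARTSYS when
 * interrupted.
 */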
int vmw_wait_seqno(struct vmw_private *dev_priv,
		   bool lazy, uint32_t seqno,
		   bool interruptible, unsigned long timeout)
{
	long ret;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_seqno_passed(dev_priv, seqno)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
					 interruptible, timeout);

	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
			(dev_priv->fence_queue,
			 vmw_seqno_passed(dev_priv, seqno),
			 timeout);
	else
		ret = wait_event_timeout
			(dev_priv->fence_queue,
			 vmw_seqno_passed(dev_priv, seqno),
			 timeout);

	vmw_seqno_waiter_remove(dev_priv);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}

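/**
 * vmw_irq_preinstall - Ack stale interrupts before the handler is installed.
 * @dev: Pointer to the drm device.
 */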
void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

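/* No further setup is needed once the irq handler is installed. */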
int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}

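/**
 * vmw_irq_uninstall - Mask and ack all device interrupts.
 * @dev: Pointer to the drm device.
 *
 * Clears the device irq mask and acks any still-pending interrupt
 * status before the handler is removed.
 */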
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}