/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

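/*
 * Sequence numbers are 32-bit and wrap around. Two seqnos are compared
 * using unsigned subtraction: a difference smaller than VMW_FENCE_WRAP
 * (2^24) means "already passed"; anything larger is treated as a seqno
 * from before the wrap.
 */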
#define VMW_FENCE_WRAP (1 << 24)

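/*
 * vmw_irq_handler - Top-half interrupt handler.
 *
 * Reads and acks the device interrupt status, then wakes up any waiters
 * whose condition may have changed: fence waiters, fifo waiters, and the
 * command buffer tasklet. Only bits present in irq_mask count towards
 * claiming the interrupt.
 */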
irqreturn_t vmw_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status, masked_status;

        spin_lock(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        masked_status = status & dev_priv->irq_mask;
        spin_unlock(&dev_priv->irq_lock);

        if (likely(status))
                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

        if (!masked_status)
                return IRQ_NONE;

        if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
                             SVGA_IRQFLAG_FENCE_GOAL)) {
                vmw_fences_update(dev_priv->fman);
                wake_up_all(&dev_priv->fence_queue);
        }

        if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                wake_up_all(&dev_priv->fifo_queue);

        if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
                             SVGA_IRQFLAG_ERROR))
                vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

        return IRQ_HANDLED;
}

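/*
 * vmw_fifo_idle - Check whether the device has gone idle.
 *
 * The @seqno argument is unused; it is only there so that the signature
 * matches vmw_seqno_passed() and either function can be used as the
 * wait condition in vmw_fallback_wait().
 */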
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
        return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

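/*
 * vmw_update_seqno - Refresh the last seqno read from the device.
 *
 * Reads the current fence seqno out of FIFO memory and, if it has
 * advanced, pulls completed markers off the marker queue and updates
 * any fence objects that have now passed.
 */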
void vmw_update_seqno(struct vmw_private *dev_priv,
                      struct vmw_fifo_state *fifo_state)
{
        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

        if (dev_priv->last_read_seqno != seqno) {
                dev_priv->last_read_seqno = seqno;
                vmw_marker_pull(&fifo_state->marker_queue, seqno);
                vmw_fences_update(dev_priv->fman);
        }
}

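/*
 * vmw_seqno_passed - Check whether @seqno has been reached.
 *
 * The wrap-safe comparison relies on unsigned arithmetic: if @seqno has
 * been passed, last_read_seqno - seqno is a small positive number; if it
 * has not, the subtraction wraps to a huge value. For example, with
 * last_read_seqno == 5 and seqno == 0xfffffffe (emitted just before a
 * wrap), the unsigned difference is 7 < VMW_FENCE_WRAP, so the seqno
 * correctly counts as passed.
 */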
bool vmw_seqno_passed(struct vmw_private *dev_priv,
                      uint32_t seqno)
{
        struct vmw_fifo_state *fifo_state;
        bool ret;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        fifo_state = &dev_priv->fifo;
        vmw_update_seqno(dev_priv, fifo_state);
        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
            vmw_fifo_idle(dev_priv, seqno))
                return true;

        /*
         * Finally, check whether the seqno is ahead of what has actually
         * been emitted. In that case the fence is stale and must be
         * considered signaled.
         */

        ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
               > VMW_FENCE_WRAP);

        return ret;
}

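/*
 * vmw_fallback_wait - Wait for a seqno without relying on IRQs.
 *
 * Polls either vmw_fifo_idle() or vmw_seqno_passed() until the condition
 * holds, the timeout expires, or a signal is received. With @lazy set,
 * the wait sleeps one tick per iteration; otherwise it busy-waits,
 * yielding the CPU every 16 iterations.
 */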
int vmw_fallback_wait(struct vmw_private *dev_priv,
                      bool lazy,
                      bool fifo_idle,
                      uint32_t seqno,
                      bool interruptible,
                      unsigned long timeout)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

        uint32_t count = 0;
        uint32_t signal_seq;
        int ret;
        unsigned long end_jiffies = jiffies + timeout;
        bool (*wait_condition)(struct vmw_private *, uint32_t);
        DEFINE_WAIT(__wait);

        wait_condition = (fifo_idle) ? &vmw_fifo_idle :
                &vmw_seqno_passed;

        /*
         * Block command submission while waiting for idle.
         */

        if (fifo_idle) {
                down_read(&fifo_state->rwsem);
                if (dev_priv->cman) {
                        ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
                                              10*HZ);
                        if (ret)
                                goto out_err;
                }
        }

        signal_seq = atomic_read(&dev_priv->marker_seq);
        ret = 0;

        for (;;) {
                prepare_to_wait(&dev_priv->fence_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (wait_condition(dev_priv, seqno))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                if (lazy)
                        schedule_timeout(1);
                else if ((++count & 0x0F) == 0) {
                        /*
                         * FIXME: Use schedule_hrtimeout() here for
                         * newer kernels and lower CPU utilization.
                         */

                        __set_current_state(TASK_RUNNING);
                        schedule();
                        __set_current_state((interruptible) ?
                                            TASK_INTERRUPTIBLE :
                                            TASK_UNINTERRUPTIBLE);
                }
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fence_queue, &__wait);
        if (ret == 0 && fifo_idle) {
                u32 __iomem *fifo_mem = dev_priv->mmio_virt;
                iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
        }
        wake_up_all(&dev_priv->fence_queue);
out_err:
        if (fifo_idle)
                up_read(&fifo_state->rwsem);

        return ret;
}

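/*
 * The waiter add/remove pairs below reference-count the consumers of a
 * given interrupt source, so the corresponding bit in SVGA_REG_IRQMASK
 * is only enabled while at least one waiter exists. Pending status is
 * acked before enabling, to avoid reacting to a stale interrupt.
 */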
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->fence_queue_waiters++ == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_ANY_FENCE,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->fence_queue_waiters == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

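/*
 * Same pattern for the fence-goal interrupt, which the device raises
 * when command processing reaches the seqno programmed as the current
 * fence goal.
 */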
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->goal_queue_waiters++ == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_FENCE_GOAL,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->goal_queue_waiters == 0) {
                unsigned long irq_flags;

                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        spin_unlock(&dev_priv->waiter_lock);
}

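/*
 * vmw_wait_seqno - Wait for @seqno, picking the cheapest method.
 *
 * Returns immediately if the seqno has already passed. Otherwise pings
 * the host and falls back to polling when the device lacks fence or IRQ
 * masking support; with full support, it sleeps on the fence queue with
 * the ANY_FENCE interrupt enabled.
 */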
int vmw_wait_seqno(struct vmw_private *dev_priv,
                   bool lazy, uint32_t seqno,
                   bool interruptible, unsigned long timeout)
{
        long ret;
        struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return 0;

        if (likely(vmw_seqno_passed(dev_priv, seqno)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
                return vmw_fallback_wait(dev_priv, lazy, true, seqno,
                                         interruptible, timeout);

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fallback_wait(dev_priv, lazy, false, seqno,
                                         interruptible, timeout);

        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                        (dev_priv->fence_queue,
                         vmw_seqno_passed(dev_priv, seqno),
                         timeout);
        else
                ret = wait_event_timeout
                        (dev_priv->fence_queue,
                         vmw_seqno_passed(dev_priv, seqno),
                         timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}

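/*
 * vmw_irq_preinstall - Prepare for interrupt installation.
 *
 * Initializes the irq lock and acks any interrupt status left pending,
 * for example by an earlier driver instance.
 */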
void vmw_irq_preinstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        spin_lock_init(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

int vmw_irq_postinstall(struct drm_device *dev)
{
        return 0;
}

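/*
 * vmw_irq_uninstall - Mask all device interrupts and ack anything still
 * pending, so that no interrupt fires after the handler is removed.
 */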
void vmw_irq_uninstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

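/*
 * Generic version of the waiter add/remove pattern above, parameterized
 * on the interrupt flag and waiter count. A caller waiting on, say,
 * command buffer completion would bracket its wait roughly as follows
 * (illustrative usage only; the actual call sites live elsewhere in the
 * driver):
 *
 *      vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 *                             &dev_priv->cmdbuf_waiters);
 *      ...wait on the relevant queue...
 *      vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 *                                &dev_priv->cmdbuf_waiters);
 *
 * Note that these helpers take only irq_lock, not waiter_lock.
 */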
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
                            u32 flag, int *waiter_count)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        if ((*waiter_count)++ == 0) {
                outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}

void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
                               u32 flag, int *waiter_count)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        if (--(*waiter_count) == 0) {
                dev_priv->irq_mask &= ~flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}