/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}
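
/*
 * Illustration for i915_gem_get_seqno() above (hypothetical values): if
 * next_seqno is 0xffffffff, the caller receives 0xffffffff and next_seqno
 * wraps through 0 to 1, so a seqno of 0 is never handed out and can safely
 * mean "no seqno".
 */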

static void
render_ring_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32 invalidate_domains,
		u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;
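
		/*
		 * Example (illustrative): invalidating
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER on a
		 * pre-965 chip yields cmd = MI_FLUSH | MI_READ_FLUSH with
		 * MI_NO_WRITE_FLUSH cleared, i.e. write back the render
		 * cache and invalidate the sampler cache in a single flush.
		 */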
#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, cmd);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}

	i915_gem_process_flushing_list(dev, flush_domains, ring);
}

static unsigned int render_ring_get_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
}

static unsigned int render_ring_get_tail(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
}

static unsigned int render_ring_get_active_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;

	return I915_READ(acthd_reg);
}

static void render_ring_advance_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(PRB0_TAIL, ring->tail);
}

static int init_ring_common(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	u32 head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	obj_priv = to_intel_bo(ring->gem_object);

	/* Stop the ring if it's running. */
	I915_WRITE(ring->regs.ctl, 0);
	I915_WRITE(ring->regs.head, 0);
	I915_WRITE(ring->regs.tail, 0);

	/* Initialize the ring. */
	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
	head = ring->get_head(dev, ring);

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ(ring->regs.ctl),
				I915_READ(ring->regs.head),
				I915_READ(ring->regs.tail),
				I915_READ(ring->regs.start));

		I915_WRITE(ring->regs.head, 0);

		DRM_ERROR("%s head forced to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ(ring->regs.ctl),
				I915_READ(ring->regs.head),
				I915_READ(ring->regs.tail),
				I915_READ(ring->regs.start));
	}

	I915_WRITE(ring->regs.ctl,
			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_NO_REPORT | RING_VALID);

	head = I915_READ(ring->regs.head) & HEAD_ADDR;
	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ(ring->regs.ctl),
				I915_READ(ring->regs.head),
				I915_READ(ring->regs.tail),
				I915_READ(ring->regs.start));
		return -EIO;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = ring->get_head(dev, ring);
		ring->tail = ring->get_tail(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
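
	/*
	 * Worked example for the space computation above (hypothetical
	 * values): with a 4096-byte ring, head == 256 and tail == 1024
	 * give space = 256 - (1024 + 8) = -776, then += 4096 leaves 3320
	 * usable bytes; the 8-byte slack keeps the tail from ever
	 * catching the head exactly.
	 */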
	return 0;
}

static int init_render_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = init_ring_common(dev, ring);
	int mode;

	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}
	return ret;
}

#define PIPE_CONTROL_FLUSH(addr)					\
do {									\
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
	OUT_RING(0);							\
	OUT_RING(0);							\
} while (0)
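
/*
 * For reference: each PIPE_CONTROL_FLUSH(addr) expands to a four-dword
 * PIPE_CONTROL packet that stalls at the depth stage and qword-writes
 * zero to 'addr' in the global GTT; render_ring_add_request() below
 * chains several of these as a flush workaround.
 */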

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_file *file_priv,
		u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	if (IS_GEN6(dev)) {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
		OUT_RING(PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else if (HAS_PIPE_CONTROL(dev)) {
		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

		/*
		 * Workaround qword write incoherence by flushing the
		 * PIPE_NOTIFY buffers out to memory before requesting
		 * an interrupt.
		 */
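		/* The 128-byte stride between scratch writes below keeps
		 * each flush on its own cacheline (assuming 64-byte
		 * lines). */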
		BEGIN_LP_RING(32);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128; /* write to separate cachelines */
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(4);
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(seqno);

		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}
	return seqno;
}

static u32
render_ring_get_gem_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void render_setup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (IS_GEN6(dev)) {
		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA_GEN6); /* posting read */
	} else {
		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA); /* posting read */
	}
}

void
bsd_ring_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32 invalidate_domains,
		u32 flush_domains)
{
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);

	i915_gem_process_flushing_list(dev, flush_domains, ring);
}

static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
}

static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
}

static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_ACTHD);
}

static inline void bsd_ring_advance_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_RING_TAIL, ring->tail);
}

static int init_bsd_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	return init_ring_common(dev, ring);
}

static u32
bsd_ring_add_request(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_file *file_priv,
		u32 flush_domains)
{
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

	return seqno;
}

static void bsd_setup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
	I915_READ(BSD_HWS_PGA);
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static u32
bsd_ring_get_gem_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_i915_gem_execbuffer2 *exec,
		struct drm_clip_rect *cliprects,
		uint64_t exec_offset)
{
	uint32_t exec_start;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
			(2 << 6) | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);
	return 0;
}
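
/*
 * Note (informational, not from this file): on 965-class hardware the
 * (2 << 6) field of MI_BATCH_BUFFER_START selects GTT-based batch
 * addressing (named MI_BATCH_GTT in later versions of i915_reg.h), and
 * MI_BATCH_NON_SECURE_I965 marks the batch as non-privileged.
 */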

static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_i915_gem_execbuffer2 *exec,
		struct drm_clip_rect *cliprects,
		uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			intel_ring_begin(dev, ring, 4);
			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
			intel_ring_emit(dev, ring,
					exec_start | MI_BATCH_NON_SECURE);
			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
			intel_ring_emit(dev, ring, 0);
		} else {
			intel_ring_begin(dev, ring, 4);
			if (IS_I965G(dev)) {
				intel_ring_emit(dev, ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
				intel_ring_emit(dev, ring, exec_start);
			} else {
				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
						| (2 << 6));
				intel_ring_emit(dev, ring, exec_start |
						MI_BATCH_NON_SECURE);
			}
		}
		intel_ring_advance(dev, ring);
	}

	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, MI_FLUSH |
				MI_NO_WRITE_FLUSH |
				MI_INVALIDATE_ISP);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
	/* XXX breadcrumb */

	return 0;
}

static void cleanup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;
	obj_priv = to_intel_bo(obj);

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj_priv->gtt_offset;
	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM; /* kmap failed; don't return success */
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	ring->setup_status_page(dev, ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	int ret;

	ring->dev = dev;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(dev, ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->gem_object = obj;

	ret = i915_gem_object_pin(obj, ring->alignment);
	if (ret)
		goto err_unref;

	obj_priv = to_intel_bo(obj);
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(dev, ring);
	if (ret)
		goto err_unmap;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = ring->get_head(dev, ring);
		ring->tail = ring->get_tail(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	return ret;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
	ring->gem_object = NULL;
err_hws:
	cleanup_status_page(dev, ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	if (ring->gem_object == NULL)
		return;

	drm_core_ioremapfree(&ring->map, dev);

	i915_gem_object_unpin(ring->gem_object);
	drm_gem_object_unreference(ring->gem_object);
	ring->gem_object = NULL;
	cleanup_status_page(dev, ring);
}

int intel_wrap_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;
	rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(dev, ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
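	/* rem is in bytes; each loop pass writes two 4-byte MI_NOOPs,
	 * hence the divide by 8. */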
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

int intel_wait_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring, int n)
{
	unsigned long end;

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = ring->get_head(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		yield();
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

void intel_ring_begin(struct drm_device *dev,
		struct intel_ring_buffer *ring, int num_dwords)
{
	int n = 4 * num_dwords;
	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);

	ring->space -= n;
}

void intel_ring_advance(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->advance_ring(dev, ring);
}
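
/*
 * Typical usage of the begin/emit/advance API (illustrative; this is
 * exactly what bsd_ring_flush() above does): reserve two dwords, emit
 * them, then kick the tail pointer so the hardware sees them.
 *
 *	intel_ring_begin(dev, ring, 2);
 *	intel_ring_emit(dev, ring, MI_FLUSH);
 *	intel_ring_emit(dev, ring, MI_NOOP);
 *	intel_ring_advance(dev, ring);
 */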

void intel_fill_struct(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		void *data,
		unsigned int len)
{
	unsigned int *virt = ring->virtual_start + ring->tail;
	BUG_ON((len & ~(4 - 1)) != 0);
	intel_ring_begin(dev, ring, len / 4);
	memcpy(virt, data, len);
	ring->tail += len;
	ring->tail &= ring->size - 1;
	ring->space -= len;
	intel_ring_advance(dev, ring);
}

struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.regs = {
		.ctl = PRB0_CTL,
		.head = PRB0_HEAD,
		.tail = PRB0_TAIL,
		.start = PRB0_START
	},
	.ring_flag = I915_EXEC_RENDER,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = render_setup_status_page,
	.init = init_render_ring,
	.get_head = render_ring_get_head,
	.get_tail = render_ring_get_tail,
	.get_active_head = render_ring_get_active_head,
	.advance_ring = render_ring_advance_ring,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_gem_seqno = render_ring_get_gem_seqno,
	.user_irq_get = render_ring_get_user_irq,
	.user_irq_put = render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};

/* ring buffer for bit-stream decoder */

struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.regs = {
		.ctl = BSD_RING_CTL,
		.head = BSD_RING_HEAD,
		.tail = BSD_RING_TAIL,
		.start = BSD_RING_START
	},
	.ring_flag = I915_EXEC_BSD,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = bsd_setup_status_page,
	.init = init_bsd_ring,
	.get_head = bsd_ring_get_head,
	.get_tail = bsd_ring_get_tail,
	.get_active_head = bsd_ring_get_active_head,
	.advance_ring = bsd_ring_advance_ring,
	.flush = bsd_ring_flush,
	.add_request = bsd_ring_add_request,
	.get_gem_seqno = bsd_ring_get_gem_seqno,
	.user_irq_get = bsd_ring_get_user_irq,
	.user_irq_put = bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};