/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"

static void
render_ring_flush(struct drm_device *dev,
		  struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif
	trace_i915_gem_request_flush(dev, ring->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains | flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		intel_ring_begin(dev, ring, 8);
		intel_ring_emit(dev, ring, cmd);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
}

static unsigned int render_ring_get_head(struct drm_device *dev,
					 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
}

static unsigned int render_ring_get_tail(struct drm_device *dev,
					 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
}

static unsigned int render_ring_get_active_head(struct drm_device *dev,
						struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;

	return I915_READ(acthd_reg);
}

static void render_ring_advance_ring(struct drm_device *dev,
				     struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(PRB0_TAIL, ring->tail);
}

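/*
 * Common hardware setup shared by the render and BSD rings: stop the ring,
 * point the START register at the ring object's GTT offset, verify that the
 * head resets to zero (forcing it if necessary, per the G45 note below), and
 * finally enable the ring.  Returns -EIO if the head refuses to reset.
 */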
static int init_ring_common(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	u32 head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	obj_priv = to_intel_bo(ring->gem_object);

	/* Stop the ring if it's running. */
	I915_WRITE(ring->regs.ctl, 0);
	I915_WRITE(ring->regs.head, 0);
	I915_WRITE(ring->regs.tail, 0);

	/* Initialize the ring. */
	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
	head = ring->get_head(dev, ring);

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ(ring->regs.ctl),
			  I915_READ(ring->regs.head),
			  I915_READ(ring->regs.tail),
			  I915_READ(ring->regs.start));

		I915_WRITE(ring->regs.head, 0);

		DRM_ERROR("%s head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ(ring->regs.ctl),
			  I915_READ(ring->regs.head),
			  I915_READ(ring->regs.tail),
			  I915_READ(ring->regs.start));
	}

	I915_WRITE(ring->regs.ctl,
		   ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
		   | RING_NO_REPORT | RING_VALID);

	head = I915_READ(ring->regs.head) & HEAD_ADDR;
	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ(ring->regs.ctl),
			  I915_READ(ring->regs.head),
			  I915_READ(ring->regs.tail),
			  I915_READ(ring->regs.start));
		return -EIO;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = ring->get_head(dev, ring);
		ring->tail = ring->get_tail(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return 0;
}

static int init_render_ring(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = init_ring_common(dev, ring);
	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
		I915_WRITE(MI_MODE,
			   (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
	}
	return ret;
}

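/*
 * Emits a PIPE_CONTROL depth-stall flush that performs a qword write to the
 * given scratch address.  Used by render_ring_add_request() below as part of
 * the workaround that flushes PIPE_NOTIFY writes out to memory.
 */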
#define PIPE_CONTROL_FLUSH(addr) \
do { \
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
		 PIPE_CONTROL_DEPTH_STALL); \
	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
	OUT_RING(0); \
	OUT_RING(0); \
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			struct drm_file *file_priv,
			u32 flush_domains)
{
	u32 seqno;
	drm_i915_private_t *dev_priv = dev->dev_private;
	seqno = intel_ring_get_seqno(dev, ring);
	if (HAS_PIPE_CONTROL(dev)) {
		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

		/*
		 * Workaround qword write incoherence by flushing the
		 * PIPE_NOTIFY buffers out to memory before requesting
		 * an interrupt.
		 */
		BEGIN_LP_RING(32);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128; /* write to separate cachelines */
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(4);
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(seqno);

		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}
	return seqno;
}

static u32
render_ring_get_gem_seqno(struct drm_device *dev,
			  struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

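/*
 * Point the hardware status page register at the ring's status page; Gen6
 * uses a different register than earlier parts.  The read back is a posting
 * read to make sure the write has landed.
 */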
static void render_setup_status_page(struct drm_device *dev,
				     struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (IS_GEN6(dev)) {
		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA_GEN6); /* posting read */
	} else {
		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA); /* posting read */
	}
}

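/*
 * Flushing the BSD ring is a plain MI_FLUSH followed by a NOOP pad; unlike
 * render_ring_flush() above, the invalidate/flush domain arguments are not
 * examined here.
 */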
void
bsd_ring_flush(struct drm_device *dev,
	       struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	intel_ring_begin(dev, ring, 8);
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);
}

static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
					     struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
}

static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
					     struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
}

static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
						    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_ACTHD);
}

static inline void bsd_ring_advance_ring(struct drm_device *dev,
					 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_RING_TAIL, ring->tail);
}

static int init_bsd_ring(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	return init_ring_common(dev, ring);
}

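/*
 * Emit a request on the BSD ring: write the new seqno into the hardware
 * status page and follow it with MI_USER_INTERRUPT so the driver is
 * notified when the request completes.
 */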
static u32
bsd_ring_add_request(struct drm_device *dev,
		     struct intel_ring_buffer *ring,
		     struct drm_file *file_priv,
		     u32 flush_domains)
{
	u32 seqno;
	seqno = intel_ring_get_seqno(dev, ring);
	/* Reserve 16 bytes (4 dwords): seqno write plus user interrupt. */
	intel_ring_begin(dev, ring, 16);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

	return seqno;
}

static void bsd_setup_status_page(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
	I915_READ(BSD_HWS_PGA);
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static u32
bsd_ring_get_gem_seqno(struct drm_device *dev,
		       struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

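/*
 * Start a batch buffer on the BSD ring.  Unlike the render path below, no
 * cliprects are emitted; the batch is simply pointed at with
 * MI_BATCH_BUFFER_START.
 */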
static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				 struct intel_ring_buffer *ring,
				 struct drm_i915_gem_execbuffer2 *exec,
				 struct drm_clip_rect *cliprects,
				 uint64_t exec_offset)
{
	uint32_t exec_start;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	/* 8 bytes: the batch buffer start command plus the batch address. */
	intel_ring_begin(dev, ring, 8);
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
			(2 << 6) | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);
	return 0;
}

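/*
 * Start a batch buffer on the render ring, once per cliprect (or once if
 * there are none).  830/845 use the MI_BATCH_BUFFER command with an explicit
 * batch end address; everything else uses MI_BATCH_BUFFER_START.
 */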
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_execbuffer2 *exec,
				    struct drm_clip_rect *cliprects,
				    uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			/* 16 bytes: MI_BATCH_BUFFER, start, end and pad dwords. */
			intel_ring_begin(dev, ring, 16);
			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
			intel_ring_emit(dev, ring,
					exec_start | MI_BATCH_NON_SECURE);
			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
			intel_ring_emit(dev, ring, 0);
		} else {
			/* 8 bytes: MI_BATCH_BUFFER_START plus the batch address. */
			intel_ring_begin(dev, ring, 8);
			if (IS_I965G(dev)) {
				intel_ring_emit(dev, ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
				intel_ring_emit(dev, ring, exec_start);
			} else {
				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
						| (2 << 6));
				intel_ring_emit(dev, ring, exec_start |
						MI_BATCH_NON_SECURE);
			}
		}
		intel_ring_advance(dev, ring);
	}

	/* XXX breadcrumb */
	return 0;
}

static void cleanup_status_page(struct drm_device *dev,
				struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;
	obj_priv = to_intel_bo(obj);

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

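/*
 * Allocate, pin and kmap a 4KiB GEM object to serve as the ring's hardware
 * status page, then hand its GTT address to the hardware via
 * ring->setup_status_page().
 */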
static int init_status_page(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj_priv->gtt_offset;
	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		/* don't return the stale 0 from the pin above */
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	ring->setup_status_page(dev, ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

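/*
 * Allocate and map the ring itself: set up the status page if the device
 * needs one, pin a GEM object of ring->size bytes, ioremap it through the
 * GTT aperture, and call the ring's init() hook to program the hardware.
 */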
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	int ret;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	ring->dev = dev;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(dev, ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto cleanup;
	}

	ring->gem_object = obj;

	ret = i915_gem_object_pin(obj, ring->alignment);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		goto cleanup;
	}

	obj_priv = to_intel_bo(obj);
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		ret = -EINVAL;
		goto cleanup;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(dev, ring);
	if (ret != 0) {
		intel_cleanup_ring_buffer(dev, ring);
		return ret;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = ring->get_head(dev, ring);
		ring->tail = ring->get_tail(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	return ret;
cleanup:
	cleanup_status_page(dev, ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
			       struct intel_ring_buffer *ring)
{
	if (ring->gem_object == NULL)
		return;

	drm_core_ioremapfree(&ring->map, dev);

	i915_gem_object_unpin(ring->gem_object);
	drm_gem_object_unreference(ring->gem_object);
	ring->gem_object = NULL;
	cleanup_status_page(dev, ring);
}

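/*
 * Fill the space between the current tail and the end of the ring with
 * MI_NOOPs and wrap the tail back to zero, waiting first if the remainder
 * is not yet free.
 */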
int intel_wrap_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;
	rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(dev, ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 4;
	while (rem--)
		*virt++ = MI_NOOP;

	ring->tail = 0;

	return 0;
}

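/*
 * Busy-wait (yielding) for up to three seconds until at least n bytes of
 * ring space are free, rereading the hardware head each iteration.
 * Returns -EBUSY on timeout.
 */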
int intel_wait_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring, int n)
{
	unsigned long end;

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = ring->get_head(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		yield();
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

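/*
 * Ring emission follows a begin/emit/advance pattern: intel_ring_begin()
 * reserves n bytes of space (wrapping or waiting as needed),
 * intel_ring_emit() writes one dword at the tail, and intel_ring_advance()
 * tells the hardware about the new tail.  See bsd_ring_flush() above for a
 * minimal example.
 */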
void intel_ring_begin(struct drm_device *dev,
		      struct intel_ring_buffer *ring, int n)
{
	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);
}

void intel_ring_emit(struct drm_device *dev,
		     struct intel_ring_buffer *ring, unsigned int data)
{
	unsigned int *virt = ring->virtual_start + ring->tail;
	*virt = data;
	ring->tail += 4;
	ring->tail &= ring->size - 1;
	ring->space -= 4;
}

void intel_ring_advance(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	ring->advance_ring(dev, ring);
}

void intel_fill_struct(struct drm_device *dev,
		       struct intel_ring_buffer *ring,
		       void *data,
		       unsigned int len)
{
	unsigned int *virt = ring->virtual_start + ring->tail;
	BUG_ON((len & (4 - 1)) != 0); /* len must be a multiple of 4 bytes */
	intel_ring_begin(dev, ring, len);
	memcpy(virt, data, len);
	ring->tail += len;
	ring->tail &= ring->size - 1;
	ring->space -= len;
	intel_ring_advance(dev, ring);
}

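/*
 * Hand out the next sequence number for this ring; zero is never returned
 * so that it can be used to mean "no seqno".
 */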
u32 intel_ring_get_seqno(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	u32 seqno;
	seqno = ring->next_seqno;

	/* reserve 0 for non-seqno */
	if (++ring->next_seqno == 0)
		ring->next_seqno = 1;
	return seqno;
}

struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.regs = {
		.ctl = PRB0_CTL,
		.head = PRB0_HEAD,
		.tail = PRB0_TAIL,
		.start = PRB0_START
	},
	.ring_flag = I915_EXEC_RENDER,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.next_seqno = 1,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = render_setup_status_page,
	.init = init_render_ring,
	.get_head = render_ring_get_head,
	.get_tail = render_ring_get_tail,
	.get_active_head = render_ring_get_active_head,
	.advance_ring = render_ring_advance_ring,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_gem_seqno = render_ring_get_gem_seqno,
	.user_irq_get = render_ring_get_user_irq,
	.user_irq_put = render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};

/* ring buffer for bit-stream decoder */

struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.regs = {
		.ctl = BSD_RING_CTL,
		.head = BSD_RING_HEAD,
		.tail = BSD_RING_TAIL,
		.start = BSD_RING_START
	},
	.ring_flag = I915_EXEC_BSD,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.next_seqno = 1,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = bsd_setup_status_page,
	.init = init_bsd_ring,
	.get_head = bsd_ring_get_head,
	.get_tail = bsd_ring_get_tail,
	.get_active_head = bsd_ring_get_active_head,
	.advance_ring = bsd_ring_advance_ring,
	.flush = bsd_ring_flush,
	.add_request = bsd_ring_add_request,
	.get_gem_seqno = bsd_ring_get_gem_seqno,
	.user_irq_get = bsd_ring_get_user_irq,
	.user_irq_put = bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};