/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}
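
/*
 * Seqnos increase monotonically and wrap as a plain u32; zero is
 * skipped above so that a zero seqno can be used elsewhere to mean
 * "no request outstanding".
 */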

static void
render_ring_flush(struct drm_device *dev,
		  struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, cmd);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
}
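
/*
 * Hypothetical caller sketch, not code from this file: to flush
 * render-cache writes and invalidate the sampler cache before a
 * buffer is reused as a texture, a caller might issue
 *
 *	ring->flush(dev, ring,
 *		    I915_GEM_DOMAIN_SAMPLER,
 *		    I915_GEM_DOMAIN_RENDER);
 *
 * which the logic above turns into an MI_FLUSH with MI_NO_WRITE_FLUSH
 * cleared (render writes must land) and, pre-965, MI_READ_FLUSH set.
 */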

static void ring_set_tail(struct drm_device *dev,
			  struct intel_ring_buffer *ring,
			  u32 value)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Write the value we were asked for, not the cached copy:
	 * init_ring_common() passes 0 here to reset the ring. */
	I915_WRITE_TAIL(ring, value);
}

static unsigned int render_ring_get_active_head(struct drm_device *dev,
						struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	u32 head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	obj_priv = to_intel_bo(ring->gem_object);

	/* Stop the ring if it's running. */
	I915_WRITE(ring->regs.ctl, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->set_tail(dev, ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj_priv->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ(ring->regs.ctl),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		DRM_ERROR("%s head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ(ring->regs.ctl),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
	}

	I915_WRITE(ring->regs.ctl,
		   ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
		   | RING_NO_REPORT | RING_VALID);

	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ(ring->regs.ctl),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return 0;
}
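
/*
 * A note on the free-space arithmetic above (repeated in
 * intel_wait_ring_buffer() and intel_init_ring_buffer()): space is
 * head - (tail + 8), wrapped modulo the ring size.  Keeping 8 bytes
 * in hand means the software tail can never advance onto the hardware
 * head, so a completely full ring is never mistaken for an empty one.
 */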

static int init_render_ring(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = init_ring_common(dev, ring);
	int mode;

	if (INTEL_INFO(dev)->gen > 3) {
		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}
	return ret;
}

#define PIPE_CONTROL_FLUSH(addr)					\
do {									\
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
	OUT_RING(0);							\
	OUT_RING(0);							\
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			struct drm_file *file_priv,
			u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	if (IS_GEN6(dev)) {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
		OUT_RING(PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else if (HAS_PIPE_CONTROL(dev)) {
		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

		/*
		 * Workaround qword write incoherence by flushing the
		 * PIPE_NOTIFY buffers out to memory before requesting
		 * an interrupt.
		 */
		BEGIN_LP_RING(32);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128; /* write to separate cachelines */
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(4);
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(seqno);

		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}
	return seqno;
}

static u32
render_ring_get_gem_seqno(struct drm_device *dev,
			  struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void render_setup_status_page(struct drm_device *dev,
				     struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (IS_GEN6(dev)) {
		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA_GEN6); /* posting read */
	} else {
		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA); /* posting read */
	}
}
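
/*
 * The read back after each status-page write above (and in the BSD
 * variants below) is a posting read: it forces the write out of the
 * CPU and chipset write buffers so the hardware sees the new address
 * before the ring is next used.
 */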

static void
bsd_ring_flush(struct drm_device *dev,
	       struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);
}

static unsigned int bsd_ring_get_active_head(struct drm_device *dev,
					     struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_ACTHD);
}

static int init_bsd_ring(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	return init_ring_common(dev, ring);
}

static u32
bsd_ring_add_request(struct drm_device *dev,
		     struct intel_ring_buffer *ring,
		     struct drm_file *file_priv,
		     u32 flush_domains)
{
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

	return seqno;
}

static void bsd_setup_status_page(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
	I915_READ(BSD_HWS_PGA); /* posting read */
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static u32
bsd_ring_get_gem_seqno(struct drm_device *dev,
		       struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				 struct intel_ring_buffer *ring,
				 struct drm_i915_gem_execbuffer2 *exec,
				 struct drm_clip_rect *cliprects,
				 uint64_t exec_offset)
{
	uint32_t exec_start;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
			(2 << 6) | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);
	return 0;
}

static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_execbuffer2 *exec,
				    struct drm_clip_rect *cliprects,
				    uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			intel_ring_begin(dev, ring, 4);
			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
			intel_ring_emit(dev, ring,
					exec_start | MI_BATCH_NON_SECURE);
			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
			intel_ring_emit(dev, ring, 0);
		} else {
			intel_ring_begin(dev, ring, 4);
			if (INTEL_INFO(dev)->gen >= 4) {
				intel_ring_emit(dev, ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
				intel_ring_emit(dev, ring, exec_start);
			} else {
				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
						| (2 << 6));
				intel_ring_emit(dev, ring, exec_start |
						MI_BATCH_NON_SECURE);
			}
		}
		intel_ring_advance(dev, ring);
	}

	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, MI_FLUSH |
				MI_NO_WRITE_FLUSH |
				MI_INVALIDATE_ISP);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
	/* XXX breadcrumb */

	return 0;
}

static void cleanup_status_page(struct drm_device *dev,
				struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;
	obj_priv = to_intel_bo(obj);

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj_priv->gtt_offset;
	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		/* ret is still 0 from the pin above; don't report success */
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	ring->setup_status_page(dev, ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	int ret;

	ring->dev = dev;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(dev, ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->gem_object = obj;

	ret = i915_gem_object_pin(obj, ring->alignment);
	if (ret)
		goto err_unref;

	obj_priv = to_intel_bo(obj);
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(dev, ring);
	if (ret)
		goto err_unmap;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	return ret;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
	ring->gem_object = NULL;
err_hws:
	cleanup_status_page(dev, ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
			       struct intel_ring_buffer *ring)
{
	if (ring->gem_object == NULL)
		return;

	drm_core_ioremapfree(&ring->map, dev);

	i915_gem_object_unpin(ring->gem_object);
	drm_gem_object_unreference(ring->gem_object);
	ring->gem_object = NULL;
	cleanup_status_page(dev, ring);
}

int intel_wrap_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;
	rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(dev, ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

int intel_wait_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring, int n)
{
	unsigned long end;
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		yield();
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

void intel_ring_begin(struct drm_device *dev,
		      struct intel_ring_buffer *ring,
		      int num_dwords)
{
	int n = 4 * num_dwords;

	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);

	ring->space -= n;
}

void intel_ring_advance(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->set_tail(dev, ring, ring->tail);
}
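
/*
 * Illustrative sketch (not code from this driver): every command
 * emitter above follows the same pattern.  intel_ring_begin() reserves
 * space, wrapping or waiting as needed, and charges ring->space;
 * intel_ring_emit() writes one dword at the cached tail; and
 * intel_ring_advance() publishes the new tail to the hardware through
 * ring->set_tail.  A hypothetical two-dword no-op emit:
 *
 *	intel_ring_begin(dev, ring, 2);
 *	intel_ring_emit(dev, ring, MI_NOOP);
 *	intel_ring_emit(dev, ring, MI_NOOP);
 *	intel_ring_advance(dev, ring);
 */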

void intel_fill_struct(struct drm_device *dev,
		       struct intel_ring_buffer *ring,
		       void *data,
		       unsigned int len)
{
	unsigned int *virt;

	BUG_ON((len & (4 - 1)) != 0); /* must be a whole number of dwords */

	/* Reserve the space first: intel_ring_begin() may wrap or wait,
	 * moving ring->tail, and it already charges ring->space for us. */
	intel_ring_begin(dev, ring, len/4);
	virt = ring->virtual_start + ring->tail;
	memcpy(virt, data, len);
	ring->tail += len;
	ring->tail &= ring->size - 1;
	intel_ring_advance(dev, ring);
}

static const struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.id = RING_RENDER,
	.regs = {
		.ctl = PRB0_CTL,
	},
	.mmio_base = RENDER_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = render_setup_status_page,
	.init = init_render_ring,
	.set_tail = ring_set_tail,
	.get_active_head = render_ring_get_active_head,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_gem_seqno = render_ring_get_gem_seqno,
	.user_irq_get = render_ring_get_user_irq,
	.user_irq_put = render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.id = RING_BSD,
	.regs = {
		.ctl = BSD_RING_CTL,
	},
	.mmio_base = BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = bsd_setup_status_page,
	.init = init_bsd_ring,
	.set_tail = ring_set_tail,
	.get_active_head = bsd_ring_get_active_head,
	.flush = bsd_ring_flush,
	.add_request = bsd_ring_add_request,
	.get_gem_seqno = bsd_ring_get_gem_seqno,
	.user_irq_get = bsd_ring_get_user_irq,
	.user_irq_put = bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};

static void gen6_bsd_setup_status_page(struct drm_device *dev,
				       struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(GEN6_BSD_HWS_PGA, ring->status_page.gfx_addr);
	I915_READ(GEN6_BSD_HWS_PGA); /* posting read */
}

static void gen6_bsd_ring_set_tail(struct drm_device *dev,
				   struct intel_ring_buffer *ring,
				   u32 value)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev,
						  struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(GEN6_BSD_RING_ACTHD);
}

static void gen6_bsd_ring_flush(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				u32 invalidate_domains,
				u32 flush_domains)
{
	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_FLUSH_DW);
	intel_ring_emit(dev, ring, 0);
	intel_ring_emit(dev, ring, 0);
	intel_ring_emit(dev, ring, 0);
	intel_ring_advance(dev, ring);
}

static int
gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				      struct intel_ring_buffer *ring,
				      struct drm_i915_gem_execbuffer2 *exec,
				      struct drm_clip_rect *cliprects,
				      uint64_t exec_offset)
{
	uint32_t exec_start;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring,
			/* bit0-7 is the length on GEN6+ */
			MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);
	return 0;
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name = "gen6 bsd ring",
	.id = RING_BSD,
	.regs = {
		.ctl = GEN6_BSD_RING_CTL,
	},
	.mmio_base = GEN6_BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = gen6_bsd_setup_status_page,
	.init = init_bsd_ring,
	.set_tail = gen6_bsd_ring_set_tail,
	.get_active_head = gen6_bsd_ring_get_active_head,
	.flush = gen6_bsd_ring_flush,
	.add_request = bsd_ring_add_request,
	.get_gem_seqno = bsd_ring_get_gem_seqno,
	.user_irq_get = bsd_ring_get_user_irq,
	.user_irq_put = bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};
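
/*
 * The const ring definitions above act as templates: the init
 * functions below copy them by value into dev_priv, and the
 * per-device state (status page, GEM objects, head/tail) is filled
 * in later by intel_init_ring_buffer().
 */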

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->render_ring = render_ring;

	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->render_ring.status_page.page_addr
			= dev_priv->status_page_dmah->vaddr;
		memset(dev_priv->render_ring.status_page.page_addr,
		       0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (IS_GEN6(dev))
		dev_priv->bsd_ring = gen6_bsd_ring;
	else
		dev_priv->bsd_ring = bsd_ring;

	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
}