/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static void
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
		    (IS_G4X(dev) || IS_GEN5(dev)))
			cmd |= MI_INVALIDATE_ISP;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		if (intel_ring_begin(ring, 2) == 0) {
			intel_ring_emit(ring, cmd);
			intel_ring_emit(ring, MI_NOOP);
			intel_ring_advance(ring);
		}
	}
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		DRM_ERROR("%s head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
	} else if (HAS_PIPE_CONTROL(dev)) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs,
	 * bcs -> 1 = cs, 0 = vcs.
	 */
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

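/*
 * intel_ring_sync - stall @ring until @to has passed @seqno
 *
 * Emits an MI_SEMAPHORE_MBOX compare so that @ring waits in its command
 * stream until the semaphore mailbox updated by @to reaches @seqno.
 */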
int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL | 2);					\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);		\
	intel_ring_emit(ring__, 0);						\
	intel_ring_emit(ring__, 0);						\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/*
	 * Workaround qword write incoherence by flushing the
	 * PIPE_NOTIFY buffers out to memory before requesting
	 * an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	if (dev->irq_enabled && ++ring->irq_refcount == 1) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv,
						     GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);

		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	BUG_ON(dev->irq_enabled && ring->irq_refcount == 0);
	if (dev->irq_enabled && --ring->irq_refcount == 0) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv,
						      GT_USER_INTERRUPT |
						      GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = IS_GEN6(ring->dev) ?
		RING_HWS_PGA_GEN6(ring->mmio_base) :
		RING_HWS_PGA(ring->mmio_base);
	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static void
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return;

	if (intel_ring_begin(ring, 2) == 0) {
		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
	*result = seqno;
	return 0;
}

static void
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;

	if (dev->irq_enabled && ++ring->irq_refcount == 1) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_graphics_irq(dev_priv, flag);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;

	if (dev->irq_enabled && --ring->irq_refcount == 0) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_disable_graphics_irq(dev_priv, flag);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}


static void
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_buffer(ring, ring->size - 8);
	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

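/* Fill the remainder of the ring with MI_NOOPs and wrap the tail back to 0. */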
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;
	rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	head = intel_read_status_page(ring, 4);
	if (head) {
		ring->head = head & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

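/*
 * Reserve space for @num_dwords dwords in the ring, wrapping past the end
 * of the buffer and/or waiting for entries to be consumed as required.
 * The caller is expected to emit exactly that many dwords and then call
 * intel_ring_advance().
 */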
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	int n = 4*num_dwords;
	int ret;

	if (unlikely(ring->tail + n > ring->size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= render_ring_get_irq,
	.irq_put		= render_ring_put_irq,
	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
	.cleanup		= render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name			= "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
	.flush			= bsd_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= bsd_ring_get_irq,
	.irq_put		= bsd_ring_put_irq,
	.dispatch_execbuffer	= ring_dispatch_execbuffer,
};


static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static void gen6_ring_flush(struct intel_ring_buffer *ring,
			    u32 invalidate_domains,
			    u32 flush_domains)
{
	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return;

	if (intel_ring_begin(ring, 4) == 0) {
		intel_ring_emit(ring, MI_FLUSH_DW);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_advance(ring);
	}
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static void
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= gen6_bsd_ring_get_irq,
	.irq_put		= gen6_bsd_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
};

/* Blitter support (SandyBridge+) */

static void
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
}


/* Workaround for some stepping of SNB,
 * each time when BLT engine ring tail moved,
 * the first command in the ring to be parsed
 * should be MI_BATCH_BUFFER_START
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static void blt_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate_domains,
			   u32 flush_domains)
{
	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return;

	if (blt_ring_begin(ring, 4) == 0) {
		intel_ring_emit(ring, MI_FLUSH_DW);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 0);
		intel_ring_advance(ring);
	}
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name			= "blt ring",
	.id			= RING_BLT,
	.mmio_base		= BLT_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= blt_ring_init,
	.write_tail		= ring_write_tail,
	.flush			= blt_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= blt_ring_get_irq,
	.irq_put		= blt_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
	.cleanup		= blt_ring_cleanup,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
	} else if (HAS_PIPE_CONTROL(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}