/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

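/*
 * Translate the GEM domains that need invalidating or flushing into an
 * MI_FLUSH command and queue it on the render ring, following the
 * per-generation rules spelled out in the comment below.
 */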
static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;
	int ret;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
		    (IS_G4X(dev) || IS_GEN5(dev)))
			cmd |= MI_INVALIDATE_ISP;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, cmd);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

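/*
 * Bring a ring up from an unknown state: stop it, program its start
 * address, force the head back to zero (G45 is known to skip that
 * reset), enable it, and verify that the hardware agrees before use.
 */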
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

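/*
 * Allocate and pin a single cached page for PIPE_CONTROL writes,
 * recording both its GTT offset (for the GPU) and a kernel mapping
 * (for the CPU) in ring->private.
 */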
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
	} else if (IS_GEN5(dev)) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

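/*
 * Post this ring's new seqno into the semaphore mailbox register of one
 * of the two other rings (i selects which, per the mapping below), so
 * that ring can later wait on it.
 */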
static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs,
	 * bcs -> 1 = cs, 0 = vcs.
	 */
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

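/*
 * Gen6 request emission: broadcast the new seqno to both other rings'
 * semaphore mailboxes, then write it to the hardware status page and
 * raise a user interrupt.
 */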
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

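/*
 * Emit a semaphore wait on @ring that stalls it until @to has passed
 * @seqno, letting the rings synchronise with each other on the GPU
 * rather than blocking the CPU.
 */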
int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

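/*
 * Emit one depth-stalled PIPE_CONTROL qword write to the given scratch
 * address; pc_render_add_request() uses a chain of these to flush the
 * PIPE_NOTIFY write buffers before raising its interrupt.
 */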
#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |	\
			PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);		\
	intel_ring_emit(ring__, 0);						\
	intel_ring_emit(ring__, 0);						\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

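/*
 * Legacy (pre-gen6) request emission: store the new seqno into the
 * hardware status page and follow it with a user interrupt.
 */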
static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

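/*
 * Reference-counted interrupt enabling: only the first getter actually
 * unmasks the render ring's user interrupt, and only the last putter
 * masks it again.
 */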
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	if (!dev->irq_enabled)
		return false;

	if (atomic_inc_return(&ring->irq_refcount) == 1) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	if (atomic_dec_and_test(&ring->irq_refcount)) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

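/*
 * Tell the hardware where this ring's status page lives; gen6 moved the
 * per-ring HWS_PGA register, hence the two variants.
 */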
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = IS_GEN6(ring->dev) ?
		RING_HWS_PGA_GEN6(ring->mmio_base) :
		RING_HWS_PGA(ring->mmio_base);
	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return 0;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
	*result = seqno;
	return 0;
}

static bool
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;

	if (!dev->irq_enabled)
		return false;

	if (atomic_inc_return(&ring->irq_refcount) == 1) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_irq(dev_priv, flag);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return true;
}

static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;

	if (atomic_dec_and_test(&ring->irq_refcount)) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_disable_irq(dev_priv, flag);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;

	if (!dev->irq_enabled)
		return false;

	if (atomic_inc_return(&ring->irq_refcount) == 1) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;

	if (atomic_dec_and_test(&ring->irq_refcount)) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

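/*
 * Common ring construction: set up the status page if the hardware
 * needs one, allocate and pin the ring object, map it write-combined
 * through the GTT, and finish with the ring's own init hook.
 */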
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_buffer(ring, ring->size - 8);
	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

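/*
 * Fill the space left at the end of the ring with MI_NOOPs and wrap the
 * tail back to the start, first waiting for the head to move out of the
 * way if necessary.
 */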
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		/* If the reported head position has wrapped or hasn't advanced,
		 * fallback to the slow and accurate path.
		 */
		head = intel_read_status_page(ring, 4);
		if (head < ring->actual_head)
			head = I915_READ_HEAD(ring);
		ring->actual_head = head;
		ring->head = head & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

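/*
 * Reserve space for num_dwords commands, wrapping the ring and/or
 * waiting for the GPU to consume it as needed. The caller then emits
 * exactly that many dwords and finishes with intel_ring_advance().
 */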
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	int n = 4*num_dwords;
	int ret;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= render_ring_get_irq,
	.irq_put		= render_ring_put_irq,
	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
	.cleanup		= render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name			= "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
	.flush			= bsd_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= bsd_ring_get_irq,
	.irq_put		= bsd_ring_put_irq,
	.dispatch_execbuffer	= ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate_domains,
			   u32 flush_domains)
{
	int ret;

	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return 0;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH_DW);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_USER_INTERRUPT,
			  GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_GEN6_BSD_USER_INTERRUPT,
			  GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= gen6_bsd_ring_get_irq,
	.irq_put		= gen6_bsd_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some steppings of SNB: each time the BLT engine's ring
 * tail is moved, the first command parsed in the ring must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

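/*
 * On steppings that need the workaround, prefix every command sequence
 * with an MI_BATCH_BUFFER_START pointing at the batch allocated in
 * blt_ring_init(), reserving two extra dwords for it.
 */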
static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, num_dwords);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate_domains,
			  u32 flush_domains)
{
	int ret;

	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return 0;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH_DW);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name			= "blt ring",
	.id			= RING_BLT,
	.mmio_base		= BLT_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= blt_ring_init,
	.write_tail		= ring_write_tail,
	.flush			= blt_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= blt_ring_get_irq,
	.irq_put		= blt_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
	.cleanup		= blt_ring_cleanup,
};

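/*
 * Instantiate the render ring from the template above, overriding the
 * request and interrupt hooks for gen5 (PIPE_CONTROL-based) and gen6+
 * (semaphore-aware) hardware.
 */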
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}