/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

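/*
 * Bytes of free space in the ring.  Eight bytes of slack are kept
 * between tail and head so that a completely full ring can never be
 * confused with an empty one (head == tail means empty).
 */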
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) &
	    I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

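/*
 * Bring a ring up from scratch: stop it, program the start address of
 * the backing object, verify that HEAD really did reset to zero (on G45
 * it does not, hence the retry below), then enable the ring and check
 * that it reports valid.
 */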
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}

	/* Only Ironlake needs the PIPE_CONTROL scratch page; gen6+
	 * signals completion via MI_STORE_DWORD_INDEX instead.
	 */
	if (IS_GEN5(dev)) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

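/*
 * Emit an MI_SEMAPHORE_MBOX update of this ring's seqno into the sync
 * register of one of the two other rings; the mapping in the comment
 * below reduces to id = (ring_index + 2 - i) % 3.
 */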
static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs  -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs
	 * bcs -> 1 = cs,  0 = vcs
	 */
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

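/*
 * Stall @ring until the seqno written by @to becomes visible in the
 * shared mailbox register: MI_SEMAPHORE_COMPARE keeps the waiting ring
 * from executing further commands until the mailbox value catches up
 * with @seqno.
 */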
int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

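/*
 * User-interrupt enabling is reference counted per ring: the interrupt
 * source is only unmasked on the 0 -> 1 transition of irq_refcount and
 * masked again on 1 -> 0, all under ring->irq_lock.
 */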
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

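/*
 * Gen6 masks interrupts at two levels: the per-ring IMR (rflag) and
 * the global GTIMR (gflag).  Both must be opened for the ring's user
 * interrupt to be delivered.
 */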
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

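/*
 * The oldest parts (830/845) use the inline MI_BATCH_BUFFER command,
 * which carries both start and end addresses; everything newer uses
 * MI_BATCH_BUFFER_START, with the 965 variant of the non-secure bit
 * from gen4 onwards.
 */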
static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

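/*
 * Common ring setup: a status page where the hardware needs one, a GEM
 * object pinned into the GGTT to back the ring, a write-combining
 * mapping of it through the aperture, and finally the per-ring init
 * hook.
 */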
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

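/*
 * Wait for @n bytes of ring space.  The fast path trusts the head
 * position that the hardware periodically reports into the status page
 * (dword 4, enabled by RING_REPORT_64K above); only if that is stale do
 * we poll the HEAD register itself, for up to three seconds.
 */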
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fallback to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.id = RING_RENDER,
	.mmio_base = RENDER_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_render_ring,
	.write_tail = ring_write_tail,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = render_ring_get_irq,
	.irq_put = render_ring_put_irq,
	.dispatch_execbuffer = render_ring_dispatch_execbuffer,
	.cleanup = render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.id = RING_BSD,
	.mmio_base = BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = ring_write_tail,
	.flush = bsd_ring_flush,
	.add_request = ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = bsd_ring_get_irq,
	.irq_put = bsd_ring_put_irq,
	.dispatch_execbuffer = ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_USER_INTERRUPT,
			  GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_GEN6_BSD_USER_INTERRUPT,
			  GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name = "gen6 bsd ring",
	.id = RING_BSD,
	.mmio_base = GEN6_BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = gen6_bsd_ring_write_tail,
	.flush = gen6_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = gen6_bsd_ring_get_irq,
	.irq_put = gen6_bsd_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some stepping of SNB,
 * each time when BLT engine ring tail moved,
 * the first command in the ring to be parsed
 * should be MI_BATCH_BUFFER_START
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

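/*
 * When the workaround object is in use, prefix the commands with an
 * MI_BATCH_BUFFER_START that dispatches the dummy MI_BATCH_BUFFER_END
 * page, so the first command parsed after the tail move is a batch
 * start as the erratum requires.
 */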
static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, num_dwords);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name = "blt ring",
	.id = RING_BLT,
	.mmio_base = BLT_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = blt_ring_init,
	.write_tail = ring_write_tail,
	.flush = blt_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = blt_ring_get_irq,
	.irq_put = blt_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.cleanup = blt_ring_cleanup,
};

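/*
 * The intel_init_*_ring_buffer() entry points copy one of the static
 * templates above into dev_priv->ring[] and patch in generation-
 * specific hooks before handing over to intel_init_ring_buffer().
 */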
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}