/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

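/*
 * A sketch of the arithmetic below: the ring is a circular buffer, so the
 * bytes free for new commands are head - tail modulo size, minus 8 bytes
 * of pad so that the tail can never be advanced right up to the head.
 * For example, with size 4096, head 512 and tail 3584:
 * (512 - (3584 + 8)) + 4096 = 1016 bytes free.
 */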
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains | flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

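/*
 * Common ring bring-up, in outline: stop the ring (clear CTL, HEAD and
 * TAIL), program START with the GTT offset of the ring object, work
 * around G45 parts whose head refuses to reset, then write CTL with the
 * buffer length in pages plus RING_REPORT_64K | RING_VALID.  If
 * CTL/START/HEAD do not read back as programmed, the ring is treated as
 * dead and -EIO is returned.
 */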
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

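/*
 * Sketch of the scratch page used below: init_pipe_control pins a single
 * 4096-byte cacheable GEM object and kmaps it.  dword 0 of the page is
 * where pc_render_add_request has PIPE_CONTROL write the seqno (read back
 * by pc_render_get_seqno), while the qword-write workaround scribbles into
 * the same page at 128-byte strides so each dummy flush lands in its own
 * cacheline.
 */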
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
	} else if (IS_GEN5(dev)) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs,
	 * bcs -> 1 = cs, 0 = vcs.
	 */
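	/*
	 * Worked example of the table above: dev_priv->ring[] is ordered
	 * cs (0), vcs (1), bcs (2), so for the render ring
	 * id = (0 + 2 - i) % 3, giving bcs for i = 0 and vcs for i = 1,
	 * matching the mapping.
	 */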
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |	\
			PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);		\
	intel_ring_emit(ring__, 0);						\
	intel_ring_emit(ring__, 0);						\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

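/*
 * The irq_get/irq_put pairs below are refcounted under ring->irq_lock:
 * only the 0 -> 1 transition unmasks the interrupt and only the final
 * 1 -> 0 transition masks it again, so nested waiters on the same ring
 * share a single unmasked interrupt source.
 */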
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = IS_GEN6(ring->dev) ?
		RING_HWS_PGA_GEN6(ring->mmio_base) :
		RING_HWS_PGA(ring->mmio_base);
	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0)
		ironlake_enable_irq(dev_priv, flag);
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0)
		ironlake_disable_irq(dev_priv, flag);
	spin_unlock(&ring->irq_lock);
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

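	/*
	 * For example, the default 32 * PAGE_SIZE (128 KiB) ring on an
	 * i830 therefore advertises 128 bytes (two cachelines) less than
	 * its allocation; intel_ring_begin() wraps early using
	 * effective_size so the TAIL never lands in the danger zone.
	 */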
	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fallback to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

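/*
 * Reserve space for @num_dwords and, on success, leave the ring ready
 * for that many intel_ring_emit() calls.  A typical caller, as a sketch
 * (this is the pattern bsd_ring_flush() above follows):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * Callers in this file pad to an even number of dwords with MI_NOOP,
 * keeping the tail qword-aligned.
 */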
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= render_ring_get_irq,
	.irq_put		= render_ring_put_irq,
	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
	.cleanup		= render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name			= "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
	.flush			= bsd_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= bsd_ring_get_irq,
	.irq_put		= bsd_ring_put_irq,
	.dispatch_execbuffer	= ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

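/*
 * MI_FLUSH_DW is emitted below as a fixed four-dword packet.  The two
 * zero dwords are presumably the unused post-sync address/value pair (an
 * assumption from the zeroed fields; no post-sync write is requested
 * here), and the trailing MI_NOOP pads the emission to an even dword
 * count.
 */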
static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_USER_INTERRUPT,
			  GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_GEN6_BSD_USER_INTERRUPT,
			  GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= gen6_bsd_ring_get_irq,
	.irq_put		= gen6_bsd_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some steppings of SNB: whenever the BLT engine's ring
 * tail is moved, the first command parsed in the ring must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

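/*
 * In outline: blt_ring_init() pins a 4096-byte object holding just
 * MI_BATCH_BUFFER_END; MI_NOOP, and blt_ring_begin() prepends every
 * emission with an MI_BATCH_BUFFER_START pointing at it, so the first
 * command the parser sees after any tail move is always a batch start.
 */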
static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name			= "blt ring",
	.id			= RING_BLT,
	.mmio_base		= BLT_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= blt_ring_init,
	.write_tail		= ring_write_tail,
	.flush			= blt_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= blt_ring_get_irq,
	.irq_put		= blt_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
	.cleanup		= blt_ring_cleanup,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}