/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

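/* Bytes of free space left in the ring; an 8-byte gap is kept so the
 * tail can never fully catch up with the head.
 */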
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) &
	    I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

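/* Common hardware setup shared by all rings: stop the ring, program its
 * start address, reset head and tail, enable it, and verify that the
 * hardware actually came up.
 */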
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 6) {
	} else if (IS_GEN5(dev)) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

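/* Emit a semaphore mailbox update so the other rings can observe this
 * ring's current seqno (gen6+ inter-ring synchronisation).
 */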
static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_get_seqno(ring->dev);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
		struct intel_ring_buffer *signaller,
		int ring,
		u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
		    struct intel_ring_buffer *signaller,
		    u32 seqno)
{
	WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       RCS,
			       seqno);
}

/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       VCS,
			       seqno);
}

/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       BCS,
			       seqno);
}

#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
			PIPE_CONTROL_DEPTH_STALL); \
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
	intel_ring_emit(ring__, 0); \
	intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

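/* Gen6 interrupt helpers: unmask or mask the ring's own IMR as well as the
 * shared GT IMR, refcounted so nested get/put calls stay balanced.
 */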
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

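/* Pad the remainder of the ring with MI_NOOPs and wrap the tail back to
 * the start of the buffer.
 */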
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fall back to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

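/* Reserve space in the ring for num_dwords commands, wrapping and/or
 * waiting for the GPU to drain the ring if necessary.
 */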
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

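/* Static templates for each ring; the intel_init_*_ring_buffer() functions
 * below copy one of these into dev_priv->ring[] and then override the
 * generation-specific hooks.
 */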
static const struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.id = RING_RENDER,
	.mmio_base = RENDER_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_render_ring,
	.write_tail = ring_write_tail,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = render_ring_get_irq,
	.irq_put = render_ring_put_irq,
	.dispatch_execbuffer = render_ring_dispatch_execbuffer,
	.cleanup = render_ring_cleanup,
	.sync_to = render_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
			       MI_SEMAPHORE_SYNC_RV,
			       MI_SEMAPHORE_SYNC_RB},
	.signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.id = RING_BSD,
	.mmio_base = BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = ring_write_tail,
	.flush = bsd_ring_flush,
	.add_request = ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = bsd_ring_get_irq,
	.irq_put = bsd_ring_put_irq,
	.dispatch_execbuffer = ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name = "gen6 bsd ring",
	.id = RING_BSD,
	.mmio_base = GEN6_BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = gen6_bsd_ring_write_tail,
	.flush = gen6_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = gen6_bsd_ring_get_irq,
	.irq_put = gen6_bsd_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.sync_to = gen6_bsd_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_VR,
			       MI_SEMAPHORE_SYNC_INVALID,
			       MI_SEMAPHORE_SYNC_VB},
	.signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some steppings of SNB: each time the BLT engine ring
 * tail is moved, the first command parsed in the ring must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name = "blt ring",
	.id = RING_BLT,
	.mmio_base = BLT_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = blt_ring_init,
	.write_tail = ring_write_tail,
	.flush = blt_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = blt_ring_get_irq,
	.irq_put = blt_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.cleanup = blt_ring_cleanup,
	.sync_to = gen6_blt_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
			       MI_SEMAPHORE_SYNC_BV,
			       MI_SEMAPHORE_SYNC_INVALID},
	.signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev))
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}