/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

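/* Compute the free space between the hardware read pointer (head) and our
 * software write pointer (tail). The extra 8 bytes of slack keep the tail
 * from ever catching up to the head, since head == tail denotes an empty
 * ring.
 */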
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

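/* Default tail update: write the new tail offset to the ring's TAIL
 * register so the hardware knows there are new commands to execute.
 */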
static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	int ret = 0;
	u32 head;

	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_get(dev_priv);

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, obj->gtt_offset);
	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == obj->gtt_offset &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		ret = -EIO;
		goto out;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
		ring->last_retired_head = -1;
	}

out:
	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_put(dev_priv);

	return ret;
}

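/* Allocate, pin and map the scratch page that PIPE_CONTROL post-sync
 * operations (and the pc_render breadcrumb writes) use as their target.
 */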
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

		/* This is not explicitly set for GEN6, so read the register.
		 * See intel_ring_mi_set_context() for why we care.
		 * TODO: consider explicitly setting the bit for GEN5
		 */
		ring->itlb_before_ctx_switch =
			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (IS_IVYBRIDGE(dev))
		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

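/* Emit a semaphore mailbox update: write @seqno into the given mailbox
 * register of another ring, so that ring can later wait for us to pass
 * this point.
 */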
static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_next_request_seqno(ring);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

/**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
	       struct intel_ring_buffer *signaller,
	       u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(signaller->semaphore_register[waiter->id] ==
		MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter,
			dw1 | signaller->semaphore_register[waiter->id]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
		PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	u32 seqno = i915_gem_next_request_seqno(ring);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to work around the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		intel_ring_get_active_head(ring);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

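/* Emit the breadcrumb for a request: store the seqno into the hardware
 * status page and raise a user interrupt so that waiters are woken up.
 */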
static int
i9xx_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_next_request_seqno(ring);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	/* It looks like we need to prevent the gt from suspending while waiting
	 * for a notify irq, otherwise irqs seem to get lost on at least the
	 * blt/bsd rings on ivb. */
	gen6_gt_force_wake_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
						GEN6_RENDER_L3_PARITY_ERROR));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
		else
			I915_WRITE_IMR(ring, ~0);
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	gen6_gt_force_wake_put(dev_priv);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

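/* On 8xx the MI_BATCH_BUFFER command takes explicit start and end
 * addresses rather than a start address plus length.
 */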
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
	intel_ring_emit(ring, offset + len - 8);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

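/* Allocate, pin and kmap the hardware status page, through which the GPU
 * reports its progress (including the seqno breadcrumbs read by the
 * *_get_seqno functions above) back to the CPU.
 */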
static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
	ring->size = 32 * PAGE_SIZE;

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ring->virtual_start =
		ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
			   ring->size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Work around an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	iounmap(ring->virtual_start);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	iounmap(ring->virtual_start);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

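/* Fill the space remaining at the end of the ring with MI_NOOPs and wrap
 * the tail back around to the start of the buffer.
 */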
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	uint32_t __iomem *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = ring->virtual_start + ring->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	ret = i915_wait_seqno(ring, seqno);
	if (!ret)
		i915_gem_retire_requests_ring(ring);

	return ret;
}

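/* Try to reclaim ring space by waiting for an outstanding request to
 * complete, rather than spinning on the HEAD register: pick the oldest
 * request whose retirement would free at least @n bytes of the ring.
 */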
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	i915_gem_retire_requests_ring(ring);

	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		if (request->tail == -1)
			continue;

		space = request->tail - (ring->tail + 8);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD. It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = intel_ring_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	if (WARN_ON(ring->last_retired_head == -1))
		return -ENOSPC;

	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;
	ring->space = ring_space(ring);
	if (WARN_ON(ring->space < n))
		return -ENOSPC;

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	trace_i915_ring_wait_begin(ring);
	/* With GEM the hangcheck timer should kick us out of the loop;
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);

		ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
		if (ret)
			return ret;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

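/* Reserve space in the ring for num_dwords dwords, wrapping and/or
 * waiting for space to be freed as necessary. On success the caller is
 * expected to emit exactly that many dwords before calling
 * intel_ring_advance().
 */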
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
	if (ret)
		return ret;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

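/* Publish the commands emitted since intel_ring_begin(): fold the tail
 * back into the ring and write it to the hardware, unless this ring has
 * been stopped for debugging via dev_priv->stop_rings.
 */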
Chris Wilson78501ea2010-10-27 12:18:21 +01001264void intel_ring_advance(struct intel_ring_buffer *ring)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001265{
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001266 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1267
Chris Wilsond97ed332010-08-04 15:18:13 +01001268 ring->tail &= ring->size - 1;
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001269 if (dev_priv->stop_rings & intel_ring_flag(ring))
1270 return;
Chris Wilson78501ea2010-10-27 12:18:21 +01001271 ring->write_tail(ring, ring->tail);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001272}

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
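
/*
 * Aside on the _MASKED_BIT_* helpers used above (added note; the macros
 * themselves live in i915_reg.h): the PSMI control register keeps a
 * write-enable mask in its high 16 bits, so a write only affects the low
 * bits whose mask bit is also set.  The helpers expand to roughly
 *
 *	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
 *	#define _MASKED_BIT_DISABLE(a)	((a) << 16)
 *
 * which lets the sequence above flip GEN6_BSD_SLEEP_MSG_DISABLE without
 * disturbing the rest of the register.
 */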

/* Emit an MI_FLUSH_DW on the BSD ring; if any GPU domain is about to be
 * read, also invalidate the ring's TLBs.
 */
static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}
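
/*
 * Illustrative usage (assumed caller, not part of this file): flushes are
 * reached through the ring->flush vfunc, e.g. invalidating before the GPU
 * reads freshly written data:
 *
 *	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
 *	if (ret)
 *		return ret;
 */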

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
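
/*
 * Illustrative usage (assumed caller; batch_obj and args are hypothetical
 * locals standing in for the execbuffer path's state): the batch is
 * dispatched at its GTT address through the vfunc installed below:
 *
 *	ret = ring->dispatch_execbuffer(ring,
 *					batch_obj->gtt_offset +
 *					args->batch_start_offset,
 *					args->batch_len);
 */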

/* Blitter support (SandyBridge+) */

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}
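
/*
 * Added note: this is the same MI_FLUSH_DW sequence as gen6_ring_flush
 * above; the two differ only in which invalidate domain triggers a TLB
 * invalidation (the render domain here; the GPU domains, plus the
 * MI_INVALIDATE_BSD bit, for the BSD ring).
 */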

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
		ring->signal_mbox[0] = GEN6_VRSYNC;
		ring->signal_mbox[1] = GEN6_BRSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	if (!I915_NEED_GFX_HWS(dev))
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->virtual_start = ioremap_wc(start, size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special workaround for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
		ring->signal_mbox[0] = GEN6_RVSYNC;
		ring->signal_mbox[1] = GEN6_BVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = blt_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
	ring->irq_get = gen6_ring_get_irq;
	ring->irq_put = gen6_ring_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[0] = GEN6_RBSYNC;
	ring->signal_mbox[1] = GEN6_VBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}
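
/*
 * Illustrative sketch (assumed caller, not part of this file): the KMS
 * init path brings the rings up along these lines, gating the BSD and
 * blitter rings on hardware support; the cleanup labels are hypothetical:
 *
 *	ret = intel_init_render_ring_buffer(dev);
 *	if (ret)
 *		return ret;
 *
 *	if (HAS_BSD(dev)) {
 *		ret = intel_init_bsd_ring_buffer(dev);
 *		if (ret)
 *			goto cleanup_render_ring;
 *	}
 *
 *	if (HAS_BLT(dev)) {
 *		ret = intel_init_blt_ring_buffer(dev);
 *		if (ret)
 *			goto cleanup_bsd_ring;
 *	}
 */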