/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

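/*
 * Free space between the hardware read pointer (head) and our software
 * write pointer (tail).  Worked example with illustrative values: for a
 * 32KiB ring with head == 0x100 and tail == 0x7f00,
 *
 *	0x100 - (0x7f00 + 8) = -0x7e08, + 0x8000 = 0x1f8 bytes free.
 *
 * The extra 8 bytes keep tail from ever becoming equal to head, which
 * the hardware would otherwise read as a completely empty ring.
 */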
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	flags |= PIPE_CONTROL_TLB_INVALIDATE;
	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* lower dword */
	intel_ring_emit(ring, 0); /* upper dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == obj->gtt_offset &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

		/* This is not explicitly set for GEN6, so read the register.
		 * See intel_ring_mi_set_context() for why we care.
		 * TODO: consider explicitly setting the bit for GEN5
		 */
		ring->itlb_before_ctx_switch =
			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (IS_IVYBRIDGE(dev))
		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_next_request_seqno(ring);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

/**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
	       struct intel_ring_buffer *signaller,
	       u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(signaller->semaphore_register[waiter->id] ==
		MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter,
			dw1 | signaller->semaphore_register[waiter->id]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
		 PIPE_CONTROL_DEPTH_STALL); \
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
	intel_ring_emit(ring__, 0); \
	intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	u32 seqno = i915_gem_next_request_seqno(ring);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		intel_ring_get_active_head(ring);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_next_request_seqno(ring);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	/* It looks like we need to prevent the gt from suspending while waiting
	 * for a notify irq, otherwise irqs seem to get lost on at least the
	 * blt/bsd rings on ivb. */
	gen6_gt_force_wake_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
						GEN6_RENDER_L3_PARITY_ERROR));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
		else
			I915_WRITE_IMR(ring, ~0);
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	gen6_gt_force_wake_put(dev_priv);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
	intel_ring_emit(ring, offset + len - 8);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
	ring->size = 32 * PAGE_SIZE;

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->virtual_start =
		ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
			   ring->size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	iounmap(ring->virtual_start);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	iounmap(ring->virtual_start);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

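/*
 * When the next request would run past the end of the ring, pad the
 * remainder with MI_NOOPs (waiting for the hardware to drain enough
 * of the ring first if necessary) and wrap tail back to the start.
 */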
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	uint32_t __iomem *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = ring->virtual_start + ring->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool was_interruptible;
	int ret;

	/* XXX As we have not yet audited all the paths to check that
	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
	 * allow us to be interruptible by a signal.
	 */
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_wait_seqno(ring, seqno);

	dev_priv->mm.interruptible = was_interruptible;
	if (!ret)
		i915_gem_retire_requests_ring(ring);

	return ret;
}

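/*
 * Reclaim ring space by retiring completed requests; if that still
 * leaves less than n bytes free, pick the oldest outstanding request
 * whose recorded tail would free enough space and wait for its seqno.
 */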
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	i915_gem_retire_requests_ring(ring);

	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		if (request->tail == -1)
			continue;

		space = request->tail - (ring->tail + 8);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD. It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = intel_ring_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	if (WARN_ON(ring->last_retired_head == -1))
		return -ENOSPC;

	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;
	ring->space = ring_space(ring);
	if (WARN_ON(ring->space < n))
		return -ENOSPC;

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	trace_i915_ring_wait_begin(ring);
	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

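/*
 * intel_ring_begin() reserves space for a number of dwords, wrapping
 * the buffer or waiting for the hardware to drain the ring as needed;
 * callers then write exactly that many dwords with intel_ring_emit()
 * and publish the new tail to the hardware with intel_ring_advance().
 * The pattern, as used throughout this file:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */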
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	ring->tail &= ring->size - 1;
	if (dev_priv->stop_rings & intel_ring_flag(ring))
		return;
	ring->write_tail(ring, ring->tail);
}
1250
Xiang, Haihao881f47b2010-09-19 14:40:43 +01001251
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

1295static int
Chris Wilson78501ea2010-10-27 12:18:21 +01001296gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
Chris Wilsonc4e7a412010-11-30 14:10:25 +00001297 u32 offset, u32 len)
Xiang, Haihao881f47b2010-09-19 14:40:43 +01001298{
Akshay Joshi0206e352011-08-16 15:34:10 -04001299 int ret;
Chris Wilsonab6f8e32010-09-19 17:53:44 +01001300
Akshay Joshi0206e352011-08-16 15:34:10 -04001301 ret = intel_ring_begin(ring, 2);
1302 if (ret)
1303 return ret;
Chris Wilsone1f99ce2010-10-27 12:45:26 +01001304
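	/* MI_BATCH_NON_SECURE_I965 marks the batch as non-privileged,
	 * so its commands execute with user rather than secure
	 * privileges. */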
Akshay Joshi0206e352011-08-16 15:34:10 -04001305 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1306	/* bits 0-7 hold the command's DWord length field on GEN6+ */
1307 intel_ring_emit(ring, offset);
1308 intel_ring_advance(ring);
Chris Wilsonab6f8e32010-09-19 17:53:44 +01001309
Akshay Joshi0206e352011-08-16 15:34:10 -04001310 return 0;
Xiang, Haihao881f47b2010-09-19 14:40:43 +01001311}
1312
Chris Wilson549f7362010-10-19 11:19:32 +01001313/* Blitter support (SandyBridge+) */
1314
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001315static int blt_ring_flush(struct intel_ring_buffer *ring,
Chris Wilson71a77e02011-02-02 12:13:49 +00001316 u32 invalidate, u32 flush)
Zou Nan hai8d192152010-11-02 16:31:01 +08001317{
Chris Wilson71a77e02011-02-02 12:13:49 +00001318 uint32_t cmd;
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001319 int ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001320
Daniel Vetter6a233c72011-12-14 13:57:07 +01001321 ret = intel_ring_begin(ring, 4);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001322 if (ret)
1323 return ret;
1324
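	/* Same MI_FLUSH_DW dance as the BSD ring, except the blitter
	 * requests a TLB invalidate only for render-domain
	 * invalidations. */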
Chris Wilson71a77e02011-02-02 12:13:49 +00001325 cmd = MI_FLUSH_DW;
1326 if (invalidate & I915_GEM_DOMAIN_RENDER)
1327 cmd |= MI_INVALIDATE_TLB;
1328 intel_ring_emit(ring, cmd);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001329 intel_ring_emit(ring, 0);
1330 intel_ring_emit(ring, 0);
Chris Wilson71a77e02011-02-02 12:13:49 +00001331 intel_ring_emit(ring, MI_NOOP);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001332 intel_ring_advance(ring);
1333 return 0;
Zou Nan hai8d192152010-11-02 16:31:01 +08001334}
1335
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001336int intel_init_render_ring_buffer(struct drm_device *dev)
1337{
1338 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001339 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001340
Daniel Vetter59465b52012-04-11 22:12:48 +02001341 ring->name = "render ring";
1342 ring->id = RCS;
1343 ring->mmio_base = RENDER_RING_BASE;
1344
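	/* Wire up the generation-specific vfuncs: gen6+ gets the
	 * MI_SEMAPHORE based inter-ring synchronisation, gen5 the
	 * PIPE_CONTROL based request/seqno handling, and everything
	 * older the legacy MI paths. */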
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001345 if (INTEL_INFO(dev)->gen >= 6) {
1346 ring->add_request = gen6_add_request;
Jesse Barnes8d315282011-10-16 10:23:31 +02001347 ring->flush = gen6_render_ring_flush;
Ben Widawsky25c06302012-03-29 19:11:27 -07001348 ring->irq_get = gen6_ring_get_irq;
1349 ring->irq_put = gen6_ring_put_irq;
Daniel Vetter6a848cc2012-04-11 22:12:46 +02001350 ring->irq_enable_mask = GT_USER_INTERRUPT;
Daniel Vetter4cd53c02012-12-14 16:01:25 +01001351 ring->get_seqno = gen6_ring_get_seqno;
Daniel Vetter686cb5f2012-04-11 22:12:52 +02001352 ring->sync_to = gen6_ring_sync;
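		/* semaphore_register[i] is the MI_SEMAPHORE_SYNC mailbox
		 * this ring waits on for ring i; the slot for the ring
		 * itself stays INVALID. */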
Daniel Vetter59465b52012-04-11 22:12:48 +02001353 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
1354 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
1355 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
1356 ring->signal_mbox[0] = GEN6_VRSYNC;
1357 ring->signal_mbox[1] = GEN6_BRSYNC;
Chris Wilsonc6df5412010-12-15 09:56:50 +00001358 } else if (IS_GEN5(dev)) {
1359 ring->add_request = pc_render_add_request;
Chris Wilson46f0f8d2012-04-18 11:12:11 +01001360 ring->flush = gen4_render_ring_flush;
Chris Wilsonc6df5412010-12-15 09:56:50 +00001361 ring->get_seqno = pc_render_get_seqno;
Daniel Vettere48d8632012-04-11 22:12:54 +02001362 ring->irq_get = gen5_ring_get_irq;
1363 ring->irq_put = gen5_ring_put_irq;
Daniel Vettere3670312012-04-11 22:12:53 +02001364 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
Daniel Vetter59465b52012-04-11 22:12:48 +02001365 } else {
Daniel Vetter8620a3a2012-04-11 22:12:57 +02001366 ring->add_request = i9xx_add_request;
Chris Wilson46f0f8d2012-04-18 11:12:11 +01001367 if (INTEL_INFO(dev)->gen < 4)
1368 ring->flush = gen2_render_ring_flush;
1369 else
1370 ring->flush = gen4_render_ring_flush;
Daniel Vetter59465b52012-04-11 22:12:48 +02001371 ring->get_seqno = ring_get_seqno;
Chris Wilsonc2798b12012-04-22 21:13:57 +01001372 if (IS_GEN2(dev)) {
1373 ring->irq_get = i8xx_ring_get_irq;
1374 ring->irq_put = i8xx_ring_put_irq;
1375 } else {
1376 ring->irq_get = i9xx_ring_get_irq;
1377 ring->irq_put = i9xx_ring_put_irq;
1378 }
Daniel Vettere3670312012-04-11 22:12:53 +02001379 ring->irq_enable_mask = I915_USER_INTERRUPT;
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001380 }
Daniel Vetter59465b52012-04-11 22:12:48 +02001381 ring->write_tail = ring_write_tail;
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001382 if (INTEL_INFO(dev)->gen >= 6)
1383 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1384 else if (INTEL_INFO(dev)->gen >= 4)
1385 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1386 else if (IS_I830(dev) || IS_845G(dev))
1387 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1388 else
1389 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
Daniel Vetter59465b52012-04-11 22:12:48 +02001390 ring->init = init_render_ring;
1391 ring->cleanup = render_ring_cleanup;
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001393
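	/* Chips that do not require the hardware status page to live in
	 * graphics memory reuse the DMA page the DRM layer allocated at
	 * load time. */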
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001394 if (!I915_NEED_GFX_HWS(dev)) {
1395 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1396 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1397 }
1398
1399 return intel_init_ring_buffer(dev, ring);
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001400}
1401
Chris Wilsone8616b62011-01-20 09:57:11 +00001402int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1403{
1404 drm_i915_private_t *dev_priv = dev->dev_private;
1405 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1406
Daniel Vetter59465b52012-04-11 22:12:48 +02001407 ring->name = "render ring";
1408 ring->id = RCS;
1409 ring->mmio_base = RENDER_RING_BASE;
1410
Chris Wilsone8616b62011-01-20 09:57:11 +00001411 if (INTEL_INFO(dev)->gen >= 6) {
Daniel Vetterb4178f82012-04-11 22:12:51 +02001412 /* non-kms not supported on gen6+ */
1413 return -ENODEV;
Chris Wilsone8616b62011-01-20 09:57:11 +00001414 }
Daniel Vetter28f0cbf2012-04-11 22:12:58 +02001415
1416 /* Note: gem is not supported on gen5/ilk without kms (the corresponding
1417 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
1418 * the special gen5 functions. */
1419 ring->add_request = i9xx_add_request;
Chris Wilson46f0f8d2012-04-18 11:12:11 +01001420 if (INTEL_INFO(dev)->gen < 4)
1421 ring->flush = gen2_render_ring_flush;
1422 else
1423 ring->flush = gen4_render_ring_flush;
Daniel Vetter28f0cbf2012-04-11 22:12:58 +02001424 ring->get_seqno = ring_get_seqno;
Chris Wilsonc2798b12012-04-22 21:13:57 +01001425 if (IS_GEN2(dev)) {
1426 ring->irq_get = i8xx_ring_get_irq;
1427 ring->irq_put = i8xx_ring_put_irq;
1428 } else {
1429 ring->irq_get = i9xx_ring_get_irq;
1430 ring->irq_put = i9xx_ring_put_irq;
1431 }
Daniel Vetter28f0cbf2012-04-11 22:12:58 +02001432 ring->irq_enable_mask = I915_USER_INTERRUPT;
Daniel Vetter59465b52012-04-11 22:12:48 +02001433 ring->write_tail = ring_write_tail;
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001434 if (INTEL_INFO(dev)->gen >= 4)
1435 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1436 else if (IS_I830(dev) || IS_845G(dev))
1437 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1438 else
1439 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
Daniel Vetter59465b52012-04-11 22:12:48 +02001440 ring->init = init_render_ring;
1441 ring->cleanup = render_ring_cleanup;
Chris Wilsone8616b62011-01-20 09:57:11 +00001442
Keith Packardf3234702011-07-22 10:44:39 -07001443 if (!I915_NEED_GFX_HWS(dev))
1444 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1445
Chris Wilsone8616b62011-01-20 09:57:11 +00001446 ring->dev = dev;
1447 INIT_LIST_HEAD(&ring->active_list);
1448 INIT_LIST_HEAD(&ring->request_list);
1449 INIT_LIST_HEAD(&ring->gpu_write_list);
1450
1451 ring->size = size;
1452 ring->effective_size = ring->size;
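	/* The i830 reportedly hangs if the tail pointer lands in the
	 * last 128 bytes of the ring, so shrink the usable (effective)
	 * size accordingly. */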
1453 if (IS_I830(ring->dev))
1454 ring->effective_size -= 128;
1455
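	/* In this legacy (non-GEM) path the ring memory was set up by
	 * the caller, so map the given physical range write-combined
	 * here instead of pinning a GEM object. */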
Daniel Vetter4225d0f2012-04-26 23:28:16 +02001456 ring->virtual_start = ioremap_wc(start, size);
1457 if (ring->virtual_start == NULL) {
Chris Wilsone8616b62011-01-20 09:57:11 +00001458		DRM_ERROR("cannot ioremap virtual address for"
1459			  " ring buffer\n");
1460 return -ENOMEM;
1461 }
1462
Chris Wilsone8616b62011-01-20 09:57:11 +00001463 return 0;
1464}
1465
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001466int intel_init_bsd_ring_buffer(struct drm_device *dev)
1467{
1468 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001469 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001470
Daniel Vetter58fa3832012-04-11 22:12:49 +02001471 ring->name = "bsd ring";
1472 ring->id = VCS;
1473
Daniel Vetter0fd2c202012-04-11 22:12:55 +02001474 ring->write_tail = ring_write_tail;
Daniel Vetter58fa3832012-04-11 22:12:49 +02001475 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1476 ring->mmio_base = GEN6_BSD_RING_BASE;
Daniel Vetter0fd2c202012-04-11 22:12:55 +02001477		/* gen6 bsd needs a special workaround for tail updates
		 * (gen6_bsd_ring_write_tail above) */
1478 if (IS_GEN6(dev))
1479 ring->write_tail = gen6_bsd_ring_write_tail;
Daniel Vetter58fa3832012-04-11 22:12:49 +02001480 ring->flush = gen6_ring_flush;
1481 ring->add_request = gen6_add_request;
1482 ring->get_seqno = gen6_ring_get_seqno;
1483 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
1484 ring->irq_get = gen6_ring_get_irq;
1485 ring->irq_put = gen6_ring_put_irq;
1486 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
Daniel Vetter686cb5f2012-04-11 22:12:52 +02001487 ring->sync_to = gen6_ring_sync;
Daniel Vetter58fa3832012-04-11 22:12:49 +02001488 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
1489 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
1490 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
1491 ring->signal_mbox[0] = GEN6_RVSYNC;
1492 ring->signal_mbox[1] = GEN6_BVSYNC;
1493 } else {
1494 ring->mmio_base = BSD_RING_BASE;
Daniel Vetter58fa3832012-04-11 22:12:49 +02001495 ring->flush = bsd_ring_flush;
Daniel Vetter8620a3a2012-04-11 22:12:57 +02001496 ring->add_request = i9xx_add_request;
Daniel Vetter58fa3832012-04-11 22:12:49 +02001497 ring->get_seqno = ring_get_seqno;
Daniel Vettere48d8632012-04-11 22:12:54 +02001498 if (IS_GEN5(dev)) {
Daniel Vettere3670312012-04-11 22:12:53 +02001499 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
Daniel Vettere48d8632012-04-11 22:12:54 +02001500 ring->irq_get = gen5_ring_get_irq;
1501 ring->irq_put = gen5_ring_put_irq;
1502 } else {
Daniel Vettere3670312012-04-11 22:12:53 +02001503 ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
Daniel Vettere48d8632012-04-11 22:12:54 +02001504 ring->irq_get = i9xx_ring_get_irq;
1505 ring->irq_put = i9xx_ring_put_irq;
1506 }
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001507 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
Daniel Vetter58fa3832012-04-11 22:12:49 +02001508 }
1509 ring->init = init_ring_common;
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001511
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001512 return intel_init_ring_buffer(dev, ring);
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001513}
Chris Wilson549f7362010-10-19 11:19:32 +01001514
1515int intel_init_blt_ring_buffer(struct drm_device *dev)
1516{
1517 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001518 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
Chris Wilson549f7362010-10-19 11:19:32 +01001519
Daniel Vetter3535d9d2012-04-11 22:12:50 +02001520 ring->name = "blitter ring";
1521 ring->id = BCS;
1522
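	/* The blitter ring only exists on gen6+, so only the gen6 style
	 * vfuncs are needed here. */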
1523 ring->mmio_base = BLT_RING_BASE;
1524 ring->write_tail = ring_write_tail;
1525 ring->flush = blt_ring_flush;
1526 ring->add_request = gen6_add_request;
1527 ring->get_seqno = gen6_ring_get_seqno;
1528 ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1529 ring->irq_get = gen6_ring_get_irq;
1530 ring->irq_put = gen6_ring_put_irq;
1531 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
Daniel Vetter686cb5f2012-04-11 22:12:52 +02001532 ring->sync_to = gen6_ring_sync;
Daniel Vetter3535d9d2012-04-11 22:12:50 +02001533 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
1534 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
1535 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
1536 ring->signal_mbox[0] = GEN6_RBSYNC;
1537 ring->signal_mbox[1] = GEN6_VBSYNC;
1538 ring->init = init_ring_common;
Chris Wilson549f7362010-10-19 11:19:32 +01001539
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001540 return intel_init_ring_buffer(dev, ring);
Chris Wilson549f7362010-10-19 11:19:32 +01001541}