/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

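/*
 * Free space between the tail and the last-read head position.  The
 * 8-byte slack keeps the tail from ever catching up to the head, so a
 * full ring is never mistaken for an empty one.
 */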
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

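/*
 * Hand out the next breadcrumb value.  Zero is reserved to mean
 * "no seqno", so the counter skips it when it wraps.
 */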
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains | flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes; propagate any
	 * failure instead of emitting a flush on a dead ring. */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* lower dword */
	intel_ring_emit(ring, 0); /* upper dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

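/*
 * Bring a ring up from scratch: stop it, program the start address,
 * then verify that the hardware actually accepted the configuration
 * before declaring the ring usable.
 */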
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

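/*
 * The pipe control page is a single page of LLC-cached scratch space:
 * the first cacheline holds the seqno written by PIPE_CONTROL packets
 * (read back via pc_render_get_seqno), and the space from gtt_offset +
 * 128 upwards serves as dummy write targets for workaround flushes.
 */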
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM; /* kmap failed; don't fall through with ret == 0 */
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		I915_WRITE(INSTPM,
			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring: ring that is adding a request
 * @seqno: return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_get_seqno(ring->dev);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter: ring that is waiting
 * @signaller: ring which has, or will signal
 * @ring: index into the signaller's semaphore register set
 * @seqno: seqno which the waiter will block on
 */
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
		struct intel_ring_buffer *signaller,
		int ring,
		u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
		    struct intel_ring_buffer *signaller,
		    u32 seqno)
{
	WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       RCS,
			       seqno);
}

/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       VCS,
			       seqno);
}

/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       BCS,
			       seqno);
}

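/*
 * Emit a single 4-dword PIPE_CONTROL performing a depth-stalled qword
 * write to the given scratch address; pc_render_add_request uses a run
 * of these to push the preceding writes out to memory.
 */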
#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |\
			PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);		\
	intel_ring_emit(ring__, 0);						\
	intel_ring_emit(ring__, 0);						\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

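/*
 * The irq_get/irq_put pairs are refcounted under ring->irq_lock: only
 * the first get unmasks the interrupt source, and only the last put
 * masks it again, so nested callers stay cheap.
 */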
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

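/* Point the hardware status page register at the ring's HWS object. */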
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	/* The BLT ring on IVB appears to have broken synchronization
	 * between the seqno write and the interrupt, so that the
	 * interrupt appears first.  Returning false here makes
	 * i915_wait_request() do a polling loop, instead.
	 */
	return false;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

Chris Wilsonc4e7a412010-11-30 14:10:25 +0000877ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
Zou Nan haid1b851f2010-05-21 09:08:57 +0800878{
Chris Wilsone1f99ce2010-10-27 12:45:26 +0100879 int ret;
Chris Wilson78501ea2010-10-27 12:18:21 +0100880
Chris Wilsone1f99ce2010-10-27 12:45:26 +0100881 ret = intel_ring_begin(ring, 2);
882 if (ret)
883 return ret;
884
Chris Wilson78501ea2010-10-27 12:18:21 +0100885 intel_ring_emit(ring,
Chris Wilsonc4e7a412010-11-30 14:10:25 +0000886 MI_BATCH_BUFFER_START | (2 << 6) |
Chris Wilson78501ea2010-10-27 12:18:21 +0100887 MI_BATCH_NON_SECURE_I965);
Chris Wilsonc4e7a412010-11-30 14:10:25 +0000888 intel_ring_emit(ring, offset);
Chris Wilson78501ea2010-10-27 12:18:21 +0100889 intel_ring_advance(ring);
890
Zou Nan haid1b851f2010-05-21 09:08:57 +0800891 return 0;
892}
893
static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM; /* kmap failed; don't fall through with ret == 0 */
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

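/*
 * Pad the remainder of the ring with MI_NOOPs (two dwords at a time)
 * and reset the tail to the start, so the next emit never straddles
 * the end of the buffer.
 */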
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fallback to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(ring);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		/* With GEM the hangcheck timer should kick us out of the loop,
		 * leaving it early runs the risk of corrupting GEM state (due
		 * to running on almost untested codepaths). But on resume
		 * timers don't work yet, so prevent a complete hang in that
		 * case by choosing an insanely large timeout. */
		end = jiffies + 60 * HZ;
	else
		end = jiffies + 3 * HZ;

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

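/*
 * Reserve space for num_dwords commands, wrapping and/or waiting for
 * the GPU to consume the ring as needed.  The usual emit pattern, as
 * seen throughout this file:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */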
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= render_ring_get_irq,
	.irq_put		= render_ring_put_irq,
	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
	.cleanup		= render_ring_cleanup,
	.sync_to		= render_ring_sync_to,
	.semaphore_register	= {MI_SEMAPHORE_SYNC_INVALID,
				   MI_SEMAPHORE_SYNC_RV,
				   MI_SEMAPHORE_SYNC_RB},
	.signal_mbox		= {GEN6_VRSYNC, GEN6_BRSYNC},
};

1223/* ring buffer for bit-stream decoder */
1224
Chris Wilsone0708682010-09-19 14:46:27 +01001225static const struct intel_ring_buffer bsd_ring = {
Zou Nan haid1b851f2010-05-21 09:08:57 +08001226 .name = "bsd ring",
Chris Wilson92204342010-09-18 11:02:01 +01001227 .id = RING_BSD,
Daniel Vetter333e9fe2010-08-02 16:24:01 +02001228 .mmio_base = BSD_RING_BASE,
Zou Nan haid1b851f2010-05-21 09:08:57 +08001229 .size = 32 * PAGE_SIZE,
Chris Wilson78501ea2010-10-27 12:18:21 +01001230 .init = init_ring_common,
Chris Wilson297b0c52010-10-22 17:02:41 +01001231 .write_tail = ring_write_tail,
Zou Nan haid1b851f2010-05-21 09:08:57 +08001232 .flush = bsd_ring_flush,
Chris Wilson549f7362010-10-19 11:19:32 +01001233 .add_request = ring_add_request,
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001234 .get_seqno = ring_get_seqno,
1235 .irq_get = bsd_ring_get_irq,
1236 .irq_put = bsd_ring_put_irq,
Chris Wilson78501ea2010-10-27 12:18:21 +01001237 .dispatch_execbuffer = ring_dispatch_execbuffer,
Zou Nan haid1b851f2010-05-21 09:08:57 +08001238};
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08001239
Xiang, Haihao881f47b2010-09-19 14:40:43 +01001240
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

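/*
 * On the gen6 BSD ring, MI_FLUSH_DW replaces MI_FLUSH; the TLB/BSD
 * invalidate bits are only set when a GPU domain is being invalidated.
 */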
static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

1284static int
Chris Wilson78501ea2010-10-27 12:18:21 +01001285gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
Chris Wilsonc4e7a412010-11-30 14:10:25 +00001286 u32 offset, u32 len)
Xiang, Haihao881f47b2010-09-19 14:40:43 +01001287{
Akshay Joshi0206e352011-08-16 15:34:10 -04001288 int ret;
Chris Wilsonab6f8e32010-09-19 17:53:44 +01001289
Akshay Joshi0206e352011-08-16 15:34:10 -04001290 ret = intel_ring_begin(ring, 2);
1291 if (ret)
1292 return ret;
Chris Wilsone1f99ce2010-10-27 12:45:26 +01001293
Akshay Joshi0206e352011-08-16 15:34:10 -04001294 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1295 /* bit0-7 is the length on GEN6+ */
1296 intel_ring_emit(ring, offset);
1297 intel_ring_advance(ring);
Chris Wilsonab6f8e32010-09-19 17:53:44 +01001298
Akshay Joshi0206e352011-08-16 15:34:10 -04001299 return 0;
Xiang, Haihao881f47b2010-09-19 14:40:43 +01001300}
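
/*
 * Note that @len is not emitted: on gen6+ execution continues from
 * @offset until the command parser reaches the MI_BATCH_BUFFER_END
 * terminating the batch itself, so only the two dwords above are
 * required (our reading of the dispatch path; @len is kept for the
 * common function signature).
 */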

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_USER_INTERRUPT,
			  GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_GEN6_BSD_USER_INTERRUPT,
			  GEN6_BSD_USER_INTERRUPT);
}
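
/*
 * Each engine's irq_get/irq_put pair is a thin wrapper around
 * gen6_ring_get_irq()/gen6_ring_put_irq() (defined earlier in this
 * file), passing the engine's GT-level interrupt bit and its
 * per-engine user-interrupt bit.  The blitter pair below follows the
 * same pattern.
 */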

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name = "gen6 bsd ring",
	.id = RING_BSD,
	.mmio_base = GEN6_BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = gen6_bsd_ring_write_tail,
	.flush = gen6_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = gen6_bsd_ring_get_irq,
	.irq_put = gen6_bsd_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.sync_to = gen6_bsd_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_VR,
			       MI_SEMAPHORE_SYNC_INVALID,
			       MI_SEMAPHORE_SYNC_VB},
	.signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};
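
/*
 * The semaphore_register entries appear to be indexed by the ring being
 * synced against (render, video, blitter in that order), with the
 * engine's own slot set to MI_SEMAPHORE_SYNC_INVALID; compare the
 * {BR, BV, INVALID} triple in gen6_blt_ring below.  signal_mbox lists
 * the mailboxes this engine signals (render and blitter here).
 */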

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}


/*
 * Workaround for some steppings of SNB: each time the BLT engine's ring
 * tail is moved, the first command parsed in the ring must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		/* A tiny batch that terminates immediately, used to
		 * satisfy the MI_BATCH_BUFFER_START requirement above. */
		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, num_dwords);
}
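
/*
 * With the workaround object present, the ring contents for a flush end
 * up looking like this (a sketch derived from blt_ring_begin() and
 * blt_ring_flush() below):
 *
 *	MI_BATCH_BUFFER_START
 *	<gtt_offset of the workaround batch: MI_BATCH_BUFFER_END, MI_NOOP>
 *	MI_FLUSH_DW ...		(the caller's num_dwords follow)
 *
 * so the first command parsed after every tail move is indeed
 * MI_BATCH_BUFFER_START, as the workaround requires.
 */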

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}
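
/*
 * Unlike gen6_ring_flush() above, this checks I915_GEM_DOMAIN_RENDER
 * and sets only MI_INVALIDATE_TLB (no MI_INVALIDATE_BSD), and it goes
 * through blt_ring_begin() so that the SNB workaround batch, when
 * needed, precedes the flush packet.
 */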

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name = "blt ring",
	.id = RING_BLT,
	.mmio_base = BLT_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = blt_ring_init,
	.write_tail = ring_write_tail,
	.flush = blt_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = blt_ring_get_irq,
	.irq_put = blt_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.cleanup = blt_ring_cleanup,
	.sync_to = gen6_blt_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
			       MI_SEMAPHORE_SYNC_BV,
			       MI_SEMAPHORE_SYNC_INVALID},
	.signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev))
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	if (IS_GEN7(dev))
		ring->irq_get = gen7_blt_ring_get_irq;

	return intel_init_ring_buffer(dev, ring);
}
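
/*
 * A minimal initialisation sketch (hypothetical caller, error handling
 * abbreviated; the real driver drives this from its GEM init path):
 *
 *	ret = intel_init_render_ring_buffer(dev);
 *	if (ret == 0 && HAS_BSD(dev))
 *		ret = intel_init_bsd_ring_buffer(dev);
 *	if (ret == 0 && HAS_BLT(dev))
 *		ret = intel_init_blt_ring_buffer(dev);
 */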