/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

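/* Compute the free space between tail and head, keeping 8 bytes in
 * reserve so that the tail can never fully catch up with the head.
 */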
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

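/* Return the next seqno for the device, wrapping such that zero is
 * skipped; zero is reserved to mean "no seqno".
 */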
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains | flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

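/* Full PIPE_CONTROL flush for gen6, preceded by the post-sync-nonzero
 * workaround flush that the PRM requires (see the comment above).
 */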
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* lower dword */
	intel_ring_emit(ring, 0); /* upper dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

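/* Bring a ring up: stop it, program the start address, check that the
 * head really reset, then re-enable it; returns -EIO if the ring
 * refuses to start.
 */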
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

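/* Allocate, pin and kmap the 4k scratch page that PIPE_CONTROL
 * commands write their post-sync data into.
 */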
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

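/* Emit a semaphore mailbox update for one other ring: write @seqno
 * into the semaphore register at @mmio_offset so that ring can wait
 * on it.
 */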
static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring: ring that is adding a request
 * @seqno: return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_get_seqno(ring->dev);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter: ring that is waiting
 * @signaller: ring which has signalled, or will signal
 * @seqno: seqno which the waiter will block on
 */
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
		struct intel_ring_buffer *signaller,
		int ring,
		u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
		    struct intel_ring_buffer *signaller,
		    u32 seqno)
{
	WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       RCS,
			       seqno);
}

/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       VCS,
			       seqno);
}

/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       BCS,
			       seqno);
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) |		\
			PIPE_CONTROL_QW_WRITE |				\
			PIPE_CONTROL_DEPTH_STALL);			\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to work around the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

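/* Helpers to unmask (enable) or mask (disable) interrupt sources in
 * the GT interrupt mask register on Ironlake+ and in the legacy IMR
 * on earlier parts.
 */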
static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

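/* Point the hardware status page register at the ring's status page;
 * the register's location varies by generation.
 */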
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Work around an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

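/* Fill the remainder of the ring with MI_NOOPs so that execution can
 * wrap back to the start of the buffer.
 */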
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

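/* Wait for at least @n bytes of space in the ring: first check the
 * head value reported in the status page, then poll the HEAD register
 * for up to three seconds.
 */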
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fall back to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

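/* Reserve space for @num_dwords dwords of commands, wrapping and/or
 * waiting for space as needed; each caller pairs this with
 * intel_ring_advance() once the commands have been emitted.
 */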
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.id = RING_RENDER,
	.mmio_base = RENDER_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_render_ring,
	.write_tail = ring_write_tail,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = render_ring_get_irq,
	.irq_put = render_ring_put_irq,
	.dispatch_execbuffer = render_ring_dispatch_execbuffer,
	.cleanup = render_ring_cleanup,
	.sync_to = render_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
			       MI_SEMAPHORE_SYNC_RV,
			       MI_SEMAPHORE_SYNC_RB},
	.signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.id = RING_BSD,
	.mmio_base = BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = ring_write_tail,
	.flush = bsd_ring_flush,
	.add_request = ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = bsd_ring_get_irq,
	.irq_put = bsd_ring_put_irq,
	.dispatch_execbuffer = ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

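/* MI_FLUSH_DW flush for the gen6 BSD ring, requesting TLB and BSD
 * cache invalidation whenever a GPU domain needs to be invalidated.
 */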
static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_USER_INTERRUPT,
			  GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_GEN6_BSD_USER_INTERRUPT,
			  GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= gen6_bsd_ring_get_irq,
	.irq_put		= gen6_bsd_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
	.sync_to		= gen6_bsd_ring_sync_to,
	.semaphore_register	= {MI_SEMAPHORE_SYNC_VR,
				   MI_SEMAPHORE_SYNC_INVALID,
				   MI_SEMAPHORE_SYNC_VB},
	.signal_mbox		= {GEN6_RVSYNC, GEN6_BVSYNC},
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some steppings of SNB: each time the BLT engine's ring
 * tail is moved, the first command parsed from the ring must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

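/* For affected steppings, allocate and pin a 4k batch containing just
 * MI_BATCH_BUFFER_END so that every submission can open with an
 * MI_BATCH_BUFFER_START pointing at it (see blt_ring_begin() below).
 */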
static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

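/* When the workaround object is present, reserve two extra dwords so
 * each sequence can be prefixed with a jump into the empty batch.
 */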
static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

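/* Same MI_FLUSH_DW sequence as the BSD ring, but routed through
 * blt_ring_begin() so the SNB workaround applies, and with TLB
 * invalidation tied to the render domain only.
 */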
static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name			= "blt ring",
	.id			= RING_BLT,
	.mmio_base		= BLT_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= blt_ring_init,
	.write_tail		= ring_write_tail,
	.flush			= blt_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= blt_ring_get_irq,
	.irq_put		= blt_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
	.cleanup		= blt_ring_cleanup,
	.sync_to		= gen6_blt_ring_sync_to,
	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
				   MI_SEMAPHORE_SYNC_BV,
				   MI_SEMAPHORE_SYNC_INVALID},
	.signal_mbox		= {GEN6_RBSYNC, GEN6_VBSYNC},
};

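/* Instantiate the render ring from its template, then patch in the
 * generation-specific request, flush and interrupt hooks.
 */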
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

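/* Legacy DRI1/UMS entry point: userspace dictates where the ring lives,
 * so it is mapped here by hand instead of through
 * intel_init_ring_buffer().
 */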
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev))
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
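	/* The i830 reportedly hangs if TAIL points into the last two
	 * cachelines of the ring, so keep 128 bytes in reserve.
	 */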
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}

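/* The BSD (video) ring moved to a new mmio base and gained MI_FLUSH_DW
 * plus semaphores on gen6, so pick the template by generation.
 */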
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

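/* The blitter ring only exists on gen6+, so a single template suffices. */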
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}