/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}
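
/*
 * Worked example for ring_space() (illustrative numbers, not from real
 * hardware state): with a 32-page (131072 byte) ring, head == 256 and
 * tail == 130048, the raw difference is 256 - (130048 + 8) = -129800,
 * which is negative, so ring->size is added and 1272 bytes are free.
 * The "+ 8" of slack keeps the tail from advancing right up to the
 * head, since head == tail denotes an empty ring rather than a full one.
 */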

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}
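
/*
 * Illustrative wrap case: when next_seqno is 0xffffffff, that value is
 * returned and the post-increment wraps the counter to 0, which the
 * check above immediately bumps to 1.  A seqno of 0 is therefore never
 * handed out and can always be read as "no seqno".
 */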

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
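
/*
 * Sketch of how the domain comment above maps to a concrete command, for
 * a gen4+ part (values derived from render_ring_flush() itself, not an
 * extra hardware requirement): flushing I915_GEM_DOMAIN_RENDER while
 * invalidating I915_GEM_DOMAIN_INSTRUCTION yields
 *
 *	cmd = MI_FLUSH | MI_EXE_FLUSH;
 *
 * MI_NO_WRITE_FLUSH is cleared so the render cache is written back, and
 * MI_EXE_FLUSH invalidates the instruction cache.
 */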

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* lower dword */
	intel_ring_emit(ring, 0); /* upper dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		I915_WRITE(INSTPM,
			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring: ring that is adding a request
 * @seqno: return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_get_seqno(ring->dev);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}
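
/*
 * For reference, the ten dwords gen6_add_request() emits for the render
 * ring look like this (mailbox registers taken from render_ring's
 * signal_mbox later in this file; purely illustrative):
 *
 *	MI_SEMAPHORE_MBOX | MI_SEMAPHORE_GLOBAL_GTT |
 *		MI_SEMAPHORE_REGISTER | MI_SEMAPHORE_UPDATE
 *	seqno
 *	GEN6_VRSYNC	(signal the video ring's mailbox)
 *	MI_SEMAPHORE_MBOX | MI_SEMAPHORE_GLOBAL_GTT |
 *		MI_SEMAPHORE_REGISTER | MI_SEMAPHORE_UPDATE
 *	seqno
 *	GEN6_BRSYNC	(signal the blitter ring's mailbox)
 *	MI_STORE_DWORD_INDEX
 *	I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT
 *	seqno
 *	MI_USER_INTERRUPT
 */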

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter: ring that is waiting
 * @signaller: ring which has, or will signal
 * @seqno: seqno which the waiter will block on
 */
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
		struct intel_ring_buffer *signaller,
		int ring,
		u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
		    struct intel_ring_buffer *signaller,
		    u32 seqno)
{
	WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       RCS,
			       seqno);
}

/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       VCS,
			       seqno);
}

/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       BCS,
			       seqno);
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) |		\
			PIPE_CONTROL_QW_WRITE |				\
			PIPE_CONTROL_DEPTH_STALL);			\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	/* The BLT ring on IVB appears to have broken synchronization
	 * between the seqno write and the interrupt, so that the
	 * interrupt appears first.  Returning false here makes
	 * i915_wait_request() do a polling loop, instead.
	 */
	return false;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fallback to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(ring);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		/* With GEM the hangcheck timer should kick us out of the loop,
		 * leaving it early runs the risk of corrupting GEM state (due
		 * to running on almost untested codepaths). But on resume
		 * timers don't work yet, so prevent a complete hang in that
		 * case by choosing an insanely large timeout. */
		end = jiffies + 60 * HZ;
	else
		end = jiffies + 3 * HZ;

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}
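
/*
 * The canonical usage pattern for the two helpers above, as used
 * throughout this file (a sketch only; the dword count passed to
 * intel_ring_begin() must match the number of intel_ring_emit() calls,
 * typically padded to an even number of dwords with MI_NOOP):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */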

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RCS,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= render_ring_get_irq,
	.irq_put		= render_ring_put_irq,
	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
	.cleanup		= render_ring_cleanup,
	.sync_to		= render_ring_sync_to,
	.semaphore_register	= {MI_SEMAPHORE_SYNC_INVALID,
				   MI_SEMAPHORE_SYNC_RV,
				   MI_SEMAPHORE_SYNC_RB},
	.signal_mbox		= {GEN6_VRSYNC, GEN6_BRSYNC},
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name			= "bsd ring",
	.id			= VCS,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
	.flush			= bsd_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= bsd_ring_get_irq,
	.irq_put		= bsd_ring_put_irq,
	.dispatch_execbuffer	= ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
1299
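/*
 * The irq_get/irq_put hooks below are thin per-ring wrappers around the
 * shared gen6_ring_get_irq()/gen6_ring_put_irq() helpers defined earlier
 * in this file: each supplies the GT interrupt source bit and the
 * matching per-ring user-interrupt enable bit for its engine. The
 * helpers reference-count enables (via ring->irq_refcount), so paired
 * get/put calls nest safely.
 */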
static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_USER_INTERRUPT,
			  GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_GEN6_BSD_USER_INTERRUPT,
			  GEN6_BSD_USER_INTERRUPT);
}

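/*
 * GEN6+ inter-ring synchronisation, roughly: .sync_to makes this ring
 * wait on a semaphore that another ring will signal; .semaphore_register
 * holds the MI_SEMAPHORE_SYNC selector to use per peer ring, with the
 * ring's own slot set to MI_SEMAPHORE_SYNC_INVALID; .signal_mbox names
 * the mailbox registers this ring writes when it signals, one per peer.
 */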
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name = "gen6 bsd ring",
	.id = VCS,
	.mmio_base = GEN6_BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = gen6_bsd_ring_write_tail,
	.flush = gen6_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = gen6_bsd_ring_get_irq,
	.irq_put = gen6_bsd_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.sync_to = gen6_bsd_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_VR,
			       MI_SEMAPHORE_SYNC_INVALID,
			       MI_SEMAPHORE_SYNC_VB},
	.signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}

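/*
 * Note the asymmetry with gen6_ring_flush() above: the BSD ring
 * invalidates for any GPU domain and sets MI_INVALIDATE_BSD as well,
 * while the blitter flush below only requests a TLB invalidate when the
 * render domain is being invalidated.
 */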
static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name = "blt ring",
	.id = BCS,
	.mmio_base = BLT_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = ring_write_tail,
	.flush = blt_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = blt_ring_get_irq,
	.irq_put = blt_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.sync_to = gen6_blt_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
			       MI_SEMAPHORE_SYNC_BV,
			       MI_SEMAPHORE_SYNC_INVALID},
	.signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
};

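/*
 * The intel_init_*_ring_buffer() constructors below all follow the same
 * pattern: copy a static template into the per-device ring slot, patch
 * in generation-specific methods, and hand the result to
 * intel_init_ring_buffer() to allocate the ring object and status page.
 */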
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev))
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
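	/*
	 * The 128 bytes (two cachelines) reserved on i830 look like a
	 * workaround for the erratum where the chip can hang if the tail
	 * pointer lands within the last cachelines of the ring.
	 */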
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}

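/*
 * Unlike intel_init_render_ring_buffer(), the DRI1 path above does not
 * allocate a GEM object for the ring: it maps a caller-provided legacy
 * ring (start/size arrive through the old DRI interface) with
 * drm_core_ioremap_wc() and fills in just enough state to reuse the
 * common submission code.
 */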
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	if (IS_GEN7(dev))
		ring->irq_get = gen7_blt_ring_get_irq;

	return intel_init_ring_buffer(dev, ring);
}
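
/*
 * A minimal usage sketch (assuming the caller and the HAS_BSD/HAS_BLT
 * feature checks live in the GEM init path, as in this era's i915_gem.c):
 *
 *	ret = intel_init_render_ring_buffer(dev);
 *	if (ret)
 *		return ret;
 *	if (HAS_BSD(dev)) {
 *		ret = intel_init_bsd_ring_buffer(dev);
 *		if (ret)
 *			goto cleanup_render_ring;
 *	}
 *	if (HAS_BLT(dev)) {
 *		ret = intel_init_blt_ring_buffer(dev);
 *		if (ret)
 *			goto cleanup_bsd_ring;
 *	}
 */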