/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

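/*
 * Free space between the software tail and the last hardware-reported
 * head. HEAD is masked with HEAD_ADDR because the register also carries
 * wrap/wait status bits; the 8 spare bytes keep tail from ever catching
 * up to head, since head == tail is read by the hardware as an empty ring.
 */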
static inline int ring_space(struct intel_ring_buffer *ring)
{
        int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
        if (space < 0)
                space += ring->size;
        return space;
}

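/*
 * Sequence numbers come from a single driver-wide counter; zero is
 * skipped so that a status page that still reads 0 is never mistaken
 * for a completed request.
 */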
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;

        seqno = dev_priv->next_seqno;

        /* reserve 0 for non-seqno */
        if (++dev_priv->next_seqno == 0)
                dev_priv->next_seqno = 1;

        return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
                  u32 invalidate_domains,
                  u32 flush_domains)
{
        struct drm_device *dev = ring->dev;
        u32 cmd;
        int ret;

        /*
         * read/write caches:
         *
         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
         * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
         * also flushed at 2d versus 3d pipeline switches.
         *
         * read-only caches:
         *
         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
         * MI_READ_FLUSH is set, and is always flushed on 965.
         *
         * I915_GEM_DOMAIN_COMMAND may not exist?
         *
         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
         * invalidated when MI_EXE_FLUSH is set.
         *
         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
         * invalidated with every MI_FLUSH.
         *
         * TLBs:
         *
         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
         * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
         * are flushed at any MI_FLUSH.
         */

        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;
        if (INTEL_INFO(dev)->gen < 4) {
                /*
                 * On the 965, the sampler cache always gets flushed
                 * and this bit is reserved.
                 */
                if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                        cmd |= MI_READ_FLUSH;
        }
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
            (IS_G4X(dev) || IS_GEN5(dev)))
                cmd |= MI_INVALIDATE_ISP;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
                            u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
                        RING_ACTHD(ring->mmio_base) : ACTHD;

        return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj = ring->obj;
        u32 head;

        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);

        /* Initialize the ring. */
        I915_WRITE_START(ring, obj->gtt_offset);
        head = I915_READ_HEAD(ring) & HEAD_ADDR;

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
                              I915_READ_CTL(ring),
                              I915_READ_HEAD(ring),
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));

                I915_WRITE_HEAD(ring, 0);

                if (I915_READ_HEAD(ring) & HEAD_ADDR) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
                                  I915_READ_CTL(ring),
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
                }
        }

        I915_WRITE_CTL(ring,
                       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                       | RING_REPORT_64K | RING_VALID);

        /* If the head is still not zero, the ring is dead */
        if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
            I915_READ_START(ring) != obj->gtt_offset ||
            (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
                DRM_ERROR("%s initialization failed "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          ring->name,
                          I915_READ_CTL(ring),
                          I915_READ_HEAD(ring),
                          I915_READ_TAIL(ring),
                          I915_READ_START(ring));
                return -EIO;
        }

        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                i915_kernel_lost_context(ring->dev);
        else {
                ring->head = I915_READ_HEAD(ring);
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring_space(ring);
        }

        return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
        struct drm_i915_gem_object *obj;
        volatile u32 *cpu_page;
        u32 gtt_offset;
};

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc;
        struct drm_i915_gem_object *obj;
        int ret;

        if (ring->private)
                return 0;

        pc = kmalloc(sizeof(*pc), GFP_KERNEL);
        if (!pc)
                return -ENOMEM;

        obj = i915_gem_alloc_object(ring->dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
                goto err;
        }
        obj->cache_level = I915_CACHE_LLC;

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret)
                goto err_unref;

        pc->gtt_offset = obj->gtt_offset;
        pc->cpu_page = kmap(obj->pages[0]);
        if (pc->cpu_page == NULL)
                goto err_unpin;

        pc->obj = obj;
        ring->private = pc;
        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        kfree(pc);
        return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        struct drm_i915_gem_object *obj;

        if (!ring->private)
                return;

        obj = pc->obj;
        kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);

        kfree(pc);
        ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);

        if (INTEL_INFO(dev)->gen > 3) {
                int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                if (IS_GEN6(dev) || IS_GEN7(dev))
                        mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
                I915_WRITE(MI_MODE, mode);
        }

        if (INTEL_INFO(dev)->gen >= 6) {
        } else if (IS_GEN5(dev)) {
                ret = init_pipe_control(ring);
                if (ret)
                        return ret;
        }

        return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
        if (!ring->private)
                return;

        cleanup_pipe_control(ring);
}

static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int id;

        /*
         * cs -> 1 = vcs, 0 = bcs
         * vcs -> 1 = bcs, 0 = cs,
         * bcs -> 1 = cs, 0 = vcs.
         */
        id = ring - dev_priv->ring;
        id += 2 - i;
        id %= 3;

        intel_ring_emit(ring,
                        MI_SEMAPHORE_MBOX |
                        MI_SEMAPHORE_REGISTER |
                        MI_SEMAPHORE_UPDATE);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring,
                        RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

static int
gen6_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
{
        u32 seqno;
        int ret;

        ret = intel_ring_begin(ring, 10);
        if (ret)
                return ret;

        seqno = i915_gem_get_seqno(ring->dev);
        update_semaphore(ring, 0, seqno);
        update_semaphore(ring, 1, seqno);

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

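/*
 * Stall @ring (via MI_SEMAPHORE_COMPARE) until the mailbox it shares
 * with @to, written by update_semaphore() above, reports that @seqno
 * has been reached: inter-ring ordering without a round trip through
 * the CPU.
 */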
int
intel_ring_sync(struct intel_ring_buffer *ring,
                struct intel_ring_buffer *to,
                u32 seqno)
{
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring,
                        MI_SEMAPHORE_MBOX |
                        MI_SEMAPHORE_REGISTER |
                        intel_ring_sync_index(ring, to) << 17 |
                        MI_SEMAPHORE_COMPARE);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

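/*
 * Emit one depth-stalling PIPE_CONTROL qword write to addr__; used by
 * pc_render_add_request() below to push the scratch cachelines out to
 * memory before the final notifying PIPE_CONTROL.
 */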
#define PIPE_CONTROL_FLUSH(ring__, addr__)                                  \
do {                                                                        \
        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
                        PIPE_CONTROL_DEPTH_STALL | 2);                      \
        intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);        \
        intel_ring_emit(ring__, 0);                                         \
        intel_ring_emit(ring__, 0);                                         \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
                      u32 *result)
{
        struct drm_device *dev = ring->dev;
        u32 seqno = i915_gem_get_seqno(dev);
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;

        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
         * incoherent with writes to memory, i.e. completely fubar,
         * so we need to use PIPE_NOTIFY instead.
         *
         * However, we also need to workaround the qword write
         * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
         * memory before requesting an interrupt.
         */
        ret = intel_ring_begin(ring, 32);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
                        u32 *result)
{
        struct drm_device *dev = ring->dev;
        u32 seqno = i915_gem_get_seqno(dev);
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->gt_irq_mask &= ~mask;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->gt_irq_mask |= mask;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(IMR, dev_priv->irq_mask);
        POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->irq_mask |= mask;
        I915_WRITE(IMR, dev_priv->irq_mask);
        POSTING_READ(IMR);
}

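/*
 * User-interrupt enabling is refcounted under ring->irq_lock so that
 * any number of waiters can share the one interrupt source; the
 * hardware mask registers are only touched on the 0 <-> 1 transitions.
 */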
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_irq(dev_priv,
                                            GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);

        return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_irq(dev_priv,
                                             GT_USER_INTERRUPT |
                                             GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 mmio = 0;

        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
        if (IS_GEN7(dev)) {
                switch (ring->id) {
                case RING_RENDER:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
                case RING_BLT:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
                case RING_BSD:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
                }
        } else if (IS_GEN6(ring->dev)) {
                mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
        } else {
                mmio = RING_HWS_PGA(ring->mmio_base);
        }

        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
        POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
               u32 invalidate_domains,
               u32 flush_domains)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_FLUSH);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
{
        u32 seqno;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        seqno = i915_gem_get_seqno(ring->dev);

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static bool
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0)
                ironlake_enable_irq(dev_priv, flag);
        spin_unlock(&ring->irq_lock);

        return true;
}

static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0)
                ironlake_disable_irq(dev_priv, flag);
        spin_unlock(&ring->irq_lock);
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                ring->irq_mask &= ~rflag;
                I915_WRITE_IMR(ring, ring->irq_mask);
                ironlake_enable_irq(dev_priv, gflag);
        }
        spin_unlock(&ring->irq_lock);

        return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                ring->irq_mask |= rflag;
                I915_WRITE_IMR(ring, ring->irq_mask);
                ironlake_disable_irq(dev_priv, gflag);
        }
        spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
        return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
        ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START | (2 << 6) |
                        MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len)
{
        struct drm_device *dev = ring->dev;
        int ret;

        if (IS_I830(dev) || IS_845G(dev)) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        return ret;

                intel_ring_emit(ring, MI_BATCH_BUFFER);
                intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
                intel_ring_emit(ring, offset + len - 8);
                intel_ring_emit(ring, 0);
        } else {
                ret = intel_ring_begin(ring, 2);
                if (ret)
                        return ret;

                if (INTEL_INFO(dev)->gen >= 4) {
                        intel_ring_emit(ring,
                                        MI_BATCH_BUFFER_START | (2 << 6) |
                                        MI_BATCH_NON_SECURE_I965);
                        intel_ring_emit(ring, offset);
                } else {
                        intel_ring_emit(ring,
                                        MI_BATCH_BUFFER_START | (2 << 6));
                        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
                }
        }
        intel_ring_advance(ring);

        return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;

        kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;

        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }
        obj->cache_level = I915_CACHE_LLC;

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = obj->gtt_offset;
        ring->status_page.page_addr = kmap(obj->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        intel_ring_setup_status_page(ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                         ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;
        int ret;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);

        init_waitqueue_head(&ring->irq_queue);
        spin_lock_init(&ring->irq_lock);
        ring->irq_mask = ~0;

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
                if (ret)
                        return ret;
        }

        obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->obj = obj;

        ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
        if (ret)
                goto err_unref;

        ring->map.size = ring->size;
        ring->map.offset = dev->agp->base + obj->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ring->virtual_start = ring->map.handle;
        ret = ring->init(ring);
        if (ret)
                goto err_unmap;

        /* Workaround an erratum on the i830 which causes a hang if
         * the TAIL pointer points to within the last 2 cachelines
         * of the buffer.
         */
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev))
                ring->effective_size -= 128;

        return 0;

err_unmap:
        drm_core_ioremapfree(&ring->map, dev);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
        ring->obj = NULL;
err_hws:
        cleanup_status_page(ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv;
        int ret;

        if (ring->obj == NULL)
                return;

        /* Disable the ring buffer. The ring must be idle at this point */
        dev_priv = ring->dev->dev_private;
        ret = intel_wait_ring_idle(ring);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);

        I915_WRITE_CTL(ring, 0);

        drm_core_ioremapfree(&ring->map, ring->dev);

        i915_gem_object_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;

        if (ring->cleanup)
                ring->cleanup(ring);

        cleanup_status_page(ring);
}

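/*
 * When a request would run past the end of the ring, pad the remainder
 * with MI_NOOPs and restart at offset 0, so that no command is ever
 * split across the wrap point.
 */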
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
        unsigned int *virt;
        int rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = intel_wait_ring_buffer(ring, rem);
                if (ret)
                        return ret;
        }

        virt = (unsigned int *)(ring->virtual_start + ring->tail);
        rem /= 8;
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }

        ring->tail = 0;
        ring->space = ring_space(ring);

        return 0;
}

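/*
 * Wait for at least @n bytes of ring space to be consumed by the GPU:
 * first trust the head value the hardware periodically reports into the
 * status page (dword 4, enabled via RING_REPORT_64K in
 * init_ring_common()), then fall back to polling the HEAD register for
 * up to three seconds before giving up.
 */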
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;
        u32 head;

        /* If the reported head position has wrapped or hasn't advanced,
         * fallback to the slow and accurate path.
         */
        head = intel_read_status_page(ring, 4);
        if (head > ring->head) {
                ring->head = head;
                ring->space = ring_space(ring);
                if (ring->space >= n)
                        return 0;
        }

        trace_i915_ring_wait_begin(ring);
        end = jiffies + 3 * HZ;
        do {
                ring->head = I915_READ_HEAD(ring);
                ring->space = ring_space(ring);
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(ring);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                msleep(1);
                if (atomic_read(&dev_priv->mm.wedged))
                        return -EAGAIN;
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(ring);
        return -EBUSY;
}

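/*
 * Reserve space for @num_dwords dwords of commands, wrapping the ring
 * and/or waiting for the GPU to retire older work as needed.  Every
 * caller must pair this with intel_ring_advance() once the dwords have
 * been emitted.
 */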
int intel_ring_begin(struct intel_ring_buffer *ring,
                     int num_dwords)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int n = 4*num_dwords;
        int ret;

        if (unlikely(atomic_read(&dev_priv->mm.wedged)))
                return -EIO;

        if (unlikely(ring->tail + n > ring->effective_size)) {
                ret = intel_wrap_ring_buffer(ring);
                if (unlikely(ret))
                        return ret;
        }

        if (unlikely(ring->space < n)) {
                ret = intel_wait_ring_buffer(ring, n);
                if (unlikely(ret))
                        return ret;
        }

        ring->space -= n;
        return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
        ring->tail &= ring->size - 1;
        ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
        .name = "render ring",
        .id = RING_RENDER,
        .mmio_base = RENDER_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_render_ring,
        .write_tail = ring_write_tail,
        .flush = render_ring_flush,
        .add_request = render_ring_add_request,
        .get_seqno = ring_get_seqno,
        .irq_get = render_ring_get_irq,
        .irq_put = render_ring_put_irq,
        .dispatch_execbuffer = render_ring_dispatch_execbuffer,
        .cleanup = render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
        .name = "bsd ring",
        .id = RING_BSD,
        .mmio_base = BSD_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_ring_common,
        .write_tail = ring_write_tail,
        .flush = bsd_ring_flush,
        .add_request = ring_add_request,
        .get_seqno = ring_get_seqno,
        .irq_get = bsd_ring_get_irq,
        .irq_put = bsd_ring_put_irq,
        .dispatch_execbuffer = ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;

        /* Every tail move must follow the sequence below */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
        I915_WRITE(GEN6_BSD_RNCID, 0x0);

        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                     50))
                DRM_ERROR("timed out waiting for IDLE Indicator\n");

        I915_WRITE_TAIL(ring, value);
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

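/*
 * The gen6 video and blitter rings flush with MI_FLUSH_DW rather than
 * MI_FLUSH; the TLB invalidate bits are set in the command dword only
 * when the caller actually asked for an invalidate.
 */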
static int gen6_ring_flush(struct intel_ring_buffer *ring,
                           u32 invalidate, u32 flush)
{
        uint32_t cmd;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        cmd = MI_FLUSH_DW;
        if (invalidate & I915_GEM_GPU_DOMAINS)
                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                              u32 offset, u32 len)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_get_irq(ring,
                                 GT_USER_INTERRUPT,
                                 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_put_irq(ring,
                                 GT_USER_INTERRUPT,
                                 GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_get_irq(ring,
                                 GT_GEN6_BSD_USER_INTERRUPT,
                                 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_put_irq(ring,
                                 GT_GEN6_BSD_USER_INTERRUPT,
                                 GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
        .name = "gen6 bsd ring",
        .id = RING_BSD,
        .mmio_base = GEN6_BSD_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_ring_common,
        .write_tail = gen6_bsd_ring_write_tail,
        .flush = gen6_ring_flush,
        .add_request = gen6_add_request,
        .get_seqno = ring_get_seqno,
        .irq_get = gen6_bsd_ring_get_irq,
        .irq_put = gen6_bsd_ring_put_irq,
        .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_get_irq(ring,
                                 GT_BLT_USER_INTERRUPT,
                                 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
        gen6_ring_put_irq(ring,
                          GT_BLT_USER_INTERRUPT,
                          GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some steppings of SNB: every time the BLT engine's
 * ring tail is moved, the first command parsed from the ring must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
        (IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
        return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
        if (NEED_BLT_WORKAROUND(ring->dev)) {
                struct drm_i915_gem_object *obj;
                u32 *ptr;
                int ret;

                obj = i915_gem_alloc_object(ring->dev, 4096);
                if (obj == NULL)
                        return -ENOMEM;

                ret = i915_gem_object_pin(obj, 4096, true);
                if (ret) {
                        drm_gem_object_unreference(&obj->base);
                        return ret;
                }

                ptr = kmap(obj->pages[0]);
                *ptr++ = MI_BATCH_BUFFER_END;
                *ptr++ = MI_NOOP;
                kunmap(obj->pages[0]);

                ret = i915_gem_object_set_to_gtt_domain(obj, false);
                if (ret) {
                        i915_gem_object_unpin(obj);
                        drm_gem_object_unreference(&obj->base);
                        return ret;
                }

                ring->private = obj;
        }

        return init_ring_common(ring);
}

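/*
 * On affected SNB steppings every submission is prefixed with a jump
 * into the two-dword batch set up by blt_ring_init() above, so the
 * first command parsed after the tail moves is always
 * MI_BATCH_BUFFER_START.
 */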
static int blt_ring_begin(struct intel_ring_buffer *ring,
                          int num_dwords)
{
        if (ring->private) {
                int ret = intel_ring_begin(ring, num_dwords+2);
                if (ret)
                        return ret;

                intel_ring_emit(ring, MI_BATCH_BUFFER_START);
                intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

                return 0;
        } else
                return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
{
        uint32_t cmd;
        int ret;

        ret = blt_ring_begin(ring, 4);
        if (ret)
                return ret;

        cmd = MI_FLUSH_DW;
        if (invalidate & I915_GEM_DOMAIN_RENDER)
                cmd |= MI_INVALIDATE_TLB;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
        if (!ring->private)
                return;

        i915_gem_object_unpin(ring->private);
        drm_gem_object_unreference(ring->private);
        ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
        .name = "blt ring",
        .id = RING_BLT,
        .mmio_base = BLT_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = blt_ring_init,
        .write_tail = ring_write_tail,
        .flush = blt_ring_flush,
        .add_request = gen6_add_request,
        .get_seqno = ring_get_seqno,
        .irq_get = blt_ring_get_irq,
        .irq_put = blt_ring_put_irq,
        .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
        .cleanup = blt_ring_cleanup,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

        *ring = render_ring;
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->irq_get = gen6_render_ring_get_irq;
                ring->irq_put = gen6_render_ring_put_irq;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->get_seqno = pc_render_get_seqno;
        }

        if (!I915_NEED_GFX_HWS(dev)) {
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
                memset(ring->status_page.page_addr, 0, PAGE_SIZE);
        }

        return intel_init_ring_buffer(dev, ring);
}

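/*
 * Legacy (UMS/DRI1) initialization: the ring memory was set up by the
 * X server at @start, so map it in place instead of allocating and
 * pinning a new GEM object.
 */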
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

        *ring = render_ring;
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->irq_get = gen6_render_ring_get_irq;
                ring->irq_put = gen6_render_ring_put_irq;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->get_seqno = pc_render_get_seqno;
        }

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);

        ring->size = size;
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev))
                ring->effective_size -= 128;

        ring->map.offset = start;
        ring->map.size = size;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        ring->virtual_start = (void __force __iomem *)ring->map.handle;
        return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

        if (IS_GEN6(dev) || IS_GEN7(dev))
                *ring = gen6_bsd_ring;
        else
                *ring = bsd_ring;

        return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

        *ring = gen6_blt_ring;

        return intel_init_ring_buffer(dev, ring);
}