/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

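/*
 * Ring free space is computed modulo the ring size: if the tail has wrapped
 * past the head, the difference is brought back into range by adding the
 * size, and a small gap (I915_RING_FREE_SPACE) is kept in reserve so that
 * head and tail never collide. For example (illustrative numbers only), with
 * size = 4096, head = 512 and tail = 3584: 512 - 3584 = -3072, +4096 = 1024
 * bytes between tail and head, minus the reserved gap.
 */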
int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ring *ring)
{
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
	}

	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
					 ring->tail, ring->size);
}

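/*
 * The legacy flush emitters below all follow the same pattern: reserve space
 * with intel_ring_begin(req, n), write exactly n dwords with
 * intel_ring_emit(), then publish them with intel_ring_advance(). Commands
 * of odd length are padded with MI_NOOP so the tail stays qword-aligned.
 */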
static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;

	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring,
			PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

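/*
 * gen8+ uses a six-dword PIPE_CONTROL. The helper below writes the header,
 * the caller-supplied flag bits and an optional post-sync address;
 * gen8_render_ring_flush() may emit it twice, first as a CS-stall workaround
 * and then with the real flush/invalidate flags.
 */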
static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}

u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

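/*
 * stop_ring() requests a stop via the STOP_RING bit in MI_MODE (on gen3+),
 * waits for the engine to report idle, then clears the CTL/HEAD/TAIL
 * registers. It returns true only if HEAD ended up at zero, which
 * init_ring_common() below relies on before reprogramming the ring.
 */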
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	if (INTEL_GEN(dev_priv) > 2) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		intel_ring_setup_status_page(engine);

	intel_engine_reset_irq(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
				       RING_VALID, RING_VALID,
				       50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static void reset_ring_common(struct intel_engine_cs *engine,
			      struct drm_i915_gem_request *request)
{
	struct intel_ring *ring = request->ring;

	ring->head = request->postfix;
	ring->last_retired_head = -1;
}

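/*
 * intel_ring_workarounds_emit() replays the workaround list recorded by the
 * WA_* helpers further down: one MI_LOAD_REGISTER_IMM header, a
 * register/value pair per entry, and a trailing MI_NOOP, hence the
 * (w->count * 2 + 2) dwords reserved below.
 */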
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		return ret;

	return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

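/*
 * The WA_* macros above record register/mask/value triples via wa_add();
 * they do not touch the hardware directly (except for the I915_READ() used
 * by WA_SET_BIT/WA_CLR_BIT to compute the value). Note that WA_REG()
 * returns from the enclosing function on failure, so these macros are only
 * usable in helpers that return int.
 */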
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

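/*
 * Each "Wa..." tag in the comments below names a workaround from the
 * hardware documentation, followed by the platforms it applies to
 * (skl = Skylake, bxt = Broxton, kbl = Kaby Lake).
 */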
static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/*
	 * Actual WA is to disable percontext preemption granularity control
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));

	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:skl */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaDisableGafsUnitClkGating:skl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
				  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
					   L3_HIGH_PRIO_CREDITS(2));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	return 0;
}

Mika Kuoppalae5f81d62016-06-07 17:18:54 +03001115static int kbl_init_workarounds(struct intel_engine_cs *engine)
1116{
Mika Kuoppalae587f6c2016-06-07 17:18:59 +03001117 struct drm_i915_private *dev_priv = engine->i915;
Mika Kuoppalae5f81d62016-06-07 17:18:54 +03001118 int ret;
1119
1120 ret = gen9_init_workarounds(engine);
1121 if (ret)
1122 return ret;
1123
Mika Kuoppalae587f6c2016-06-07 17:18:59 +03001124 /* WaEnableGapsTsvCreditFix:kbl */
1125 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1126 GEN9_GAPS_TSV_CREDIT_DISABLE));
1127
Mika Kuoppalac0b730d2016-06-07 17:19:06 +03001128 /* WaDisableDynamicCreditSharing:kbl */
1129 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1130 WA_SET_BIT(GAMT_CHKN_BIT_REG,
1131 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1132
Mika Kuoppala8401d422016-06-07 17:19:00 +03001133 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1134 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1135 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1136 HDC_FENCE_DEST_SLM_DISABLE);
1137
Mika Kuoppalafe905812016-06-07 17:19:03 +03001138	/* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new change
	1139	 * involving this register should also be added to the WA batch as required.
	1140	 */
1141 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
1142 /* WaDisableLSQCROPERFforOCL:kbl */
1143 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1144 GEN8_LQSC_RO_PERF_DIS);
1145
Matthew Auld575e3cc2016-08-02 09:36:53 +01001146 /* WaToEnableHwFixForPushConstHWBug:kbl */
1147 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
Mika Kuoppalaad2bdb42016-06-07 17:19:07 +03001148 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1149 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1150
Mika Kuoppala4de5d7c2016-06-07 17:19:11 +03001151 /* WaDisableGafsUnitClkGating:kbl */
1152 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1153
Mika Kuoppala954337a2016-06-07 17:19:12 +03001154 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1155 WA_SET_BIT_MASKED(
1156 GEN7_HALF_SLICE_CHICKEN1,
1157 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1158
Mika Kuoppala4ba9c1f2016-07-20 14:26:12 +03001159 /* WaInPlaceDecompressionHang:kbl */
1160 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1161 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1162
Mika Kuoppalafe905812016-06-07 17:19:03 +03001163 /* WaDisableLSQCROPERFforOCL:kbl */
1164 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1165 if (ret)
1166 return ret;
1167
Mika Kuoppalae5f81d62016-06-07 17:18:54 +03001168 return 0;
1169}
1170
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001171int init_workarounds_ring(struct intel_engine_cs *engine)
Mika Kuoppala72253422014-10-07 17:21:26 +03001172{
Chris Wilsonc0336662016-05-06 15:40:21 +01001173 struct drm_i915_private *dev_priv = engine->i915;
Mika Kuoppala72253422014-10-07 17:21:26 +03001174
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001175 WARN_ON(engine->id != RCS);
Mika Kuoppala72253422014-10-07 17:21:26 +03001176
1177 dev_priv->workarounds.count = 0;
Arun Siluvery33136b02016-01-21 21:43:47 +00001178 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
Mika Kuoppala72253422014-10-07 17:21:26 +03001179
Chris Wilsonc0336662016-05-06 15:40:21 +01001180 if (IS_BROADWELL(dev_priv))
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001181 return bdw_init_workarounds(engine);
Mika Kuoppala72253422014-10-07 17:21:26 +03001182
Chris Wilsonc0336662016-05-06 15:40:21 +01001183 if (IS_CHERRYVIEW(dev_priv))
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001184 return chv_init_workarounds(engine);
Ville Syrjälä00e1e622014-08-27 17:33:12 +03001185
Chris Wilsonc0336662016-05-06 15:40:21 +01001186 if (IS_SKYLAKE(dev_priv))
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001187 return skl_init_workarounds(engine);
Nick Hoathcae04372015-03-17 11:39:38 +02001188
Chris Wilsonc0336662016-05-06 15:40:21 +01001189 if (IS_BROXTON(dev_priv))
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001190 return bxt_init_workarounds(engine);
Hoath, Nicholas3b106532015-02-05 10:47:16 +00001191
Mika Kuoppalae5f81d62016-06-07 17:18:54 +03001192 if (IS_KABYLAKE(dev_priv))
1193 return kbl_init_workarounds(engine);
1194
Ville Syrjälä00e1e622014-08-27 17:33:12 +03001195 return 0;
1196}
1197
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001198static int init_render_ring(struct intel_engine_cs *engine)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001199{
Chris Wilsonc0336662016-05-06 15:40:21 +01001200 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001201 int ret = init_ring_common(engine);
Konrad Zapalowicz9c33baa2014-06-19 19:07:15 +02001202 if (ret)
1203 return ret;
Zhenyu Wanga69ffdb2010-08-30 16:12:42 +08001204
Akash Goel61a563a2014-03-25 18:01:50 +05301205 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
Tvrtko Ursulinac657f62016-05-10 10:57:08 +01001206 if (IS_GEN(dev_priv, 4, 6))
Daniel Vetter6b26c862012-04-24 14:04:12 +02001207 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001208
1209 /* We need to disable the AsyncFlip performance optimisations in order
1210 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1211 * programmed to '1' on all products.
Damien Lespiau8693a822013-05-03 18:48:11 +01001212 *
Ville Syrjälä2441f872015-06-02 15:37:37 +03001213 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001214 */
Tvrtko Ursulinac657f62016-05-10 10:57:08 +01001215 if (IS_GEN(dev_priv, 6, 7))
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001216 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1217
Chris Wilsonf05bb0c2013-01-20 16:33:32 +00001218 /* Required for the hardware to program scanline values for waiting */
Akash Goel01fa0302014-03-24 23:00:04 +05301219 /* WaEnableFlushTlbInvalidationMode:snb */
Chris Wilsonc0336662016-05-06 15:40:21 +01001220 if (IS_GEN6(dev_priv))
Chris Wilsonf05bb0c2013-01-20 16:33:32 +00001221 I915_WRITE(GFX_MODE,
Chris Wilsonaa83e302014-03-21 17:18:54 +00001222 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
Chris Wilsonf05bb0c2013-01-20 16:33:32 +00001223
Akash Goel01fa0302014-03-24 23:00:04 +05301224 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
Chris Wilsonc0336662016-05-06 15:40:21 +01001225 if (IS_GEN7(dev_priv))
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001226 I915_WRITE(GFX_MODE_GEN7,
Akash Goel01fa0302014-03-24 23:00:04 +05301227 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001228 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
Chris Wilson78501ea2010-10-27 12:18:21 +01001229
Chris Wilsonc0336662016-05-06 15:40:21 +01001230 if (IS_GEN6(dev_priv)) {
Kenneth Graunke3a69ddd2012-04-27 12:44:41 -07001231 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1232 * "If this bit is set, STCunit will have LRA as replacement
1233 * policy. [...] This bit must be reset. LRA replacement
1234 * policy is not supported."
1235 */
1236 I915_WRITE(CACHE_MODE_0,
Daniel Vetter5e13a0c2012-05-08 13:39:59 +02001237 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
Ben Widawsky84f9f932011-12-12 19:21:58 -08001238 }
1239
Tvrtko Ursulinac657f62016-05-10 10:57:08 +01001240 if (IS_GEN(dev_priv, 6, 7))
Daniel Vetter6b26c862012-04-24 14:04:12 +02001241 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
Chris Wilsonc6df5412010-12-15 09:56:50 +00001242
Ville Syrjälä035ea402016-07-12 19:24:47 +03001243 if (INTEL_INFO(dev_priv)->gen >= 6)
1244 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
Ben Widawsky15b9f802012-05-25 16:56:23 -07001245
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001246 return init_workarounds_ring(engine);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001247}
1248
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001249static void render_ring_cleanup(struct intel_engine_cs *engine)
Chris Wilsonc6df5412010-12-15 09:56:50 +00001250{
Chris Wilsonc0336662016-05-06 15:40:21 +01001251 struct drm_i915_private *dev_priv = engine->i915;
Ben Widawsky3e789982014-06-30 09:53:37 -07001252
Chris Wilson19880c42016-08-15 10:49:05 +01001253 i915_vma_unpin_and_release(&dev_priv->semaphore);
Chris Wilsonc6df5412010-12-15 09:56:50 +00001254}
1255
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001256static int gen8_rcs_signal(struct drm_i915_gem_request *req)
Ben Widawsky3e789982014-06-30 09:53:37 -07001257{
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001258 struct intel_ring *ring = req->ring;
1259 struct drm_i915_private *dev_priv = req->i915;
Ben Widawsky3e789982014-06-30 09:53:37 -07001260 struct intel_engine_cs *waiter;
Dave Gordonc3232b12016-03-23 18:19:53 +00001261 enum intel_engine_id id;
1262 int ret, num_rings;
Ben Widawsky3e789982014-06-30 09:53:37 -07001263
Tvrtko Ursulinc1bb1142016-08-10 16:22:10 +01001264 num_rings = INTEL_INFO(dev_priv)->num_rings;
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001265 ret = intel_ring_begin(req, (num_rings-1) * 8);
Ben Widawsky3e789982014-06-30 09:53:37 -07001266 if (ret)
1267 return ret;
1268
Dave Gordonc3232b12016-03-23 18:19:53 +00001269 for_each_engine_id(waiter, dev_priv, id) {
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001270 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
Ben Widawsky3e789982014-06-30 09:53:37 -07001271 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1272 continue;
1273
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001274 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1275 intel_ring_emit(ring,
Chris Wilsonb5321f32016-08-02 22:50:18 +01001276 PIPE_CONTROL_GLOBAL_GTT_IVB |
1277 PIPE_CONTROL_QW_WRITE |
1278 PIPE_CONTROL_CS_STALL);
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001279 intel_ring_emit(ring, lower_32_bits(gtt_offset));
1280 intel_ring_emit(ring, upper_32_bits(gtt_offset));
1281 intel_ring_emit(ring, req->fence.seqno);
1282 intel_ring_emit(ring, 0);
1283 intel_ring_emit(ring,
Chris Wilsonb5321f32016-08-02 22:50:18 +01001284 MI_SEMAPHORE_SIGNAL |
1285 MI_SEMAPHORE_TARGET(waiter->hw_id));
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001286 intel_ring_emit(ring, 0);
Ben Widawsky3e789982014-06-30 09:53:37 -07001287 }
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001288 intel_ring_advance(ring);
Ben Widawsky3e789982014-06-30 09:53:37 -07001289
1290 return 0;
1291}
1292
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001293static int gen8_xcs_signal(struct drm_i915_gem_request *req)
Ben Widawsky3e789982014-06-30 09:53:37 -07001294{
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001295 struct intel_ring *ring = req->ring;
1296 struct drm_i915_private *dev_priv = req->i915;
Ben Widawsky3e789982014-06-30 09:53:37 -07001297 struct intel_engine_cs *waiter;
Dave Gordonc3232b12016-03-23 18:19:53 +00001298 enum intel_engine_id id;
1299 int ret, num_rings;
Ben Widawsky3e789982014-06-30 09:53:37 -07001300
Tvrtko Ursulinc1bb1142016-08-10 16:22:10 +01001301 num_rings = INTEL_INFO(dev_priv)->num_rings;
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001302 ret = intel_ring_begin(req, (num_rings-1) * 6);
Ben Widawsky3e789982014-06-30 09:53:37 -07001303 if (ret)
1304 return ret;
1305
Dave Gordonc3232b12016-03-23 18:19:53 +00001306 for_each_engine_id(waiter, dev_priv, id) {
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001307 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
Ben Widawsky3e789982014-06-30 09:53:37 -07001308 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1309 continue;
1310
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001311 intel_ring_emit(ring,
Chris Wilsonb5321f32016-08-02 22:50:18 +01001312 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001313 intel_ring_emit(ring,
Chris Wilsonb5321f32016-08-02 22:50:18 +01001314 lower_32_bits(gtt_offset) |
1315 MI_FLUSH_DW_USE_GTT);
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001316 intel_ring_emit(ring, upper_32_bits(gtt_offset));
1317 intel_ring_emit(ring, req->fence.seqno);
1318 intel_ring_emit(ring,
Chris Wilsonb5321f32016-08-02 22:50:18 +01001319 MI_SEMAPHORE_SIGNAL |
1320 MI_SEMAPHORE_TARGET(waiter->hw_id));
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001321 intel_ring_emit(ring, 0);
Ben Widawsky3e789982014-06-30 09:53:37 -07001322 }
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001323 intel_ring_advance(ring);
Ben Widawsky3e789982014-06-30 09:53:37 -07001324
1325 return 0;
1326}
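/* Illustrative dword accounting for the two signal routines above (a
 * sketch; the example ring count is hypothetical, everything else follows
 * from the emits in the loops): gen8_rcs_signal() costs a 6-dword
 * PIPE_CONTROL QW write plus a 2-dword MI_SEMAPHORE_SIGNAL per target,
 * hence intel_ring_begin(req, (num_rings - 1) * 8), while gen8_xcs_signal()
 * costs a 4-dword MI_FLUSH_DW store plus the same 2-dword signal, hence
 * (num_rings - 1) * 6.  With num_rings == 4 the render engine would
 * reserve 3 * 8 == 24 dwords for one signal packet.
 */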
1327
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001328static int gen6_signal(struct drm_i915_gem_request *req)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001329{
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001330 struct intel_ring *ring = req->ring;
1331 struct drm_i915_private *dev_priv = req->i915;
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01001332 struct intel_engine_cs *engine;
Dave Gordonc3232b12016-03-23 18:19:53 +00001333 int ret, num_rings;
Ben Widawsky78325f22014-04-29 14:52:29 -07001334
Tvrtko Ursulinc1bb1142016-08-10 16:22:10 +01001335 num_rings = INTEL_INFO(dev_priv)->num_rings;
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001336 ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
Ben Widawsky024a43e2014-04-29 14:52:30 -07001337 if (ret)
1338 return ret;
Ben Widawsky024a43e2014-04-29 14:52:30 -07001339
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01001340 for_each_engine(engine, dev_priv) {
1341 i915_reg_t mbox_reg;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001342
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01001343 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
1344 continue;
1345
1346 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001347 if (i915_mmio_reg_valid(mbox_reg)) {
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001348 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1349 intel_ring_emit_reg(ring, mbox_reg);
1350 intel_ring_emit(ring, req->fence.seqno);
Ben Widawsky78325f22014-04-29 14:52:29 -07001351 }
1352 }
Ben Widawsky024a43e2014-04-29 14:52:30 -07001353
Ben Widawskya1444b72014-06-30 09:53:35 -07001354 /* If num_dwords was rounded, make sure the tail pointer is correct */
1355 if (num_rings % 2 == 0)
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001356 intel_ring_emit(ring, MI_NOOP);
1357 intel_ring_advance(ring);
Ben Widawskya1444b72014-06-30 09:53:35 -07001358
Ben Widawsky024a43e2014-04-29 14:52:30 -07001359 return 0;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001360}
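/* A worked example of the rounding above (the ring count is hypothetical):
 * each mailbox update costs 3 dwords (MI_LOAD_REGISTER_IMM(1), the mbox
 * register, the seqno).  With num_rings == 4 that is (4 - 1) * 3 == 9
 * dwords, which round_up(..., 2) pads to 10 so that emission stays qword
 * aligned; the trailing MI_NOOP fills the spare slot and keeps the tail
 * pointer correct.
 */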
1361
Chris Wilsonb0411e72016-08-02 22:50:34 +01001362static void i9xx_submit_request(struct drm_i915_gem_request *request)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001363{
Chris Wilsonb0411e72016-08-02 22:50:34 +01001364 struct drm_i915_private *dev_priv = request->i915;
1365
1366 I915_WRITE_TAIL(request->engine,
1367 intel_ring_offset(request->ring, request->tail));
1368}
1369
1370static int i9xx_emit_request(struct drm_i915_gem_request *req)
1371{
Chris Wilson7e37f882016-08-02 22:50:21 +01001372 struct intel_ring *ring = req->ring;
Ben Widawsky024a43e2014-04-29 14:52:30 -07001373 int ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001374
Chris Wilson9242f972016-08-02 22:50:33 +01001375 ret = intel_ring_begin(req, 4);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001376 if (ret)
1377 return ret;
1378
Chris Wilsonb5321f32016-08-02 22:50:18 +01001379 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1380 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1381 intel_ring_emit(ring, req->fence.seqno);
1382 intel_ring_emit(ring, MI_USER_INTERRUPT);
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01001383 intel_ring_advance(ring);
1384
1385 req->tail = ring->tail;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001386
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001387 return 0;
1388}
1389
Chris Wilsonb0411e72016-08-02 22:50:34 +01001390/**
Chris Wilson618e4ca2016-08-02 22:50:35 +01001391 * gen6_sema_emit_request - Update the semaphore mailbox registers
Chris Wilsonb0411e72016-08-02 22:50:34 +01001392 *
	1393	 * @req: request to write to the ring
1394 *
1395 * Update the mailbox registers in the *other* rings with the current seqno.
1396 * This acts like a signal in the canonical semaphore.
1397 */
Chris Wilson618e4ca2016-08-02 22:50:35 +01001398static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
Chris Wilsonb0411e72016-08-02 22:50:34 +01001399{
Chris Wilson618e4ca2016-08-02 22:50:35 +01001400 int ret;
Chris Wilsonb0411e72016-08-02 22:50:34 +01001401
Chris Wilson618e4ca2016-08-02 22:50:35 +01001402 ret = req->engine->semaphore.signal(req);
1403 if (ret)
1404 return ret;
Chris Wilsonb0411e72016-08-02 22:50:34 +01001405
1406 return i9xx_emit_request(req);
1407}
1408
Chris Wilsonddd66c52016-08-02 22:50:31 +01001409static int gen8_render_emit_request(struct drm_i915_gem_request *req)
Chris Wilsona58c01a2016-04-29 13:18:21 +01001410{
1411 struct intel_engine_cs *engine = req->engine;
Chris Wilson7e37f882016-08-02 22:50:21 +01001412 struct intel_ring *ring = req->ring;
Chris Wilsona58c01a2016-04-29 13:18:21 +01001413 int ret;
1414
Chris Wilson9242f972016-08-02 22:50:33 +01001415 if (engine->semaphore.signal) {
1416 ret = engine->semaphore.signal(req);
1417 if (ret)
1418 return ret;
1419 }
1420
1421 ret = intel_ring_begin(req, 8);
Chris Wilsona58c01a2016-04-29 13:18:21 +01001422 if (ret)
1423 return ret;
1424
Chris Wilsonb5321f32016-08-02 22:50:18 +01001425 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1426 intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
1427 PIPE_CONTROL_CS_STALL |
1428 PIPE_CONTROL_QW_WRITE));
1429 intel_ring_emit(ring, intel_hws_seqno_address(engine));
1430 intel_ring_emit(ring, 0);
1431 intel_ring_emit(ring, i915_gem_request_get_seqno(req));
Chris Wilsona58c01a2016-04-29 13:18:21 +01001432 /* We're thrashing one dword of HWS. */
Chris Wilsonb5321f32016-08-02 22:50:18 +01001433 intel_ring_emit(ring, 0);
1434 intel_ring_emit(ring, MI_USER_INTERRUPT);
1435 intel_ring_emit(ring, MI_NOOP);
Chris Wilsonddd66c52016-08-02 22:50:31 +01001436 intel_ring_advance(ring);
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01001437
1438 req->tail = ring->tail;
Chris Wilsona58c01a2016-04-29 13:18:21 +01001439
1440 return 0;
1441}
1442
Ben Widawskyc8c99b02011-09-14 20:32:47 -07001443/**
	1444	 * gen6/gen8_ring_sync_to - sync the waiter to the signaller on seqno
	1445	 *
	1446	 * @req: request on the engine that must wait
	1447	 * @signal: request which has been, or will be, signalled;
	1448	 *          the waiter blocks until its seqno is visible
1449 */
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001450
1451static int
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001452gen8_ring_sync_to(struct drm_i915_gem_request *req,
1453 struct drm_i915_gem_request *signal)
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001454{
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001455 struct intel_ring *ring = req->ring;
1456 struct drm_i915_private *dev_priv = req->i915;
1457 u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
Chris Wilson6ef48d72016-04-29 13:18:25 +01001458 struct i915_hw_ppgtt *ppgtt;
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001459 int ret;
1460
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001461 ret = intel_ring_begin(req, 4);
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001462 if (ret)
1463 return ret;
1464
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001465 intel_ring_emit(ring,
1466 MI_SEMAPHORE_WAIT |
1467 MI_SEMAPHORE_GLOBAL_GTT |
1468 MI_SEMAPHORE_SAD_GTE_SDD);
1469 intel_ring_emit(ring, signal->fence.seqno);
1470 intel_ring_emit(ring, lower_32_bits(offset));
1471 intel_ring_emit(ring, upper_32_bits(offset));
1472 intel_ring_advance(ring);
Chris Wilson6ef48d72016-04-29 13:18:25 +01001473
1474 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1475 * pagetables and we must reload them before executing the batch.
1476 * We do this on the i915_switch_context() following the wait and
1477 * before the dispatch.
1478 */
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001479 ppgtt = req->ctx->ppgtt;
1480 if (ppgtt && req->engine->id != RCS)
1481 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001482 return 0;
1483}
1484
Ben Widawskyc8c99b02011-09-14 20:32:47 -07001485static int
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001486gen6_ring_sync_to(struct drm_i915_gem_request *req,
1487 struct drm_i915_gem_request *signal)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001488{
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001489 struct intel_ring *ring = req->ring;
Ben Widawskyc8c99b02011-09-14 20:32:47 -07001490 u32 dw1 = MI_SEMAPHORE_MBOX |
1491 MI_SEMAPHORE_COMPARE |
1492 MI_SEMAPHORE_REGISTER;
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01001493 u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
Ben Widawskyebc348b2014-04-29 14:52:28 -07001494 int ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001495
Chris Wilsonddf07be2016-08-02 22:50:39 +01001496 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
1497
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001498 ret = intel_ring_begin(req, 4);
Chris Wilsonddf07be2016-08-02 22:50:39 +01001499 if (ret)
1500 return ret;
1501
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001502 intel_ring_emit(ring, dw1 | wait_mbox);
Ben Widawsky1500f7e2012-04-11 11:18:21 -07001503 /* Throughout all of the GEM code, seqno passed implies our current
1504 * seqno is >= the last seqno executed. However for hardware the
1505 * comparison is strictly greater than.
1506 */
Chris Wilsonad7bdb22016-08-02 22:50:40 +01001507 intel_ring_emit(ring, signal->fence.seqno - 1);
1508 intel_ring_emit(ring, 0);
1509 intel_ring_emit(ring, MI_NOOP);
1510 intel_ring_advance(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001511
1512 return 0;
1513}
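/* A small worked example of the "seqno - 1" trick above (the numbers are
 * hypothetical): to wait for request 100 from the signalling engine, the
 * MBOX wait is armed with 99; since the hardware comparison is strictly
 * greater than, the waiter is released once the mailbox holds 100 (or
 * anything later), matching the software convention of ">= seqno".
 */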
1514
Chris Wilsonf8973c22016-07-01 17:23:21 +01001515static void
Dave Gordon38a0f2d2016-07-20 18:16:06 +01001516gen5_seqno_barrier(struct intel_engine_cs *engine)
Chris Wilsonc6df5412010-12-15 09:56:50 +00001517{
Chris Wilsonf8973c22016-07-01 17:23:21 +01001518	/* MI_STORE commands are internally buffered by the GPU and not flushed
1519 * either by MI_FLUSH or SyncFlush or any other combination of
1520 * MI commands.
Chris Wilsonc6df5412010-12-15 09:56:50 +00001521 *
Chris Wilsonf8973c22016-07-01 17:23:21 +01001522 * "Only the submission of the store operation is guaranteed.
1523 * The write result will be complete (coherent) some time later
1524 * (this is practically a finite period but there is no guaranteed
1525 * latency)."
1526 *
1527 * Empirically, we observe that we need a delay of at least 75us to
1528 * be sure that the seqno write is visible by the CPU.
Chris Wilsonc6df5412010-12-15 09:56:50 +00001529 */
Chris Wilsonf8973c22016-07-01 17:23:21 +01001530 usleep_range(125, 250);
Chris Wilsonc6df5412010-12-15 09:56:50 +00001531}
1532
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001533static void
1534gen6_seqno_barrier(struct intel_engine_cs *engine)
Daniel Vetter4cd53c02012-12-14 16:01:25 +01001535{
Chris Wilsonc0336662016-05-06 15:40:21 +01001536 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilsonbcbdb6d2016-04-27 09:02:01 +01001537
Daniel Vetter4cd53c02012-12-14 16:01:25 +01001538 /* Workaround to force correct ordering between irq and seqno writes on
1539 * ivb (and maybe also on snb) by reading from a CS register (like
Chris Wilson9b9ed302016-04-09 10:57:53 +01001540 * ACTHD) before reading the status page.
1541 *
1542 * Note that this effectively stalls the read by the time it takes to
1543 * do a memory transaction, which more or less ensures that the write
1544 * from the GPU has sufficient time to invalidate the CPU cacheline.
1545 * Alternatively we could delay the interrupt from the CS ring to give
1546 * the write time to land, but that would incur a delay after every
1547 * batch i.e. much more frequent than a delay when waiting for the
1548 * interrupt (with the same net latency).
Chris Wilsonbcbdb6d2016-04-27 09:02:01 +01001549 *
1550 * Also note that to prevent whole machine hangs on gen7, we have to
1551 * take the spinlock to guard against concurrent cacheline access.
Chris Wilson9b9ed302016-04-09 10:57:53 +01001552 */
Chris Wilsonbcbdb6d2016-04-27 09:02:01 +01001553 spin_lock_irq(&dev_priv->uncore.lock);
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001554 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
Chris Wilsonbcbdb6d2016-04-27 09:02:01 +01001555 spin_unlock_irq(&dev_priv->uncore.lock);
Daniel Vetter4cd53c02012-12-14 16:01:25 +01001556}
1557
Chris Wilson31bb59c2016-07-01 17:23:27 +01001558static void
1559gen5_irq_enable(struct intel_engine_cs *engine)
Daniel Vettere48d8632012-04-11 22:12:54 +02001560{
Chris Wilson31bb59c2016-07-01 17:23:27 +01001561 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
Daniel Vettere48d8632012-04-11 22:12:54 +02001562}
1563
1564static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001565gen5_irq_disable(struct intel_engine_cs *engine)
Daniel Vettere48d8632012-04-11 22:12:54 +02001566{
Chris Wilson31bb59c2016-07-01 17:23:27 +01001567 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
Eric Anholt62fdfea2010-05-21 13:26:39 -07001568}
1569
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001570static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001571i9xx_irq_enable(struct intel_engine_cs *engine)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001572{
Chris Wilsonc0336662016-05-06 15:40:21 +01001573 struct drm_i915_private *dev_priv = engine->i915;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001574
Chris Wilson31bb59c2016-07-01 17:23:27 +01001575 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1576 I915_WRITE(IMR, dev_priv->irq_mask);
1577 POSTING_READ_FW(RING_IMR(engine->mmio_base));
Chris Wilsonc2798b12012-04-22 21:13:57 +01001578}
1579
1580static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001581i9xx_irq_disable(struct intel_engine_cs *engine)
Chris Wilsonc2798b12012-04-22 21:13:57 +01001582{
Chris Wilsonc0336662016-05-06 15:40:21 +01001583 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilsonc2798b12012-04-22 21:13:57 +01001584
Chris Wilson31bb59c2016-07-01 17:23:27 +01001585 dev_priv->irq_mask |= engine->irq_enable_mask;
1586 I915_WRITE(IMR, dev_priv->irq_mask);
1587}
1588
1589static void
1590i8xx_irq_enable(struct intel_engine_cs *engine)
1591{
1592 struct drm_i915_private *dev_priv = engine->i915;
1593
1594 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1595 I915_WRITE16(IMR, dev_priv->irq_mask);
1596 POSTING_READ16(RING_IMR(engine->mmio_base));
1597}
1598
1599static void
1600i8xx_irq_disable(struct intel_engine_cs *engine)
1601{
1602 struct drm_i915_private *dev_priv = engine->i915;
1603
1604 dev_priv->irq_mask |= engine->irq_enable_mask;
1605 I915_WRITE16(IMR, dev_priv->irq_mask);
Chris Wilsonc2798b12012-04-22 21:13:57 +01001606}
1607
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001608static int
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01001609bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
Zou Nan haid1b851f2010-05-21 09:08:57 +08001610{
Chris Wilson7e37f882016-08-02 22:50:21 +01001611 struct intel_ring *ring = req->ring;
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001612 int ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001613
John Harrison5fb9de12015-05-29 17:44:07 +01001614 ret = intel_ring_begin(req, 2);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001615 if (ret)
1616 return ret;
1617
Chris Wilsonb5321f32016-08-02 22:50:18 +01001618 intel_ring_emit(ring, MI_FLUSH);
1619 intel_ring_emit(ring, MI_NOOP);
1620 intel_ring_advance(ring);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001621 return 0;
Zou Nan haid1b851f2010-05-21 09:08:57 +08001622}
1623
Chris Wilson0f468322011-01-04 17:35:21 +00001624static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001625gen6_irq_enable(struct intel_engine_cs *engine)
Chris Wilson0f468322011-01-04 17:35:21 +00001626{
Chris Wilsonc0336662016-05-06 15:40:21 +01001627 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson0f468322011-01-04 17:35:21 +00001628
Chris Wilson61ff75a2016-07-01 17:23:28 +01001629 I915_WRITE_IMR(engine,
1630 ~(engine->irq_enable_mask |
1631 engine->irq_keep_mask));
Chris Wilson31bb59c2016-07-01 17:23:27 +01001632 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
Ben Widawskya19d2932013-05-28 19:22:30 -07001633}
1634
1635static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001636gen6_irq_disable(struct intel_engine_cs *engine)
Ben Widawskya19d2932013-05-28 19:22:30 -07001637{
Chris Wilsonc0336662016-05-06 15:40:21 +01001638 struct drm_i915_private *dev_priv = engine->i915;
Ben Widawskya19d2932013-05-28 19:22:30 -07001639
Chris Wilson61ff75a2016-07-01 17:23:28 +01001640 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
Chris Wilson31bb59c2016-07-01 17:23:27 +01001641 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001642}
1643
1644static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001645hsw_vebox_irq_enable(struct intel_engine_cs *engine)
Ben Widawskyabd58f02013-11-02 21:07:09 -07001646{
Chris Wilsonc0336662016-05-06 15:40:21 +01001647 struct drm_i915_private *dev_priv = engine->i915;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001648
Chris Wilson31bb59c2016-07-01 17:23:27 +01001649 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1650 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
1651}
1652
1653static void
1654hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1655{
1656 struct drm_i915_private *dev_priv = engine->i915;
1657
1658 I915_WRITE_IMR(engine, ~0);
1659 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
1660}
1661
1662static void
1663gen8_irq_enable(struct intel_engine_cs *engine)
1664{
1665 struct drm_i915_private *dev_priv = engine->i915;
1666
Chris Wilson61ff75a2016-07-01 17:23:28 +01001667 I915_WRITE_IMR(engine,
1668 ~(engine->irq_enable_mask |
1669 engine->irq_keep_mask));
Chris Wilson31bb59c2016-07-01 17:23:27 +01001670 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1671}
1672
1673static void
1674gen8_irq_disable(struct intel_engine_cs *engine)
1675{
1676 struct drm_i915_private *dev_priv = engine->i915;
1677
Chris Wilson61ff75a2016-07-01 17:23:28 +01001678 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001679}
1680
Zou Nan haid1b851f2010-05-21 09:08:57 +08001681static int
Chris Wilson803688b2016-08-02 22:50:27 +01001682i965_emit_bb_start(struct drm_i915_gem_request *req,
1683 u64 offset, u32 length,
1684 unsigned int dispatch_flags)
Zou Nan haid1b851f2010-05-21 09:08:57 +08001685{
Chris Wilson7e37f882016-08-02 22:50:21 +01001686 struct intel_ring *ring = req->ring;
Chris Wilsone1f99ce2010-10-27 12:45:26 +01001687 int ret;
Chris Wilson78501ea2010-10-27 12:18:21 +01001688
John Harrison5fb9de12015-05-29 17:44:07 +01001689 ret = intel_ring_begin(req, 2);
Chris Wilsone1f99ce2010-10-27 12:45:26 +01001690 if (ret)
1691 return ret;
1692
Chris Wilsonb5321f32016-08-02 22:50:18 +01001693 intel_ring_emit(ring,
Chris Wilson65f56872012-04-17 16:38:12 +01001694 MI_BATCH_BUFFER_START |
1695 MI_BATCH_GTT |
John Harrison8e004ef2015-02-13 11:48:10 +00001696 (dispatch_flags & I915_DISPATCH_SECURE ?
1697 0 : MI_BATCH_NON_SECURE_I965));
Chris Wilsonb5321f32016-08-02 22:50:18 +01001698 intel_ring_emit(ring, offset);
1699 intel_ring_advance(ring);
Chris Wilson78501ea2010-10-27 12:18:21 +01001700
Zou Nan haid1b851f2010-05-21 09:08:57 +08001701 return 0;
1702}
1703
Daniel Vetterb45305f2012-12-17 16:21:27 +01001704/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1705#define I830_BATCH_LIMIT (256*1024)
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001706#define I830_TLB_ENTRIES (2)
1707#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001708static int
Chris Wilson803688b2016-08-02 22:50:27 +01001709i830_emit_bb_start(struct drm_i915_gem_request *req,
1710 u64 offset, u32 len,
1711 unsigned int dispatch_flags)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001712{
Chris Wilson7e37f882016-08-02 22:50:21 +01001713 struct intel_ring *ring = req->ring;
Chris Wilsonbde13eb2016-08-15 10:49:07 +01001714 u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
Chris Wilsonc4e7a412010-11-30 14:10:25 +00001715 int ret;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001716
John Harrison5fb9de12015-05-29 17:44:07 +01001717 ret = intel_ring_begin(req, 6);
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001718 if (ret)
1719 return ret;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001720
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001721 /* Evict the invalid PTE TLBs */
Chris Wilsonb5321f32016-08-02 22:50:18 +01001722 intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1723 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1724 intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1725 intel_ring_emit(ring, cs_offset);
1726 intel_ring_emit(ring, 0xdeadbeef);
1727 intel_ring_emit(ring, MI_NOOP);
1728 intel_ring_advance(ring);
Daniel Vetterb45305f2012-12-17 16:21:27 +01001729
John Harrison8e004ef2015-02-13 11:48:10 +00001730 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
Daniel Vetterb45305f2012-12-17 16:21:27 +01001731 if (len > I830_BATCH_LIMIT)
1732 return -ENOSPC;
1733
John Harrison5fb9de12015-05-29 17:44:07 +01001734 ret = intel_ring_begin(req, 6 + 2);
Daniel Vetterb45305f2012-12-17 16:21:27 +01001735 if (ret)
1736 return ret;
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001737
	1738	/* Blit the batch (which now has all relocs applied) to the
1739 * stable batch scratch bo area (so that the CS never
1740 * stumbles over its tlb invalidation bug) ...
1741 */
Chris Wilsonb5321f32016-08-02 22:50:18 +01001742 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1743 intel_ring_emit(ring,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001744 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
Chris Wilsonb5321f32016-08-02 22:50:18 +01001745 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1746 intel_ring_emit(ring, cs_offset);
1747 intel_ring_emit(ring, 4096);
1748 intel_ring_emit(ring, offset);
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001749
Chris Wilsonb5321f32016-08-02 22:50:18 +01001750 intel_ring_emit(ring, MI_FLUSH);
1751 intel_ring_emit(ring, MI_NOOP);
1752 intel_ring_advance(ring);
Daniel Vetterb45305f2012-12-17 16:21:27 +01001753
1754 /* ... and execute it. */
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001755 offset = cs_offset;
Daniel Vetterb45305f2012-12-17 16:21:27 +01001756 }
Chris Wilsone1f99ce2010-10-27 12:45:26 +01001757
Ville Syrjälä9d611c02015-12-14 18:23:49 +02001758 ret = intel_ring_begin(req, 2);
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001759 if (ret)
1760 return ret;
1761
Chris Wilsonb5321f32016-08-02 22:50:18 +01001762 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1763 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1764 0 : MI_BATCH_NON_SECURE));
1765 intel_ring_advance(ring);
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001766
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001767 return 0;
1768}
1769
1770static int
Chris Wilson803688b2016-08-02 22:50:27 +01001771i915_emit_bb_start(struct drm_i915_gem_request *req,
1772 u64 offset, u32 len,
1773 unsigned int dispatch_flags)
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001774{
Chris Wilson7e37f882016-08-02 22:50:21 +01001775 struct intel_ring *ring = req->ring;
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001776 int ret;
1777
John Harrison5fb9de12015-05-29 17:44:07 +01001778 ret = intel_ring_begin(req, 2);
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001779 if (ret)
1780 return ret;
1781
Chris Wilsonb5321f32016-08-02 22:50:18 +01001782 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1783 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1784 0 : MI_BATCH_NON_SECURE));
1785 intel_ring_advance(ring);
Eric Anholt62fdfea2010-05-21 13:26:39 -07001786
Eric Anholt62fdfea2010-05-21 13:26:39 -07001787 return 0;
1788}
1789
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001790static void cleanup_phys_status_page(struct intel_engine_cs *engine)
Ville Syrjälä7d3fdff2016-01-11 20:48:32 +02001791{
Chris Wilsonc0336662016-05-06 15:40:21 +01001792 struct drm_i915_private *dev_priv = engine->i915;
Ville Syrjälä7d3fdff2016-01-11 20:48:32 +02001793
1794 if (!dev_priv->status_page_dmah)
1795 return;
1796
Chris Wilson91c8a322016-07-05 10:40:23 +01001797 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001798 engine->status_page.page_addr = NULL;
Ville Syrjälä7d3fdff2016-01-11 20:48:32 +02001799}
1800
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001801static void cleanup_status_page(struct intel_engine_cs *engine)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001802{
Chris Wilson57e88532016-08-15 10:48:57 +01001803 struct i915_vma *vma;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001804
Chris Wilson57e88532016-08-15 10:48:57 +01001805 vma = fetch_and_zero(&engine->status_page.vma);
1806 if (!vma)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001807 return;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001808
Chris Wilson57e88532016-08-15 10:48:57 +01001809 i915_vma_unpin(vma);
1810 i915_gem_object_unpin_map(vma->obj);
1811 i915_vma_put(vma);
Eric Anholt62fdfea2010-05-21 13:26:39 -07001812}
1813
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001814static int init_status_page(struct intel_engine_cs *engine)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001815{
Chris Wilson57e88532016-08-15 10:48:57 +01001816 struct drm_i915_gem_object *obj;
1817 struct i915_vma *vma;
1818 unsigned int flags;
1819 int ret;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001820
Chris Wilson57e88532016-08-15 10:48:57 +01001821 obj = i915_gem_object_create(&engine->i915->drm, 4096);
1822 if (IS_ERR(obj)) {
1823 DRM_ERROR("Failed to allocate status page\n");
1824 return PTR_ERR(obj);
Eric Anholt62fdfea2010-05-21 13:26:39 -07001825 }
Chris Wilsone4ffd172011-04-04 09:44:39 +01001826
Chris Wilson57e88532016-08-15 10:48:57 +01001827 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1828 if (ret)
1829 goto err;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001830
Chris Wilson57e88532016-08-15 10:48:57 +01001831 vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
1832 if (IS_ERR(vma)) {
1833 ret = PTR_ERR(vma);
1834 goto err;
1835 }
Eric Anholt62fdfea2010-05-21 13:26:39 -07001836
Chris Wilson57e88532016-08-15 10:48:57 +01001837 flags = PIN_GLOBAL;
1838 if (!HAS_LLC(engine->i915))
1839 /* On g33, we cannot place HWS above 256MiB, so
1840 * restrict its pinning to the low mappable arena.
1841 * Though this restriction is not documented for
1842 * gen4, gen5, or byt, they also behave similarly
1843 * and hang if the HWS is placed at the top of the
1844 * GTT. To generalise, it appears that all !llc
1845 * platforms have issues with us placing the HWS
1846 * above the mappable region (even though we never
	1847	 * actually map it).
1848 */
1849 flags |= PIN_MAPPABLE;
1850 ret = i915_vma_pin(vma, 0, 4096, flags);
1851 if (ret)
1852 goto err;
1853
1854 engine->status_page.vma = vma;
Chris Wilsonbde13eb2016-08-15 10:49:07 +01001855 engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
Chris Wilson57e88532016-08-15 10:48:57 +01001856 engine->status_page.page_addr =
1857 i915_gem_object_pin_map(obj, I915_MAP_WB);
1858
Chris Wilsonbde13eb2016-08-15 10:49:07 +01001859 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1860 engine->name, i915_ggtt_offset(vma));
Eric Anholt62fdfea2010-05-21 13:26:39 -07001861 return 0;
Chris Wilson57e88532016-08-15 10:48:57 +01001862
1863err:
1864 i915_gem_object_put(obj);
1865 return ret;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001866}
1867
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001868static int init_phys_status_page(struct intel_engine_cs *engine)
Chris Wilson6b8294a2012-11-16 11:43:20 +00001869{
Chris Wilsonc0336662016-05-06 15:40:21 +01001870 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson6b8294a2012-11-16 11:43:20 +00001871
Chris Wilson57e88532016-08-15 10:48:57 +01001872 dev_priv->status_page_dmah =
1873 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1874 if (!dev_priv->status_page_dmah)
1875 return -ENOMEM;
Chris Wilson6b8294a2012-11-16 11:43:20 +00001876
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001877 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1878 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
Chris Wilson6b8294a2012-11-16 11:43:20 +00001879
1880 return 0;
1881}
1882
Chris Wilsonaad29fb2016-08-02 22:50:23 +01001883int intel_ring_pin(struct intel_ring *ring)
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001884{
Chris Wilsona687a432016-04-13 17:35:11 +01001885 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
Chris Wilson57e88532016-08-15 10:48:57 +01001886 unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
Chris Wilson9d808412016-08-18 17:16:56 +01001887 enum i915_map_type map;
Chris Wilson57e88532016-08-15 10:48:57 +01001888 struct i915_vma *vma = ring->vma;
Dave Gordon83052162016-04-12 14:46:16 +01001889 void *addr;
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001890 int ret;
1891
Chris Wilson57e88532016-08-15 10:48:57 +01001892 GEM_BUG_ON(ring->vaddr);
1893
Chris Wilson9d808412016-08-18 17:16:56 +01001894 map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
1895
1896 if (vma->obj->stolen)
Chris Wilson57e88532016-08-15 10:48:57 +01001897 flags |= PIN_MAPPABLE;
1898
1899 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
Chris Wilson9d808412016-08-18 17:16:56 +01001900 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
Chris Wilson57e88532016-08-15 10:48:57 +01001901 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1902 else
1903 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1904 if (unlikely(ret))
Chris Wilsondef0c5f2015-10-08 13:39:54 +01001905 return ret;
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001906 }
1907
Chris Wilson57e88532016-08-15 10:48:57 +01001908 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1909 if (unlikely(ret))
1910 return ret;
1911
Chris Wilson9d808412016-08-18 17:16:56 +01001912 if (i915_vma_is_map_and_fenceable(vma))
Chris Wilson57e88532016-08-15 10:48:57 +01001913 addr = (void __force *)i915_vma_pin_iomap(vma);
1914 else
Chris Wilson9d808412016-08-18 17:16:56 +01001915 addr = i915_gem_object_pin_map(vma->obj, map);
Chris Wilson57e88532016-08-15 10:48:57 +01001916 if (IS_ERR(addr))
1917 goto err;
1918
Chris Wilson32c04f12016-08-02 22:50:22 +01001919 ring->vaddr = addr;
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001920 return 0;
Chris Wilsond2cad532016-04-08 12:11:10 +01001921
Chris Wilson57e88532016-08-15 10:48:57 +01001922err:
1923 i915_vma_unpin(vma);
1924 return PTR_ERR(addr);
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001925}
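/* Summary of the mapping choice made in intel_ring_pin() (a descriptive
 * sketch of the code above, not additional policy): the ring pages are
 * CPU-mapped write-back via i915_gem_object_pin_map() on LLC platforms
 * and write-combining otherwise; if the vma ends up map-and-fenceable
 * (e.g. a stolen-memory ring pinned with PIN_MAPPABLE), it is instead
 * accessed through the GGTT aperture with i915_vma_pin_iomap().
 */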
1926
Chris Wilsonaad29fb2016-08-02 22:50:23 +01001927void intel_ring_unpin(struct intel_ring *ring)
1928{
1929 GEM_BUG_ON(!ring->vma);
1930 GEM_BUG_ON(!ring->vaddr);
1931
Chris Wilson9d808412016-08-18 17:16:56 +01001932 if (i915_vma_is_map_and_fenceable(ring->vma))
Chris Wilsonaad29fb2016-08-02 22:50:23 +01001933 i915_vma_unpin_iomap(ring->vma);
Chris Wilson57e88532016-08-15 10:48:57 +01001934 else
1935 i915_gem_object_unpin_map(ring->vma->obj);
Chris Wilsonaad29fb2016-08-02 22:50:23 +01001936 ring->vaddr = NULL;
1937
Chris Wilson57e88532016-08-15 10:48:57 +01001938 i915_vma_unpin(ring->vma);
Chris Wilsonaad29fb2016-08-02 22:50:23 +01001939}
1940
Chris Wilson57e88532016-08-15 10:48:57 +01001941static struct i915_vma *
1942intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
Oscar Mateo2919d292014-07-03 16:28:02 +01001943{
Chris Wilsone3efda42014-04-09 09:19:41 +01001944 struct drm_i915_gem_object *obj;
Chris Wilson57e88532016-08-15 10:48:57 +01001945 struct i915_vma *vma;
Chris Wilsone3efda42014-04-09 09:19:41 +01001946
Chris Wilsonc58b7352016-08-18 17:16:57 +01001947 obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
1948 if (!obj)
Chris Wilson57e88532016-08-15 10:48:57 +01001949 obj = i915_gem_object_create(&dev_priv->drm, size);
1950 if (IS_ERR(obj))
1951 return ERR_CAST(obj);
Chris Wilsone3efda42014-04-09 09:19:41 +01001952
Akash Goel24f3a8c2014-06-17 10:59:42 +05301953 /* mark ring buffers as read-only from GPU side by default */
1954 obj->gt_ro = 1;
1955
Chris Wilson57e88532016-08-15 10:48:57 +01001956 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
1957 if (IS_ERR(vma))
1958 goto err;
Chris Wilsone3efda42014-04-09 09:19:41 +01001959
Chris Wilson57e88532016-08-15 10:48:57 +01001960 return vma;
1961
1962err:
1963 i915_gem_object_put(obj);
1964 return vma;
Chris Wilsone3efda42014-04-09 09:19:41 +01001965}
1966
Chris Wilson7e37f882016-08-02 22:50:21 +01001967struct intel_ring *
1968intel_engine_create_ring(struct intel_engine_cs *engine, int size)
Chris Wilson01101fa2015-09-03 13:01:39 +01001969{
Chris Wilson7e37f882016-08-02 22:50:21 +01001970 struct intel_ring *ring;
Chris Wilson57e88532016-08-15 10:48:57 +01001971 struct i915_vma *vma;
Chris Wilson01101fa2015-09-03 13:01:39 +01001972
Chris Wilson8f942012016-08-02 22:50:30 +01001973 GEM_BUG_ON(!is_power_of_2(size));
1974
Chris Wilson01101fa2015-09-03 13:01:39 +01001975 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
Chris Wilson57e88532016-08-15 10:48:57 +01001976 if (!ring)
Chris Wilson01101fa2015-09-03 13:01:39 +01001977 return ERR_PTR(-ENOMEM);
1978
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001979 ring->engine = engine;
Chris Wilson01101fa2015-09-03 13:01:39 +01001980
Chris Wilson675d9ad2016-08-04 07:52:36 +01001981 INIT_LIST_HEAD(&ring->request_list);
1982
Chris Wilson01101fa2015-09-03 13:01:39 +01001983 ring->size = size;
1984 /* Workaround an erratum on the i830 which causes a hang if
1985 * the TAIL pointer points to within the last 2 cachelines
1986 * of the buffer.
1987 */
1988 ring->effective_size = size;
Chris Wilsonc0336662016-05-06 15:40:21 +01001989 if (IS_I830(engine->i915) || IS_845G(engine->i915))
Chris Wilson01101fa2015-09-03 13:01:39 +01001990 ring->effective_size -= 2 * CACHELINE_BYTES;
1991
1992 ring->last_retired_head = -1;
1993 intel_ring_update_space(ring);
1994
Chris Wilson57e88532016-08-15 10:48:57 +01001995 vma = intel_ring_create_vma(engine->i915, size);
1996 if (IS_ERR(vma)) {
Chris Wilson01101fa2015-09-03 13:01:39 +01001997 kfree(ring);
Chris Wilson57e88532016-08-15 10:48:57 +01001998 return ERR_CAST(vma);
Chris Wilson01101fa2015-09-03 13:01:39 +01001999 }
Chris Wilson57e88532016-08-15 10:48:57 +01002000 ring->vma = vma;
Chris Wilson01101fa2015-09-03 13:01:39 +01002001
2002 return ring;
2003}
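/* Worked example of the effective_size adjustment (assuming 4 KiB pages
 * and CACHELINE_BYTES == 64): the 32 * PAGE_SIZE == 128 KiB ring created
 * by intel_init_ring_buffer() gets effective_size = 131072 - 2 * 64 ==
 * 130944 bytes on i830/845G, keeping the tail out of the last two
 * cachelines that trigger the hang.
 */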
2004
2005void
Chris Wilson7e37f882016-08-02 22:50:21 +01002006intel_ring_free(struct intel_ring *ring)
Chris Wilson01101fa2015-09-03 13:01:39 +01002007{
Chris Wilson57e88532016-08-15 10:48:57 +01002008 i915_vma_put(ring->vma);
Chris Wilson01101fa2015-09-03 13:01:39 +01002009 kfree(ring);
2010}
2011
Chris Wilson0cb26a82016-06-24 14:55:53 +01002012static int intel_ring_context_pin(struct i915_gem_context *ctx,
2013 struct intel_engine_cs *engine)
2014{
2015 struct intel_context *ce = &ctx->engine[engine->id];
2016 int ret;
2017
Chris Wilson91c8a322016-07-05 10:40:23 +01002018 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002019
2020 if (ce->pin_count++)
2021 return 0;
2022
2023 if (ce->state) {
Chris Wilson7abc98f2016-08-15 10:48:55 +01002024 ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
2025 if (ret)
2026 goto error;
2027
Chris Wilsonbf3783e2016-08-15 10:48:54 +01002028 ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
2029 PIN_GLOBAL | PIN_HIGH);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002030 if (ret)
2031 goto error;
2032 }
2033
Chris Wilsonc7c3c072016-06-24 14:55:54 +01002034 /* The kernel context is only used as a placeholder for flushing the
2035 * active context. It is never used for submitting user rendering and
2036 * as such never requires the golden render context, and so we can skip
2037 * emitting it when we switch to the kernel context. This is required
2038 * as during eviction we cannot allocate and pin the renderstate in
2039 * order to initialise the context.
2040 */
2041 if (ctx == ctx->i915->kernel_context)
2042 ce->initialised = true;
2043
Chris Wilson9a6feaf2016-07-20 13:31:50 +01002044 i915_gem_context_get(ctx);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002045 return 0;
2046
2047error:
2048 ce->pin_count = 0;
2049 return ret;
2050}
2051
2052static void intel_ring_context_unpin(struct i915_gem_context *ctx,
2053 struct intel_engine_cs *engine)
2054{
2055 struct intel_context *ce = &ctx->engine[engine->id];
2056
Chris Wilson91c8a322016-07-05 10:40:23 +01002057 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002058
2059 if (--ce->pin_count)
2060 return;
2061
2062 if (ce->state)
Chris Wilsonbf3783e2016-08-15 10:48:54 +01002063 i915_vma_unpin(ce->state);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002064
Chris Wilson9a6feaf2016-07-20 13:31:50 +01002065 i915_gem_context_put(ctx);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002066}
2067
Tvrtko Ursulinacd27842016-07-13 16:03:39 +01002068static int intel_init_ring_buffer(struct intel_engine_cs *engine)
Eric Anholt62fdfea2010-05-21 13:26:39 -07002069{
Tvrtko Ursulinacd27842016-07-13 16:03:39 +01002070 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson32c04f12016-08-02 22:50:22 +01002071 struct intel_ring *ring;
Chris Wilsondd785e32010-08-07 11:01:34 +01002072 int ret;
2073
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002074 WARN_ON(engine->buffer);
Daniel Vetterbfc882b2014-11-20 00:33:08 +01002075
Tvrtko Ursulin019bf272016-07-13 16:03:41 +01002076 intel_engine_setup_common(engine);
2077
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002078 memset(engine->semaphore.sync_seqno, 0,
2079 sizeof(engine->semaphore.sync_seqno));
Chris Wilson0dc79fb2011-01-05 10:32:24 +00002080
Tvrtko Ursulin019bf272016-07-13 16:03:41 +01002081 ret = intel_engine_init_common(engine);
Chris Wilson688e6c72016-07-01 17:23:15 +01002082 if (ret)
2083 goto error;
Eric Anholt62fdfea2010-05-21 13:26:39 -07002084
Chris Wilson0cb26a82016-06-24 14:55:53 +01002085 /* We may need to do things with the shrinker which
2086 * require us to immediately switch back to the default
2087 * context. This can cause a problem as pinning the
2088 * default context also requires GTT space which may not
2089 * be available. To avoid this we always pin the default
2090 * context.
2091 */
2092 ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
2093 if (ret)
2094 goto error;
2095
Chris Wilson32c04f12016-08-02 22:50:22 +01002096 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
2097 if (IS_ERR(ring)) {
2098 ret = PTR_ERR(ring);
Dave Gordonb0366a52015-12-08 15:02:36 +00002099 goto error;
2100 }
Chris Wilson01101fa2015-09-03 13:01:39 +01002101
Carlos Santa31776592016-08-17 12:30:56 -07002102 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2103 WARN_ON(engine->id != RCS);
2104 ret = init_phys_status_page(engine);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002105 if (ret)
Oscar Mateo8ee14972014-05-22 14:13:34 +01002106 goto error;
Chris Wilson6b8294a2012-11-16 11:43:20 +00002107 } else {
Carlos Santa31776592016-08-17 12:30:56 -07002108 ret = init_status_page(engine);
Chris Wilson6b8294a2012-11-16 11:43:20 +00002109 if (ret)
Oscar Mateo8ee14972014-05-22 14:13:34 +01002110 goto error;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002111 }
Eric Anholt62fdfea2010-05-21 13:26:39 -07002112
Chris Wilsonaad29fb2016-08-02 22:50:23 +01002113 ret = intel_ring_pin(ring);
Daniel Vetterbfc882b2014-11-20 00:33:08 +01002114 if (ret) {
Chris Wilson57e88532016-08-15 10:48:57 +01002115 intel_ring_free(ring);
Daniel Vetterbfc882b2014-11-20 00:33:08 +01002116 goto error;
Eric Anholt62fdfea2010-05-21 13:26:39 -07002117 }
Chris Wilson57e88532016-08-15 10:48:57 +01002118 engine->buffer = ring;
Eric Anholt62fdfea2010-05-21 13:26:39 -07002119
Oscar Mateo8ee14972014-05-22 14:13:34 +01002120 return 0;
2121
2122error:
Chris Wilson7e37f882016-08-02 22:50:21 +01002123 intel_engine_cleanup(engine);
Oscar Mateo8ee14972014-05-22 14:13:34 +01002124 return ret;
Eric Anholt62fdfea2010-05-21 13:26:39 -07002125}
2126
Chris Wilson7e37f882016-08-02 22:50:21 +01002127void intel_engine_cleanup(struct intel_engine_cs *engine)
Eric Anholt62fdfea2010-05-21 13:26:39 -07002128{
John Harrison6402c332014-10-31 12:00:26 +00002129 struct drm_i915_private *dev_priv;
Chris Wilson33626e62010-10-29 16:18:36 +01002130
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00002131 if (!intel_engine_initialized(engine))
Eric Anholt62fdfea2010-05-21 13:26:39 -07002132 return;
2133
Chris Wilsonc0336662016-05-06 15:40:21 +01002134 dev_priv = engine->i915;
John Harrison6402c332014-10-31 12:00:26 +00002135
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002136 if (engine->buffer) {
Chris Wilson21a2c582016-08-15 10:49:11 +01002137 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
2138 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
Chris Wilson33626e62010-10-29 16:18:36 +01002139
Chris Wilsonaad29fb2016-08-02 22:50:23 +01002140 intel_ring_unpin(engine->buffer);
Chris Wilson7e37f882016-08-02 22:50:21 +01002141 intel_ring_free(engine->buffer);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002142 engine->buffer = NULL;
Dave Gordonb0366a52015-12-08 15:02:36 +00002143 }
Chris Wilson78501ea2010-10-27 12:18:21 +01002144
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002145 if (engine->cleanup)
2146 engine->cleanup(engine);
Zou Nan hai8d192152010-11-02 16:31:01 +08002147
Carlos Santa31776592016-08-17 12:30:56 -07002148 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002149 WARN_ON(engine->id != RCS);
2150 cleanup_phys_status_page(engine);
Carlos Santa31776592016-08-17 12:30:56 -07002151 } else {
2152 cleanup_status_page(engine);
Ville Syrjälä7d3fdff2016-01-11 20:48:32 +02002153 }
Brad Volkin44e895a2014-05-10 14:10:43 -07002154
Chris Wilson96a945a2016-08-03 13:19:16 +01002155 intel_engine_cleanup_common(engine);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002156
2157 intel_ring_context_unpin(dev_priv->kernel_context, engine);
2158
Chris Wilsonc0336662016-05-06 15:40:21 +01002159 engine->i915 = NULL;
Eric Anholt62fdfea2010-05-21 13:26:39 -07002160}
2161
Chris Wilson821ed7d2016-09-09 14:11:53 +01002162void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
2163{
2164 struct intel_engine_cs *engine;
2165
2166 for_each_engine(engine, dev_priv) {
2167 engine->buffer->head = engine->buffer->tail;
2168 engine->buffer->last_retired_head = -1;
2169 }
2170}
2171
John Harrison6689cb22015-03-19 12:30:08 +00002172int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
Chris Wilson9d7730912012-11-27 16:22:52 +00002173{
Chris Wilson63103462016-04-28 09:56:49 +01002174 int ret;
2175
2176 /* Flush enough space to reduce the likelihood of waiting after
2177 * we start building the request - in which case we will just
2178 * have to repeat work.
2179 */
Chris Wilsona0442462016-04-29 09:07:05 +01002180 request->reserved_space += LEGACY_REQUEST_SIZE;
Chris Wilson63103462016-04-28 09:56:49 +01002181
Chris Wilson1dae2df2016-08-02 22:50:19 +01002182 request->ring = request->engine->buffer;
Chris Wilson63103462016-04-28 09:56:49 +01002183
2184 ret = intel_ring_begin(request, 0);
2185 if (ret)
2186 return ret;
2187
Chris Wilsona0442462016-04-29 09:07:05 +01002188 request->reserved_space -= LEGACY_REQUEST_SIZE;
Chris Wilson63103462016-04-28 09:56:49 +01002189 return 0;
Chris Wilson9d7730912012-11-27 16:22:52 +00002190}
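/* A descriptive note on the pattern above (nothing beyond what the code
 * already does): temporarily bumping reserved_space by LEGACY_REQUEST_SIZE
 * before the zero-dword intel_ring_begin() means any wait for ring space
 * tends to happen here, while the request is still empty; the bump is
 * removed straight afterwards so the normal reservation applies to the
 * actual emission.
 */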
2191
Chris Wilson987046a2016-04-28 09:56:46 +01002192static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
Mika Kuoppalacbcc80d2012-12-04 15:12:03 +02002193{
Chris Wilson7e37f882016-08-02 22:50:21 +01002194 struct intel_ring *ring = req->ring;
Chris Wilson987046a2016-04-28 09:56:46 +01002195 struct drm_i915_gem_request *target;
Chris Wilson7da844c2016-08-04 07:52:38 +01002196 int ret;
Chris Wilson987046a2016-04-28 09:56:46 +01002197
Chris Wilson1dae2df2016-08-02 22:50:19 +01002198 intel_ring_update_space(ring);
2199 if (ring->space >= bytes)
Chris Wilson987046a2016-04-28 09:56:46 +01002200 return 0;
2201
2202 /*
2203 * Space is reserved in the ringbuffer for finalising the request,
2204 * as that cannot be allowed to fail. During request finalisation,
2205 * reserved_space is set to 0 to stop the overallocation and the
2206 * assumption is that then we never need to wait (which has the
2207 * risk of failing with EINTR).
2208 *
2209 * See also i915_gem_request_alloc() and i915_add_request().
2210 */
Chris Wilson0251a962016-04-28 09:56:47 +01002211 GEM_BUG_ON(!req->reserved_space);
Chris Wilson987046a2016-04-28 09:56:46 +01002212
Chris Wilson675d9ad2016-08-04 07:52:36 +01002213 list_for_each_entry(target, &ring->request_list, ring_link) {
Chris Wilson987046a2016-04-28 09:56:46 +01002214 unsigned space;
2215
Chris Wilson987046a2016-04-28 09:56:46 +01002216 /* Would completion of this request free enough space? */
Chris Wilson1dae2df2016-08-02 22:50:19 +01002217 space = __intel_ring_space(target->postfix, ring->tail,
2218 ring->size);
Chris Wilson987046a2016-04-28 09:56:46 +01002219 if (space >= bytes)
2220 break;
2221 }
2222
Chris Wilson675d9ad2016-08-04 07:52:36 +01002223 if (WARN_ON(&target->ring_link == &ring->request_list))
Chris Wilson987046a2016-04-28 09:56:46 +01002224 return -ENOSPC;
2225
Chris Wilson22dd3bb2016-09-09 14:11:50 +01002226 ret = i915_wait_request(target,
2227 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
Chris Wilsonea746f32016-09-09 14:11:49 +01002228 NULL, NO_WAITBOOST);
Chris Wilson7da844c2016-08-04 07:52:38 +01002229 if (ret)
2230 return ret;
2231
Chris Wilson7da844c2016-08-04 07:52:38 +01002232 i915_gem_request_retire_upto(target);
2233
2234 intel_ring_update_space(ring);
2235 GEM_BUG_ON(ring->space < bytes);
2236 return 0;
Chris Wilson987046a2016-04-28 09:56:46 +01002237}
2238
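/*
 * intel_ring_begin() ensures there is space for @num_dwords (plus the
 * caller's reserved_space) at ring->tail, waiting and/or wrapping as
 * required. On a wrap, the padding up to the physical end of the buffer
 * is filled with MI_NOOP and emission restarts at offset 0.
 */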
2239int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2240{
Chris Wilson7e37f882016-08-02 22:50:21 +01002241 struct intel_ring *ring = req->ring;
Chris Wilson1dae2df2016-08-02 22:50:19 +01002242 int remain_actual = ring->size - ring->tail;
2243 int remain_usable = ring->effective_size - ring->tail;
Chris Wilson987046a2016-04-28 09:56:46 +01002244 int bytes = num_dwords * sizeof(u32);
2245 int total_bytes, wait_bytes;
John Harrison79bbcc22015-06-30 12:40:55 +01002246 bool need_wrap = false;
Mika Kuoppalacbcc80d2012-12-04 15:12:03 +02002247
Chris Wilson0251a962016-04-28 09:56:47 +01002248 total_bytes = bytes + req->reserved_space;
John Harrison29b1b412015-06-18 13:10:09 +01002249
John Harrison79bbcc22015-06-30 12:40:55 +01002250 if (unlikely(bytes > remain_usable)) {
2251 /*
 2252		 * Not enough space for the basic request, so we need to flush
 2253		 * out the remainder of the ring and then wait for base + reserved.
2254 */
2255 wait_bytes = remain_actual + total_bytes;
2256 need_wrap = true;
Chris Wilson987046a2016-04-28 09:56:46 +01002257 } else if (unlikely(total_bytes > remain_usable)) {
2258 /*
 2259		 * The base request will fit but the reserved space
 2260		 * falls off the end, so we don't need an immediate wrap
 2261		 * and only have to wait for the reserved space, measured
 2262		 * from the start of the ringbuffer.
2263 */
Chris Wilson0251a962016-04-28 09:56:47 +01002264 wait_bytes = remain_actual + req->reserved_space;
John Harrison79bbcc22015-06-30 12:40:55 +01002265 } else {
Chris Wilson987046a2016-04-28 09:56:46 +01002266 /* No wrapping required, just waiting. */
2267 wait_bytes = total_bytes;
Mika Kuoppalacbcc80d2012-12-04 15:12:03 +02002268 }
2269
Chris Wilson1dae2df2016-08-02 22:50:19 +01002270 if (wait_bytes > ring->space) {
Chris Wilson987046a2016-04-28 09:56:46 +01002271 int ret = wait_for_space(req, wait_bytes);
Mika Kuoppalacbcc80d2012-12-04 15:12:03 +02002272 if (unlikely(ret))
2273 return ret;
2274 }
2275
Chris Wilson987046a2016-04-28 09:56:46 +01002276 if (unlikely(need_wrap)) {
Chris Wilson1dae2df2016-08-02 22:50:19 +01002277 GEM_BUG_ON(remain_actual > ring->space);
2278 GEM_BUG_ON(ring->tail + remain_actual > ring->size);
Mika Kuoppalacbcc80d2012-12-04 15:12:03 +02002279
Chris Wilson987046a2016-04-28 09:56:46 +01002280 /* Fill the tail with MI_NOOP */
Chris Wilson1dae2df2016-08-02 22:50:19 +01002281 memset(ring->vaddr + ring->tail, 0, remain_actual);
2282 ring->tail = 0;
2283 ring->space -= remain_actual;
Chris Wilson987046a2016-04-28 09:56:46 +01002284 }
Chris Wilson78501ea2010-10-27 12:18:21 +01002285
Chris Wilson1dae2df2016-08-02 22:50:19 +01002286 ring->space -= bytes;
2287 GEM_BUG_ON(ring->space < 0);
Chris Wilson304d6952014-01-02 14:32:35 +00002288 return 0;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002289}
2290
Ville Syrjälä753b1ad2014-02-11 19:52:05 +02002291/* Align the ring tail to a cacheline boundary */
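/*
 * For example (illustrative values, assuming CACHELINE_BYTES == 64): a tail
 * at byte offset 0x1234 is 52 bytes (13 dwords) into its cacheline, so
 * 16 - 13 = 3 MI_NOOPs are emitted and the tail advances to 0x1240, which
 * is 64-byte aligned.
 */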
John Harrisonbba09b12015-05-29 17:44:06 +01002292int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
Ville Syrjälä753b1ad2014-02-11 19:52:05 +02002293{
Chris Wilson7e37f882016-08-02 22:50:21 +01002294 struct intel_ring *ring = req->ring;
Chris Wilsonb5321f32016-08-02 22:50:18 +01002295 int num_dwords =
2296 (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
Ville Syrjälä753b1ad2014-02-11 19:52:05 +02002297 int ret;
2298
2299 if (num_dwords == 0)
2300 return 0;
2301
Chris Wilson18393f62014-04-09 09:19:40 +01002302 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
John Harrison5fb9de12015-05-29 17:44:07 +01002303 ret = intel_ring_begin(req, num_dwords);
Ville Syrjälä753b1ad2014-02-11 19:52:05 +02002304 if (ret)
2305 return ret;
2306
2307 while (num_dwords--)
Chris Wilsonb5321f32016-08-02 22:50:18 +01002308 intel_ring_emit(ring, MI_NOOP);
Ville Syrjälä753b1ad2014-02-11 19:52:05 +02002309
Chris Wilsonb5321f32016-08-02 22:50:18 +01002310 intel_ring_advance(ring);
Ville Syrjälä753b1ad2014-02-11 19:52:05 +02002311
2312 return 0;
2313}
2314
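/*
 * Gen6 BSD tail writes need the ring to be awake: the sequence below blocks
 * the IDLE messaging so the GT keeps the engine out of rc6, polls the sleep
 * indicator until the ring has woken, writes the tail via
 * i9xx_submit_request(), and then re-enables the IDLE messages. All of this
 * is done under forcewake using the _FW register accessors.
 */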
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01002315static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002316{
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01002317 struct drm_i915_private *dev_priv = request->i915;
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002318
Chris Wilson76f84212016-06-30 15:33:45 +01002319 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2320
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002321 /* Every tail move must follow the sequence below */
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002322
Chris Wilson12f55812012-07-05 17:14:01 +01002323 /* Disable notification that the ring is IDLE. The GT
2324 * will then assume that it is busy and bring it out of rc6.
2325 */
Chris Wilson76f84212016-06-30 15:33:45 +01002326 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2327 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
Chris Wilson12f55812012-07-05 17:14:01 +01002328
2329 /* Clear the context id. Here be magic! */
Chris Wilson76f84212016-06-30 15:33:45 +01002330 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
Chris Wilson12f55812012-07-05 17:14:01 +01002331
2332 /* Wait for the ring not to be idle, i.e. for it to wake up. */
Chris Wilson76f84212016-06-30 15:33:45 +01002333 if (intel_wait_for_register_fw(dev_priv,
2334 GEN6_BSD_SLEEP_PSMI_CONTROL,
2335 GEN6_BSD_SLEEP_INDICATOR,
2336 0,
2337 50))
Chris Wilson12f55812012-07-05 17:14:01 +01002338 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002339
Chris Wilson12f55812012-07-05 17:14:01 +01002340 /* Now that the ring is fully powered up, update the tail */
Chris Wilsonb0411e72016-08-02 22:50:34 +01002341 i9xx_submit_request(request);
Chris Wilson12f55812012-07-05 17:14:01 +01002342
2343 /* Let the ring send IDLE messages to the GT again,
2344 * and so let it sleep to conserve power when idle.
2345 */
Chris Wilson76f84212016-06-30 15:33:45 +01002346 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2347 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2348
2349 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002350}
2351
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01002352static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002353{
Chris Wilson7e37f882016-08-02 22:50:21 +01002354 struct intel_ring *ring = req->ring;
Chris Wilson71a77e02011-02-02 12:13:49 +00002355 uint32_t cmd;
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00002356 int ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002357
John Harrison5fb9de12015-05-29 17:44:07 +01002358 ret = intel_ring_begin(req, 4);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00002359 if (ret)
2360 return ret;
2361
Chris Wilson71a77e02011-02-02 12:13:49 +00002362 cmd = MI_FLUSH_DW;
Chris Wilsonc0336662016-05-06 15:40:21 +01002363 if (INTEL_GEN(req->i915) >= 8)
Ben Widawsky075b3bb2013-11-02 21:07:13 -07002364 cmd += 1;
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00002365
2366 /* We always require a command barrier so that subsequent
2367 * commands, such as breadcrumb interrupts, are strictly ordered
2368 * wrt the contents of the write cache being flushed to memory
2369 * (and thus being coherent from the CPU).
2370 */
2371 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2372
Jesse Barnes9a289772012-10-26 09:42:42 -07002373 /*
2374 * Bspec vol 1c.5 - video engine command streamer:
2375 * "If ENABLED, all TLBs will be invalidated once the flush
2376 * operation is complete. This bit is only valid when the
2377 * Post-Sync Operation field is a value of 1h or 3h."
2378 */
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01002379 if (mode & EMIT_INVALIDATE)
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00002380 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
2381
Chris Wilsonb5321f32016-08-02 22:50:18 +01002382 intel_ring_emit(ring, cmd);
2383 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
Chris Wilsonc0336662016-05-06 15:40:21 +01002384 if (INTEL_GEN(req->i915) >= 8) {
Chris Wilsonb5321f32016-08-02 22:50:18 +01002385 intel_ring_emit(ring, 0); /* upper addr */
2386 intel_ring_emit(ring, 0); /* value */
Ben Widawsky075b3bb2013-11-02 21:07:13 -07002387 } else {
Chris Wilsonb5321f32016-08-02 22:50:18 +01002388 intel_ring_emit(ring, 0);
2389 intel_ring_emit(ring, MI_NOOP);
Ben Widawsky075b3bb2013-11-02 21:07:13 -07002390 }
Chris Wilsonb5321f32016-08-02 22:50:18 +01002391 intel_ring_advance(ring);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00002392 return 0;
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002393}
2394
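/*
 * Batch buffer dispatch for the legacy ring: each variant below emits
 * MI_BATCH_BUFFER_START with the generation-specific addressing, security
 * (MI_BATCH_NON_SECURE*) and resource-streamer bits derived from
 * dispatch_flags.
 */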
2395static int
Chris Wilson803688b2016-08-02 22:50:27 +01002396gen8_emit_bb_start(struct drm_i915_gem_request *req,
2397 u64 offset, u32 len,
2398 unsigned int dispatch_flags)
Ben Widawsky1c7a0622013-11-02 21:07:12 -07002399{
Chris Wilson7e37f882016-08-02 22:50:21 +01002400 struct intel_ring *ring = req->ring;
Chris Wilsonb5321f32016-08-02 22:50:18 +01002401 bool ppgtt = USES_PPGTT(req->i915) &&
John Harrison8e004ef2015-02-13 11:48:10 +00002402 !(dispatch_flags & I915_DISPATCH_SECURE);
Ben Widawsky1c7a0622013-11-02 21:07:12 -07002403 int ret;
2404
John Harrison5fb9de12015-05-29 17:44:07 +01002405 ret = intel_ring_begin(req, 4);
Ben Widawsky1c7a0622013-11-02 21:07:12 -07002406 if (ret)
2407 return ret;
2408
2409 /* FIXME(BDW): Address space and security selectors. */
Chris Wilsonb5321f32016-08-02 22:50:18 +01002410 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
Abdiel Janulgue919032e2015-06-16 13:39:40 +03002411 (dispatch_flags & I915_DISPATCH_RS ?
2412 MI_BATCH_RESOURCE_STREAMER : 0));
Chris Wilsonb5321f32016-08-02 22:50:18 +01002413 intel_ring_emit(ring, lower_32_bits(offset));
2414 intel_ring_emit(ring, upper_32_bits(offset));
2415 intel_ring_emit(ring, MI_NOOP);
2416 intel_ring_advance(ring);
Ben Widawsky1c7a0622013-11-02 21:07:12 -07002417
2418 return 0;
2419}
2420
2421static int
Chris Wilson803688b2016-08-02 22:50:27 +01002422hsw_emit_bb_start(struct drm_i915_gem_request *req,
2423 u64 offset, u32 len,
2424 unsigned int dispatch_flags)
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002425{
Chris Wilson7e37f882016-08-02 22:50:21 +01002426 struct intel_ring *ring = req->ring;
Akshay Joshi0206e352011-08-16 15:34:10 -04002427 int ret;
Chris Wilsonab6f8e32010-09-19 17:53:44 +01002428
John Harrison5fb9de12015-05-29 17:44:07 +01002429 ret = intel_ring_begin(req, 2);
Akshay Joshi0206e352011-08-16 15:34:10 -04002430 if (ret)
2431 return ret;
Chris Wilsone1f99ce2010-10-27 12:45:26 +01002432
Chris Wilsonb5321f32016-08-02 22:50:18 +01002433 intel_ring_emit(ring,
Chris Wilson77072252014-09-10 12:18:27 +01002434 MI_BATCH_BUFFER_START |
John Harrison8e004ef2015-02-13 11:48:10 +00002435 (dispatch_flags & I915_DISPATCH_SECURE ?
Abdiel Janulgue919032e2015-06-16 13:39:40 +03002436 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
2437 (dispatch_flags & I915_DISPATCH_RS ?
2438 MI_BATCH_RESOURCE_STREAMER : 0));
Chris Wilsond7d4eed2012-10-17 12:09:54 +01002439	/* bits 0-7 are the length on GEN6+ */
Chris Wilsonb5321f32016-08-02 22:50:18 +01002440 intel_ring_emit(ring, offset);
2441 intel_ring_advance(ring);
Chris Wilsond7d4eed2012-10-17 12:09:54 +01002442
2443 return 0;
2444}
2445
2446static int
Chris Wilson803688b2016-08-02 22:50:27 +01002447gen6_emit_bb_start(struct drm_i915_gem_request *req,
2448 u64 offset, u32 len,
2449 unsigned int dispatch_flags)
Chris Wilsond7d4eed2012-10-17 12:09:54 +01002450{
Chris Wilson7e37f882016-08-02 22:50:21 +01002451 struct intel_ring *ring = req->ring;
Chris Wilsond7d4eed2012-10-17 12:09:54 +01002452 int ret;
2453
John Harrison5fb9de12015-05-29 17:44:07 +01002454 ret = intel_ring_begin(req, 2);
Chris Wilsond7d4eed2012-10-17 12:09:54 +01002455 if (ret)
2456 return ret;
2457
Chris Wilsonb5321f32016-08-02 22:50:18 +01002458 intel_ring_emit(ring,
Chris Wilsond7d4eed2012-10-17 12:09:54 +01002459 MI_BATCH_BUFFER_START |
John Harrison8e004ef2015-02-13 11:48:10 +00002460 (dispatch_flags & I915_DISPATCH_SECURE ?
2461 0 : MI_BATCH_NON_SECURE_I965));
Akshay Joshi0206e352011-08-16 15:34:10 -04002462	/* bits 0-7 are the length on GEN6+ */
Chris Wilsonb5321f32016-08-02 22:50:18 +01002463 intel_ring_emit(ring, offset);
2464 intel_ring_advance(ring);
Chris Wilsonab6f8e32010-09-19 17:53:44 +01002465
Akshay Joshi0206e352011-08-16 15:34:10 -04002466 return 0;
Xiang, Haihao881f47b2010-09-19 14:40:43 +01002467}
2468
Chris Wilson549f7362010-10-19 11:19:32 +01002469/* Blitter support (SandyBridge+) */
2470
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01002471static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
Zou Nan hai8d192152010-11-02 16:31:01 +08002472{
Chris Wilson7e37f882016-08-02 22:50:21 +01002473 struct intel_ring *ring = req->ring;
Chris Wilson71a77e02011-02-02 12:13:49 +00002474 uint32_t cmd;
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00002475 int ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002476
John Harrison5fb9de12015-05-29 17:44:07 +01002477 ret = intel_ring_begin(req, 4);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00002478 if (ret)
2479 return ret;
2480
Chris Wilson71a77e02011-02-02 12:13:49 +00002481 cmd = MI_FLUSH_DW;
Chris Wilsonc0336662016-05-06 15:40:21 +01002482 if (INTEL_GEN(req->i915) >= 8)
Ben Widawsky075b3bb2013-11-02 21:07:13 -07002483 cmd += 1;
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00002484
2485 /* We always require a command barrier so that subsequent
2486 * commands, such as breadcrumb interrupts, are strictly ordered
2487 * wrt the contents of the write cache being flushed to memory
2488 * (and thus being coherent from the CPU).
2489 */
2490 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2491
Jesse Barnes9a289772012-10-26 09:42:42 -07002492 /*
2493 * Bspec vol 1c.3 - blitter engine command streamer:
2494 * "If ENABLED, all TLBs will be invalidated once the flush
2495 * operation is complete. This bit is only valid when the
2496 * Post-Sync Operation field is a value of 1h or 3h."
2497 */
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01002498 if (mode & EMIT_INVALIDATE)
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00002499 cmd |= MI_INVALIDATE_TLB;
Chris Wilsonb5321f32016-08-02 22:50:18 +01002500 intel_ring_emit(ring, cmd);
2501 intel_ring_emit(ring,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002502 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
Chris Wilsonc0336662016-05-06 15:40:21 +01002503 if (INTEL_GEN(req->i915) >= 8) {
Chris Wilsonb5321f32016-08-02 22:50:18 +01002504 intel_ring_emit(ring, 0); /* upper addr */
2505 intel_ring_emit(ring, 0); /* value */
Ben Widawsky075b3bb2013-11-02 21:07:13 -07002506 } else {
Chris Wilsonb5321f32016-08-02 22:50:18 +01002507 intel_ring_emit(ring, 0);
2508 intel_ring_emit(ring, MI_NOOP);
Ben Widawsky075b3bb2013-11-02 21:07:13 -07002509 }
Chris Wilsonb5321f32016-08-02 22:50:18 +01002510 intel_ring_advance(ring);
Rodrigo Vivifd3da6c2013-06-06 16:58:16 -03002511
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00002512 return 0;
Zou Nan hai8d192152010-11-02 16:31:01 +08002513}
2514
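/*
 * Semaphore setup: on gen8+ a single 4096-byte object is pinned high in the
 * global GTT (allocated once and shared by all engines) and per-engine
 * signal offsets into it are computed; on gen6/7 the per-engine mailbox
 * registers and wait values are filled in instead. Any allocation failure
 * simply disables semaphores rather than failing engine initialisation.
 */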
Tvrtko Ursulind9a64612016-06-29 16:09:27 +01002515static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2516 struct intel_engine_cs *engine)
2517{
Tvrtko Ursulindb3d4012016-06-29 16:09:28 +01002518 struct drm_i915_gem_object *obj;
Tvrtko Ursulin1b9e6652016-06-29 16:09:29 +01002519 int ret, i;
Tvrtko Ursulindb3d4012016-06-29 16:09:28 +01002520
Chris Wilson39df9192016-07-20 13:31:57 +01002521 if (!i915.semaphores)
Tvrtko Ursulindb3d4012016-06-29 16:09:28 +01002522 return;
2523
Chris Wilson51d545d2016-08-15 10:49:02 +01002524 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
2525 struct i915_vma *vma;
2526
Chris Wilson91c8a322016-07-05 10:40:23 +01002527 obj = i915_gem_object_create(&dev_priv->drm, 4096);
Chris Wilson51d545d2016-08-15 10:49:02 +01002528 if (IS_ERR(obj))
2529 goto err;
2530
2531 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
2532 if (IS_ERR(vma))
2533 goto err_obj;
2534
2535 ret = i915_gem_object_set_to_gtt_domain(obj, false);
2536 if (ret)
2537 goto err_obj;
2538
2539 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
2540 if (ret)
2541 goto err_obj;
2542
2543 dev_priv->semaphore = vma;
Tvrtko Ursulindb3d4012016-06-29 16:09:28 +01002544 }
2545
Tvrtko Ursulind9a64612016-06-29 16:09:27 +01002546 if (INTEL_GEN(dev_priv) >= 8) {
Chris Wilsonbde13eb2016-08-15 10:49:07 +01002547 u32 offset = i915_ggtt_offset(dev_priv->semaphore);
Tvrtko Ursulin1b9e6652016-06-29 16:09:29 +01002548
Chris Wilsonad7bdb22016-08-02 22:50:40 +01002549 engine->semaphore.sync_to = gen8_ring_sync_to;
Tvrtko Ursulind9a64612016-06-29 16:09:27 +01002550 engine->semaphore.signal = gen8_xcs_signal;
Tvrtko Ursulin1b9e6652016-06-29 16:09:29 +01002551
2552 for (i = 0; i < I915_NUM_ENGINES; i++) {
Chris Wilsonbde13eb2016-08-15 10:49:07 +01002553 u32 ring_offset;
Tvrtko Ursulin1b9e6652016-06-29 16:09:29 +01002554
2555 if (i != engine->id)
2556 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
2557 else
2558 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
2559
2560 engine->semaphore.signal_ggtt[i] = ring_offset;
2561 }
Tvrtko Ursulind9a64612016-06-29 16:09:27 +01002562 } else if (INTEL_GEN(dev_priv) >= 6) {
Chris Wilsonad7bdb22016-08-02 22:50:40 +01002563 engine->semaphore.sync_to = gen6_ring_sync_to;
Tvrtko Ursulind9a64612016-06-29 16:09:27 +01002564 engine->semaphore.signal = gen6_signal;
Tvrtko Ursulin4b8e38a2016-06-29 16:09:31 +01002565
2566 /*
 2567		 * This semaphore scheme is only used on pre-gen8
 2568		 * platforms, and there is no VCS2 ring before gen8,
 2569		 * so the semaphore between RCS and VCS2 is initialized
 2570		 * as INVALID; gen8 initializes the semaphore between
 2571		 * VCS2 and RCS later.
2572 */
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01002573 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
Tvrtko Ursulin4b8e38a2016-06-29 16:09:31 +01002574 static const struct {
2575 u32 wait_mbox;
2576 i915_reg_t mbox_reg;
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01002577 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
2578 [RCS_HW] = {
2579 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
2580 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
2581 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
Tvrtko Ursulin4b8e38a2016-06-29 16:09:31 +01002582 },
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01002583 [VCS_HW] = {
2584 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
2585 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
2586 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
Tvrtko Ursulin4b8e38a2016-06-29 16:09:31 +01002587 },
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01002588 [BCS_HW] = {
2589 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
2590 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
2591 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
Tvrtko Ursulin4b8e38a2016-06-29 16:09:31 +01002592 },
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01002593 [VECS_HW] = {
2594 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2595 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2596 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
Tvrtko Ursulin4b8e38a2016-06-29 16:09:31 +01002597 },
2598 };
2599 u32 wait_mbox;
2600 i915_reg_t mbox_reg;
2601
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01002602 if (i == engine->hw_id) {
Tvrtko Ursulin4b8e38a2016-06-29 16:09:31 +01002603 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2604 mbox_reg = GEN6_NOSYNC;
2605 } else {
Tvrtko Ursulin318f89c2016-08-16 17:04:21 +01002606 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2607 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
Tvrtko Ursulin4b8e38a2016-06-29 16:09:31 +01002608 }
2609
2610 engine->semaphore.mbox.wait[i] = wait_mbox;
2611 engine->semaphore.mbox.signal[i] = mbox_reg;
2612 }
Tvrtko Ursulind9a64612016-06-29 16:09:27 +01002613 }
Chris Wilson51d545d2016-08-15 10:49:02 +01002614
2615 return;
2616
2617err_obj:
2618 i915_gem_object_put(obj);
2619err:
2620 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
2621 i915.semaphores = 0;
Tvrtko Ursulind9a64612016-06-29 16:09:27 +01002622}
2623
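/*
 * Select the user-interrupt enable/disable hooks (and, where the hardware
 * needs one, the seqno read barrier) appropriate to the generation.
 */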
Chris Wilsoned003072016-07-01 09:18:13 +01002624static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2625 struct intel_engine_cs *engine)
2626{
Tvrtko Ursulinc78d6062016-07-13 16:03:38 +01002627 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
2628
Chris Wilsoned003072016-07-01 09:18:13 +01002629 if (INTEL_GEN(dev_priv) >= 8) {
Chris Wilson31bb59c2016-07-01 17:23:27 +01002630 engine->irq_enable = gen8_irq_enable;
2631 engine->irq_disable = gen8_irq_disable;
Chris Wilsoned003072016-07-01 09:18:13 +01002632 engine->irq_seqno_barrier = gen6_seqno_barrier;
2633 } else if (INTEL_GEN(dev_priv) >= 6) {
Chris Wilson31bb59c2016-07-01 17:23:27 +01002634 engine->irq_enable = gen6_irq_enable;
2635 engine->irq_disable = gen6_irq_disable;
Chris Wilsoned003072016-07-01 09:18:13 +01002636 engine->irq_seqno_barrier = gen6_seqno_barrier;
2637 } else if (INTEL_GEN(dev_priv) >= 5) {
Chris Wilson31bb59c2016-07-01 17:23:27 +01002638 engine->irq_enable = gen5_irq_enable;
2639 engine->irq_disable = gen5_irq_disable;
Chris Wilsonf8973c22016-07-01 17:23:21 +01002640 engine->irq_seqno_barrier = gen5_seqno_barrier;
Chris Wilsoned003072016-07-01 09:18:13 +01002641 } else if (INTEL_GEN(dev_priv) >= 3) {
Chris Wilson31bb59c2016-07-01 17:23:27 +01002642 engine->irq_enable = i9xx_irq_enable;
2643 engine->irq_disable = i9xx_irq_disable;
Chris Wilsoned003072016-07-01 09:18:13 +01002644 } else {
Chris Wilson31bb59c2016-07-01 17:23:27 +01002645 engine->irq_enable = i8xx_irq_enable;
2646 engine->irq_disable = i8xx_irq_disable;
Chris Wilsoned003072016-07-01 09:18:13 +01002647 }
2648}
2649
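/*
 * Default vfuncs shared by every legacy ringbuffer engine; the per-engine
 * init functions below override emit_flush, emit_request, interrupt masks
 * and so on as required for render, BSD, blitter and vebox.
 */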
Tvrtko Ursulin06a2fe22016-06-29 16:09:20 +01002650static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2651 struct intel_engine_cs *engine)
2652{
Chris Wilson618e4ca2016-08-02 22:50:35 +01002653 intel_ring_init_irq(dev_priv, engine);
2654 intel_ring_init_semaphores(dev_priv, engine);
2655
Tvrtko Ursulin1d8a1332016-06-29 16:09:25 +01002656 engine->init_hw = init_ring_common;
Chris Wilson821ed7d2016-09-09 14:11:53 +01002657 engine->reset_hw = reset_ring_common;
Tvrtko Ursulin7445a2a2016-06-29 16:09:21 +01002658
Chris Wilsonddd66c52016-08-02 22:50:31 +01002659 engine->emit_request = i9xx_emit_request;
Chris Wilson618e4ca2016-08-02 22:50:35 +01002660 if (i915.semaphores)
2661 engine->emit_request = gen6_sema_emit_request;
Chris Wilsonddd66c52016-08-02 22:50:31 +01002662 engine->submit_request = i9xx_submit_request;
Chris Wilson6f7bef72016-07-01 09:18:12 +01002663
2664 if (INTEL_GEN(dev_priv) >= 8)
Chris Wilson803688b2016-08-02 22:50:27 +01002665 engine->emit_bb_start = gen8_emit_bb_start;
Chris Wilson6f7bef72016-07-01 09:18:12 +01002666 else if (INTEL_GEN(dev_priv) >= 6)
Chris Wilson803688b2016-08-02 22:50:27 +01002667 engine->emit_bb_start = gen6_emit_bb_start;
Chris Wilson6f7bef72016-07-01 09:18:12 +01002668 else if (INTEL_GEN(dev_priv) >= 4)
Chris Wilson803688b2016-08-02 22:50:27 +01002669 engine->emit_bb_start = i965_emit_bb_start;
Chris Wilson6f7bef72016-07-01 09:18:12 +01002670 else if (IS_I830(dev_priv) || IS_845G(dev_priv))
Chris Wilson803688b2016-08-02 22:50:27 +01002671 engine->emit_bb_start = i830_emit_bb_start;
Chris Wilson6f7bef72016-07-01 09:18:12 +01002672 else
Chris Wilson803688b2016-08-02 22:50:27 +01002673 engine->emit_bb_start = i915_emit_bb_start;
Tvrtko Ursulin06a2fe22016-06-29 16:09:20 +01002674}
2675
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002676int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08002677{
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002678 struct drm_i915_private *dev_priv = engine->i915;
Ben Widawsky3e789982014-06-30 09:53:37 -07002679 int ret;
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08002680
Tvrtko Ursulin06a2fe22016-06-29 16:09:20 +01002681 intel_ring_default_vfuncs(dev_priv, engine);
2682
Chris Wilson61ff75a2016-07-01 17:23:28 +01002683 if (HAS_L3_DPF(dev_priv))
2684 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
Chris Wilsonf8973c22016-07-01 17:23:21 +01002685
Chris Wilsonc0336662016-05-06 15:40:21 +01002686 if (INTEL_GEN(dev_priv) >= 8) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002687 engine->init_context = intel_rcs_ctx_init;
Chris Wilsonddd66c52016-08-02 22:50:31 +01002688 engine->emit_request = gen8_render_emit_request;
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002689 engine->emit_flush = gen8_render_ring_flush;
Chris Wilson39df9192016-07-20 13:31:57 +01002690 if (i915.semaphores)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002691 engine->semaphore.signal = gen8_rcs_signal;
Chris Wilsonc0336662016-05-06 15:40:21 +01002692 } else if (INTEL_GEN(dev_priv) >= 6) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002693 engine->init_context = intel_rcs_ctx_init;
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002694 engine->emit_flush = gen7_render_ring_flush;
Chris Wilsonc0336662016-05-06 15:40:21 +01002695 if (IS_GEN6(dev_priv))
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002696 engine->emit_flush = gen6_render_ring_flush;
Chris Wilsonc0336662016-05-06 15:40:21 +01002697 } else if (IS_GEN5(dev_priv)) {
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002698 engine->emit_flush = gen4_render_ring_flush;
Daniel Vetter59465b52012-04-11 22:12:48 +02002699 } else {
Chris Wilsonc0336662016-05-06 15:40:21 +01002700 if (INTEL_GEN(dev_priv) < 4)
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002701 engine->emit_flush = gen2_render_ring_flush;
Chris Wilson46f0f8d2012-04-18 11:12:11 +01002702 else
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002703 engine->emit_flush = gen4_render_ring_flush;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002704 engine->irq_enable_mask = I915_USER_INTERRUPT;
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08002705 }
Ben Widawsky707d9cf2014-06-30 09:53:36 -07002706
Chris Wilsonc0336662016-05-06 15:40:21 +01002707 if (IS_HASWELL(dev_priv))
Chris Wilson803688b2016-08-02 22:50:27 +01002708 engine->emit_bb_start = hsw_emit_bb_start;
Chris Wilson6f7bef72016-07-01 09:18:12 +01002709
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002710 engine->init_hw = init_render_ring;
2711 engine->cleanup = render_ring_cleanup;
Daniel Vetter59465b52012-04-11 22:12:48 +02002712
Tvrtko Ursulinacd27842016-07-13 16:03:39 +01002713 ret = intel_init_ring_buffer(engine);
Daniel Vetter99be1df2014-11-20 00:33:06 +01002714 if (ret)
2715 return ret;
2716
Chris Wilsonf8973c22016-07-01 17:23:21 +01002717 if (INTEL_GEN(dev_priv) >= 6) {
Chris Wilson56c0f1a2016-08-15 10:48:58 +01002718 ret = intel_engine_create_scratch(engine, 4096);
Chris Wilson7d5ea802016-07-01 17:23:20 +01002719 if (ret)
2720 return ret;
2721 } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
Chris Wilson56c0f1a2016-08-15 10:48:58 +01002722 ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
Daniel Vetter99be1df2014-11-20 00:33:06 +01002723 if (ret)
2724 return ret;
2725 }
2726
2727 return 0;
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08002728}
2729
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002730int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08002731{
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002732 struct drm_i915_private *dev_priv = engine->i915;
Daniel Vetter58fa3832012-04-11 22:12:49 +02002733
Tvrtko Ursulin06a2fe22016-06-29 16:09:20 +01002734 intel_ring_default_vfuncs(dev_priv, engine);
2735
Chris Wilsonc0336662016-05-06 15:40:21 +01002736 if (INTEL_GEN(dev_priv) >= 6) {
Daniel Vetter0fd2c202012-04-11 22:12:55 +02002737 /* gen6 bsd needs a special wa for tail updates */
Chris Wilsonc0336662016-05-06 15:40:21 +01002738 if (IS_GEN6(dev_priv))
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01002739 engine->submit_request = gen6_bsd_submit_request;
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002740 engine->emit_flush = gen6_bsd_ring_flush;
Tvrtko Ursulinc78d6062016-07-13 16:03:38 +01002741 if (INTEL_GEN(dev_priv) < 8)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002742 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
Daniel Vetter58fa3832012-04-11 22:12:49 +02002743 } else {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002744 engine->mmio_base = BSD_RING_BASE;
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002745 engine->emit_flush = bsd_ring_flush;
Tvrtko Ursulin8d228912016-06-29 16:09:32 +01002746 if (IS_GEN5(dev_priv))
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002747 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
Tvrtko Ursulin8d228912016-06-29 16:09:32 +01002748 else
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002749 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
Daniel Vetter58fa3832012-04-11 22:12:49 +02002750 }
Daniel Vetter58fa3832012-04-11 22:12:49 +02002751
Tvrtko Ursulinacd27842016-07-13 16:03:39 +01002752 return intel_init_ring_buffer(engine);
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08002753}
Chris Wilson549f7362010-10-19 11:19:32 +01002754
Zhao Yakui845f74a2014-04-17 10:37:37 +08002755/**
Damien Lespiau62659922015-01-29 14:13:40 +00002756 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
Zhao Yakui845f74a2014-04-17 10:37:37 +08002757 */
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002758int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
Zhao Yakui845f74a2014-04-17 10:37:37 +08002759{
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002760 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulin06a2fe22016-06-29 16:09:20 +01002761
2762 intel_ring_default_vfuncs(dev_priv, engine);
2763
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002764 engine->emit_flush = gen6_bsd_ring_flush;
Zhao Yakui845f74a2014-04-17 10:37:37 +08002765
Tvrtko Ursulinacd27842016-07-13 16:03:39 +01002766 return intel_init_ring_buffer(engine);
Zhao Yakui845f74a2014-04-17 10:37:37 +08002767}
2768
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002769int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
Chris Wilson549f7362010-10-19 11:19:32 +01002770{
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002771 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulin06a2fe22016-06-29 16:09:20 +01002772
2773 intel_ring_default_vfuncs(dev_priv, engine);
2774
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002775 engine->emit_flush = gen6_ring_flush;
Tvrtko Ursulinc78d6062016-07-13 16:03:38 +01002776 if (INTEL_GEN(dev_priv) < 8)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002777 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
Chris Wilson549f7362010-10-19 11:19:32 +01002778
Tvrtko Ursulinacd27842016-07-13 16:03:39 +01002779 return intel_init_ring_buffer(engine);
Chris Wilson549f7362010-10-19 11:19:32 +01002780}
Chris Wilsona7b97612012-07-20 12:41:08 +01002781
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002782int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
Ben Widawsky9a8a2212013-05-28 19:22:23 -07002783{
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01002784 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulin06a2fe22016-06-29 16:09:20 +01002785
2786 intel_ring_default_vfuncs(dev_priv, engine);
2787
Chris Wilsonc7fe7d22016-08-02 22:50:24 +01002788 engine->emit_flush = gen6_ring_flush;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002789
Tvrtko Ursulinc78d6062016-07-13 16:03:38 +01002790 if (INTEL_GEN(dev_priv) < 8) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002791 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
Chris Wilson31bb59c2016-07-01 17:23:27 +01002792 engine->irq_enable = hsw_vebox_irq_enable;
2793 engine->irq_disable = hsw_vebox_irq_disable;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002794 }
Ben Widawsky9a8a2212013-05-28 19:22:23 -07002795
Tvrtko Ursulinacd27842016-07-13 16:03:39 +01002796 return intel_init_ring_buffer(engine);
Ben Widawsky9a8a2212013-05-28 19:22:23 -07002797}