/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

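/*
 * Space left in the ring, in bytes, for the given head and tail offsets:
 * the tail-to-head distance modulo the ring size, less the
 * I915_RING_FREE_SPACE slack that keeps the tail from ever catching up
 * with the head.
 */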
int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

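/*
 * Consume the head position recorded when requests were retired, then
 * recompute how much of the ring is free for new commands.
 */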
void intel_ring_update_space(struct intel_ring *ring)
{
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
	}

	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
					 ring->tail, ring->size);
}

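/*
 * Gen2 flush: a bare MI_FLUSH, with MI_READ_FLUSH added when the caller
 * also asks for an invalidate.
 */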
static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;

	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

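/*
 * Workaround helper: emit a PIPE_CONTROL carrying only CS-stall and
 * stall-at-scoreboard. Gen7 requires this before a PIPE_CONTROL that
 * sets the state cache invalidate bit (see gen7_render_ring_flush()).
 */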
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring,
			PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

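/*
 * Emit a single six-dword gen8 PIPE_CONTROL with the given flags and
 * post-sync (scratch) address; gen8_render_ring_flush() composes its
 * flushes out of one or two of these.
 */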
static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}

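/*
 * Return the engine's current ACTHD (active head) pointer: a 2x32 read of
 * the 64-bit register on gen8+, the per-engine register on gen4+, and the
 * single legacy ACTHD register on older parts.
 */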
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

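/*
 * Point HWS_PGA at the DMA (bus) address of the physically allocated
 * status page; on gen4+ the address bits above 32 are folded into the
 * low nibble field of the register.
 */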
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

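/*
 * Ask the engine to stop (on gen3+ via the STOP_RING bit in MI_MODE),
 * wait for it to report idle, then clear the ring control, head and tail
 * registers. Returns true if the head was successfully reset to zero.
 */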
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!IS_GEN2(dev_priv)) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	if (!IS_GEN2(dev_priv)) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

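/*
 * Bring an engine's legacy ringbuffer back to a known state: stop the
 * ring (retrying once if the head refuses to reset), select the GTT-backed
 * or physical status page, program the start register, clear the head,
 * re-enable the ring control and wait for the hardware to report the ring
 * valid before resynchronising the software head/tail copies.
 */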
static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	struct drm_i915_gem_object *obj = ring->obj;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev_priv))
		intel_ring_setup_status_page(engine);
	else
		ring_setup_phys_status_page(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));
	I915_WRITE_HEAD(engine, 0);
	(void)I915_READ_HEAD(engine);

	I915_WRITE_CTL(engine,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
		     I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), I915_READ_TAIL(engine),
			  I915_READ_START(engine),
			  (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	ring->last_retired_head = -1;
	ring->head = I915_READ_HEAD(engine);
	ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
	intel_ring_update_space(ring);

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

void intel_fini_pipe_control(struct intel_engine_cs *engine)
{
	if (engine->scratch.obj == NULL)
		return;

	i915_gem_object_ggtt_unpin(engine->scratch.obj);
	i915_gem_object_put(engine->scratch.obj);
	engine->scratch.obj = NULL;
}

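/*
 * Allocate, pin and record the per-engine scratch page used as the
 * PIPE_CONTROL post-sync write target, preferring stolen memory and
 * falling back to a normal GEM object.
 */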
int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	WARN_ON(engine->scratch.obj);

	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
	if (!obj)
		obj = i915_gem_object_create(&engine->i915->drm, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		ret = PTR_ERR(obj);
		goto err;
	}

	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch.obj = obj;
	engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, engine->scratch.gtt_offset);
	return 0;

err_unref:
	i915_gem_object_put(engine->scratch.obj);
err:
	return ret;
}

Arun Siluvery86d7f232014-08-26 14:44:50 +0100659{
Chris Wilson7e37f882016-08-02 22:50:21 +0100660 struct intel_ring *ring = req->ring;
Chris Wilsonc0336662016-05-06 15:40:21 +0100661 struct i915_workarounds *w = &req->i915->workarounds;
662 int ret, i;
Arun Siluvery888b5992014-08-26 14:44:51 +0100663
Francisco Jerez02235802015-10-07 14:44:01 +0300664 if (w->count == 0)
Mika Kuoppala72253422014-10-07 17:21:26 +0300665 return 0;
Arun Siluvery888b5992014-08-26 14:44:51 +0100666
Chris Wilson7c9cf4e2016-08-02 22:50:25 +0100667 ret = req->engine->emit_flush(req, EMIT_BARRIER);
Arun Siluvery86d7f232014-08-26 14:44:50 +0100668 if (ret)
669 return ret;
670
John Harrison5fb9de12015-05-29 17:44:07 +0100671 ret = intel_ring_begin(req, (w->count * 2 + 2));
Mika Kuoppala72253422014-10-07 17:21:26 +0300672 if (ret)
673 return ret;
674
Chris Wilsonb5321f32016-08-02 22:50:18 +0100675 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
Mika Kuoppala72253422014-10-07 17:21:26 +0300676 for (i = 0; i < w->count; i++) {
Chris Wilsonb5321f32016-08-02 22:50:18 +0100677 intel_ring_emit_reg(ring, w->reg[i].addr);
678 intel_ring_emit(ring, w->reg[i].value);
Mika Kuoppala72253422014-10-07 17:21:26 +0300679 }
Chris Wilsonb5321f32016-08-02 22:50:18 +0100680 intel_ring_emit(ring, MI_NOOP);
Mika Kuoppala72253422014-10-07 17:21:26 +0300681
Chris Wilsonb5321f32016-08-02 22:50:18 +0100682 intel_ring_advance(ring);
Mika Kuoppala72253422014-10-07 17:21:26 +0300683
Chris Wilson7c9cf4e2016-08-02 22:50:25 +0100684 ret = req->engine->emit_flush(req, EMIT_BARRIER);
Mika Kuoppala72253422014-10-07 17:21:26 +0300685 if (ret)
686 return ret;
687
688 DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
689
690 return 0;
691}
692
static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		return ret;

	return 0;
}

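/*
 * Append one addr/mask/value tuple to the global workaround table;
 * fails with -ENOSPC once I915_MAX_WA_REGS entries are in use.
 */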
static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

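/*
 * Convenience wrappers around wa_add(). Note that WA_REG expands to a
 * return statement on failure, so these may only be used inside functions
 * returning int. The *_MASKED variants target registers whose upper 16
 * bits act as a write mask, using _MASKED_BIT_ENABLE/_MASKED_FIELD to
 * build the value.
 */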
#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

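/*
 * Add a register to the engine's userspace whitelist by programming the
 * next free RING_FORCE_TO_NONPRIV slot; each engine only has
 * RING_MAX_NONPRIV_SLOTS such slots.
 */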
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

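/*
 * Per-slice IZ hashing tweak: for each slice where exactly one subslice
 * has 7 EUs (the is_power_of_2() test on the subslice_7eu mask), program
 * a hashing value derived from that subslice's index; see also
 * intel_device_info_runtime_init().
 */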
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/*
	 * Actual WA is to disable percontext preemption granularity control
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	}

	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaEnableGapsTsvCreditFix:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
					   GEN9_GAPS_TSV_CREDIT_DISABLE));
	}

	/* WaDisablePowerCompilerClockGating:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
		WA_SET_BIT_MASKED(HIZ_CHICKEN,
				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);

	/* WaBarrierPerformanceFixDisable:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE |
				  HDC_BARRIER_PERFORMANCE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:skl */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaDisableGafsUnitClkGating:skl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001096static int bxt_init_workarounds(struct intel_engine_cs *engine)
Nick Hoathcae04372015-03-17 11:39:38 +02001097{
Chris Wilsonc0336662016-05-06 15:40:21 +01001098 struct drm_i915_private *dev_priv = engine->i915;
Arun Siluveryaa0011a2015-09-25 14:33:35 +01001099 int ret;
Nick Hoathdfb601e2015-04-10 13:12:24 +01001100
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001101 ret = gen9_init_workarounds(engine);
Arun Siluveryaa0011a2015-09-25 14:33:35 +01001102 if (ret)
1103 return ret;
Nick Hoathcae04372015-03-17 11:39:38 +02001104
Mika Kuoppala9c4cbf82015-10-12 13:20:59 +03001105 /* WaStoreMultiplePTEenable:bxt */
 1106	/* This is a requirement according to the hardware specification */
Chris Wilsonc0336662016-05-06 15:40:21 +01001107 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
Mika Kuoppala9c4cbf82015-10-12 13:20:59 +03001108 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1109
1110 /* WaSetClckGatingDisableMedia:bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001111 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
Mika Kuoppala9c4cbf82015-10-12 13:20:59 +03001112 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1113 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1114 }
1115
Nick Hoathdfb601e2015-04-10 13:12:24 +01001116 /* WaDisableThreadStallDopClockGating:bxt */
1117 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1118 STALL_DOP_GATING_DISABLE);
1119
arun.siluvery@linux.intel.com780f0ae2016-06-03 11:16:10 +01001120 /* WaDisablePooledEuLoadBalancingFix:bxt */
1121 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1122 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
1123 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1124 }
1125
Nick Hoath983b4b92015-04-10 13:12:25 +01001126 /* WaDisableSbeCacheDispatchPortSharing:bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001127 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
Nick Hoath983b4b92015-04-10 13:12:25 +01001128 WA_SET_BIT_MASKED(
1129 GEN7_HALF_SLICE_CHICKEN1,
1130 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1131 }
1132
Arun Siluvery2c8580e2016-01-21 21:43:50 +00001133 /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
1134 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1135 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
Arun Siluverya786d532016-01-21 21:43:51 +00001136 /* WaDisableLSQCROPERFforOCL:bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001137 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001138 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
Arun Siluvery2c8580e2016-01-21 21:43:50 +00001139 if (ret)
1140 return ret;
Arun Siluverya786d532016-01-21 21:43:51 +00001141
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001142 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
Arun Siluverya786d532016-01-21 21:43:51 +00001143 if (ret)
1144 return ret;
Arun Siluvery2c8580e2016-01-21 21:43:50 +00001145 }
1146
Tim Gore050fc462016-04-22 09:46:01 +01001147 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001148 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
Imre Deak36579cb2016-05-03 15:54:20 +03001149 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1150 L3_HIGH_PRIO_CREDITS(2));
Tim Gore050fc462016-04-22 09:46:01 +01001151
Mika Kuoppalaad2bdb42016-06-07 17:19:07 +03001152 /* WaInsertDummyPushConstPs:bxt */
1153 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
1154 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1155 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1156
Mika Kuoppala4ba9c1f2016-07-20 14:26:12 +03001157 /* WaInPlaceDecompressionHang:bxt */
1158 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1159 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1160 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1161
Nick Hoathcae04372015-03-17 11:39:38 +02001162 return 0;
1163}
1164
Mika Kuoppalae5f81d62016-06-07 17:18:54 +03001165static int kbl_init_workarounds(struct intel_engine_cs *engine)
1166{
Mika Kuoppalae587f6c2016-06-07 17:18:59 +03001167 struct drm_i915_private *dev_priv = engine->i915;
Mika Kuoppalae5f81d62016-06-07 17:18:54 +03001168 int ret;
1169
1170 ret = gen9_init_workarounds(engine);
1171 if (ret)
1172 return ret;
1173
Mika Kuoppalae587f6c2016-06-07 17:18:59 +03001174 /* WaEnableGapsTsvCreditFix:kbl */
1175 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1176 GEN9_GAPS_TSV_CREDIT_DISABLE));
1177
Mika Kuoppalac0b730d2016-06-07 17:19:06 +03001178 /* WaDisableDynamicCreditSharing:kbl */
1179 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1180 WA_SET_BIT(GAMT_CHKN_BIT_REG,
1181 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1182
Mika Kuoppala8401d422016-06-07 17:19:00 +03001183 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1184 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1185 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1186 HDC_FENCE_DEST_SLM_DISABLE);
1187
Mika Kuoppalafe905812016-06-07 17:19:03 +03001188	/* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new changes
 1189	 * involving this register should also be added to the WA batch as required.
1190 */
1191 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
1192 /* WaDisableLSQCROPERFforOCL:kbl */
1193 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1194 GEN8_LQSC_RO_PERF_DIS);
1195
Mika Kuoppalaad2bdb42016-06-07 17:19:07 +03001196 /* WaInsertDummyPushConstPs:kbl */
1197 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1198 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1199 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1200
Mika Kuoppala4de5d7c2016-06-07 17:19:11 +03001201 /* WaDisableGafsUnitClkGating:kbl */
1202 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1203
Mika Kuoppala954337a2016-06-07 17:19:12 +03001204 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1205 WA_SET_BIT_MASKED(
1206 GEN7_HALF_SLICE_CHICKEN1,
1207 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1208
Mika Kuoppala4ba9c1f2016-07-20 14:26:12 +03001209 /* WaInPlaceDecompressionHang:kbl */
1210 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1211 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1212
Mika Kuoppalafe905812016-06-07 17:19:03 +03001213 /* WaDisableLSQCROPERFforOCL:kbl */
1214 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1215 if (ret)
1216 return ret;
1217
Mika Kuoppalae5f81d62016-06-07 17:18:54 +03001218 return 0;
1219}
1220
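/* Reset the engine's workaround and whitelist bookkeeping and apply the
 * platform-specific setup; only the render ring (RCS) carries workarounds,
 * hence the WARN_ON below.
 */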
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001221int init_workarounds_ring(struct intel_engine_cs *engine)
Mika Kuoppala72253422014-10-07 17:21:26 +03001222{
Chris Wilsonc0336662016-05-06 15:40:21 +01001223 struct drm_i915_private *dev_priv = engine->i915;
Mika Kuoppala72253422014-10-07 17:21:26 +03001224
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001225 WARN_ON(engine->id != RCS);
Mika Kuoppala72253422014-10-07 17:21:26 +03001226
1227 dev_priv->workarounds.count = 0;
Arun Siluvery33136b02016-01-21 21:43:47 +00001228 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
Mika Kuoppala72253422014-10-07 17:21:26 +03001229
Chris Wilsonc0336662016-05-06 15:40:21 +01001230 if (IS_BROADWELL(dev_priv))
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001231 return bdw_init_workarounds(engine);
Mika Kuoppala72253422014-10-07 17:21:26 +03001232
Chris Wilsonc0336662016-05-06 15:40:21 +01001233 if (IS_CHERRYVIEW(dev_priv))
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001234 return chv_init_workarounds(engine);
Ville Syrjälä00e1e622014-08-27 17:33:12 +03001235
Chris Wilsonc0336662016-05-06 15:40:21 +01001236 if (IS_SKYLAKE(dev_priv))
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001237 return skl_init_workarounds(engine);
Nick Hoathcae04372015-03-17 11:39:38 +02001238
Chris Wilsonc0336662016-05-06 15:40:21 +01001239 if (IS_BROXTON(dev_priv))
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001240 return bxt_init_workarounds(engine);
Hoath, Nicholas3b106532015-02-05 10:47:16 +00001241
Mika Kuoppalae5f81d62016-06-07 17:18:54 +03001242 if (IS_KABYLAKE(dev_priv))
1243 return kbl_init_workarounds(engine);
1244
Ville Syrjälä00e1e622014-08-27 17:33:12 +03001245 return 0;
1246}
1247
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001248static int init_render_ring(struct intel_engine_cs *engine)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001249{
Chris Wilsonc0336662016-05-06 15:40:21 +01001250 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001251 int ret = init_ring_common(engine);
Konrad Zapalowicz9c33baa2014-06-19 19:07:15 +02001252 if (ret)
1253 return ret;
Zhenyu Wanga69ffdb2010-08-30 16:12:42 +08001254
Akash Goel61a563a2014-03-25 18:01:50 +05301255 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
Tvrtko Ursulinac657f62016-05-10 10:57:08 +01001256 if (IS_GEN(dev_priv, 4, 6))
Daniel Vetter6b26c862012-04-24 14:04:12 +02001257 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001258
1259 /* We need to disable the AsyncFlip performance optimisations in order
1260 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1261 * programmed to '1' on all products.
Damien Lespiau8693a822013-05-03 18:48:11 +01001262 *
Ville Syrjälä2441f872015-06-02 15:37:37 +03001263 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001264 */
Tvrtko Ursulinac657f62016-05-10 10:57:08 +01001265 if (IS_GEN(dev_priv, 6, 7))
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001266 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1267
Chris Wilsonf05bb0c2013-01-20 16:33:32 +00001268 /* Required for the hardware to program scanline values for waiting */
Akash Goel01fa0302014-03-24 23:00:04 +05301269 /* WaEnableFlushTlbInvalidationMode:snb */
Chris Wilsonc0336662016-05-06 15:40:21 +01001270 if (IS_GEN6(dev_priv))
Chris Wilsonf05bb0c2013-01-20 16:33:32 +00001271 I915_WRITE(GFX_MODE,
Chris Wilsonaa83e302014-03-21 17:18:54 +00001272 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
Chris Wilsonf05bb0c2013-01-20 16:33:32 +00001273
Akash Goel01fa0302014-03-24 23:00:04 +05301274 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
Chris Wilsonc0336662016-05-06 15:40:21 +01001275 if (IS_GEN7(dev_priv))
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001276 I915_WRITE(GFX_MODE_GEN7,
Akash Goel01fa0302014-03-24 23:00:04 +05301277 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
Chris Wilson1c8c38c2013-01-20 16:11:20 +00001278 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
Chris Wilson78501ea2010-10-27 12:18:21 +01001279
Chris Wilsonc0336662016-05-06 15:40:21 +01001280 if (IS_GEN6(dev_priv)) {
Kenneth Graunke3a69ddd2012-04-27 12:44:41 -07001281 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1282 * "If this bit is set, STCunit will have LRA as replacement
1283 * policy. [...] This bit must be reset. LRA replacement
1284 * policy is not supported."
1285 */
1286 I915_WRITE(CACHE_MODE_0,
Daniel Vetter5e13a0c2012-05-08 13:39:59 +02001287 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
Ben Widawsky84f9f932011-12-12 19:21:58 -08001288 }
1289
Tvrtko Ursulinac657f62016-05-10 10:57:08 +01001290 if (IS_GEN(dev_priv, 6, 7))
Daniel Vetter6b26c862012-04-24 14:04:12 +02001291 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
Chris Wilsonc6df5412010-12-15 09:56:50 +00001292
Ville Syrjälä035ea402016-07-12 19:24:47 +03001293 if (INTEL_INFO(dev_priv)->gen >= 6)
1294 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
Ben Widawsky15b9f802012-05-25 16:56:23 -07001295
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001296 return init_workarounds_ring(engine);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001297}
1298
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001299static void render_ring_cleanup(struct intel_engine_cs *engine)
Chris Wilsonc6df5412010-12-15 09:56:50 +00001300{
Chris Wilsonc0336662016-05-06 15:40:21 +01001301 struct drm_i915_private *dev_priv = engine->i915;
Ben Widawsky3e789982014-06-30 09:53:37 -07001302
1303 if (dev_priv->semaphore_obj) {
1304 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001305 i915_gem_object_put(dev_priv->semaphore_obj);
Ben Widawsky3e789982014-06-30 09:53:37 -07001306 dev_priv->semaphore_obj = NULL;
1307 }
Daniel Vetterb45305f2012-12-17 16:21:27 +01001308
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001309 intel_fini_pipe_control(engine);
Chris Wilsonc6df5412010-12-15 09:56:50 +00001310}
1311
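/* On the gen8 render ring, signal every other engine by writing this
 * request's seqno into the waiter's semaphore slot in the GGTT with a
 * CS-stalled PIPE_CONTROL QW write, then raise MI_SEMAPHORE_SIGNAL
 * targeting that engine.
 */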
John Harrisonf7169682015-05-29 17:44:05 +01001312static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
Ben Widawsky3e789982014-06-30 09:53:37 -07001313 unsigned int num_dwords)
1314{
1315#define MBOX_UPDATE_DWORDS 8
Chris Wilson7e37f882016-08-02 22:50:21 +01001316 struct intel_ring *signaller = signaller_req->ring;
Chris Wilsonc0336662016-05-06 15:40:21 +01001317 struct drm_i915_private *dev_priv = signaller_req->i915;
Ben Widawsky3e789982014-06-30 09:53:37 -07001318 struct intel_engine_cs *waiter;
Dave Gordonc3232b12016-03-23 18:19:53 +00001319 enum intel_engine_id id;
1320 int ret, num_rings;
Ben Widawsky3e789982014-06-30 09:53:37 -07001321
Chris Wilsonc0336662016-05-06 15:40:21 +01001322 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
Ben Widawsky3e789982014-06-30 09:53:37 -07001323 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1324#undef MBOX_UPDATE_DWORDS
1325
John Harrison5fb9de12015-05-29 17:44:07 +01001326 ret = intel_ring_begin(signaller_req, num_dwords);
Ben Widawsky3e789982014-06-30 09:53:37 -07001327 if (ret)
1328 return ret;
1329
Dave Gordonc3232b12016-03-23 18:19:53 +00001330 for_each_engine_id(waiter, dev_priv, id) {
Chris Wilsonb5321f32016-08-02 22:50:18 +01001331 u64 gtt_offset =
1332 signaller_req->engine->semaphore.signal_ggtt[id];
Ben Widawsky3e789982014-06-30 09:53:37 -07001333 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1334 continue;
1335
1336 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
Chris Wilsonb5321f32016-08-02 22:50:18 +01001337 intel_ring_emit(signaller,
1338 PIPE_CONTROL_GLOBAL_GTT_IVB |
1339 PIPE_CONTROL_QW_WRITE |
1340 PIPE_CONTROL_CS_STALL);
Ben Widawsky3e789982014-06-30 09:53:37 -07001341 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
1342 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
Chris Wilson04769652016-07-20 09:21:11 +01001343 intel_ring_emit(signaller, signaller_req->fence.seqno);
Ben Widawsky3e789982014-06-30 09:53:37 -07001344 intel_ring_emit(signaller, 0);
Chris Wilsonb5321f32016-08-02 22:50:18 +01001345 intel_ring_emit(signaller,
1346 MI_SEMAPHORE_SIGNAL |
1347 MI_SEMAPHORE_TARGET(waiter->hw_id));
Ben Widawsky3e789982014-06-30 09:53:37 -07001348 intel_ring_emit(signaller, 0);
1349 }
1350
1351 return 0;
1352}
1353
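/* Non-render engines signal the same per-engine semaphore slots, but land
 * the seqno write with a MI_FLUSH_DW store rather than a PIPE_CONTROL.
 */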
John Harrisonf7169682015-05-29 17:44:05 +01001354static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
Ben Widawsky3e789982014-06-30 09:53:37 -07001355 unsigned int num_dwords)
1356{
1357#define MBOX_UPDATE_DWORDS 6
Chris Wilson7e37f882016-08-02 22:50:21 +01001358 struct intel_ring *signaller = signaller_req->ring;
Chris Wilsonc0336662016-05-06 15:40:21 +01001359 struct drm_i915_private *dev_priv = signaller_req->i915;
Ben Widawsky3e789982014-06-30 09:53:37 -07001360 struct intel_engine_cs *waiter;
Dave Gordonc3232b12016-03-23 18:19:53 +00001361 enum intel_engine_id id;
1362 int ret, num_rings;
Ben Widawsky3e789982014-06-30 09:53:37 -07001363
Chris Wilsonc0336662016-05-06 15:40:21 +01001364 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
Ben Widawsky3e789982014-06-30 09:53:37 -07001365 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1366#undef MBOX_UPDATE_DWORDS
1367
John Harrison5fb9de12015-05-29 17:44:07 +01001368 ret = intel_ring_begin(signaller_req, num_dwords);
Ben Widawsky3e789982014-06-30 09:53:37 -07001369 if (ret)
1370 return ret;
1371
Dave Gordonc3232b12016-03-23 18:19:53 +00001372 for_each_engine_id(waiter, dev_priv, id) {
Chris Wilsonb5321f32016-08-02 22:50:18 +01001373 u64 gtt_offset =
1374 signaller_req->engine->semaphore.signal_ggtt[id];
Ben Widawsky3e789982014-06-30 09:53:37 -07001375 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1376 continue;
1377
Chris Wilsonb5321f32016-08-02 22:50:18 +01001378 intel_ring_emit(signaller,
1379 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1380 intel_ring_emit(signaller,
1381 lower_32_bits(gtt_offset) |
1382 MI_FLUSH_DW_USE_GTT);
Ben Widawsky3e789982014-06-30 09:53:37 -07001383 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
Chris Wilson04769652016-07-20 09:21:11 +01001384 intel_ring_emit(signaller, signaller_req->fence.seqno);
Chris Wilsonb5321f32016-08-02 22:50:18 +01001385 intel_ring_emit(signaller,
1386 MI_SEMAPHORE_SIGNAL |
1387 MI_SEMAPHORE_TARGET(waiter->hw_id));
Ben Widawsky3e789982014-06-30 09:53:37 -07001388 intel_ring_emit(signaller, 0);
1389 }
1390
1391 return 0;
1392}
1393
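/* Pre-gen8 semaphores: write this request's seqno into the signal mailbox
 * register of every other engine with MI_LOAD_REGISTER_IMM, padding with
 * MI_NOOP to keep the emitted dwords qword aligned.
 */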
John Harrisonf7169682015-05-29 17:44:05 +01001394static int gen6_signal(struct drm_i915_gem_request *signaller_req,
Ben Widawsky024a43e2014-04-29 14:52:30 -07001395 unsigned int num_dwords)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001396{
Chris Wilson7e37f882016-08-02 22:50:21 +01001397 struct intel_ring *signaller = signaller_req->ring;
Chris Wilsonc0336662016-05-06 15:40:21 +01001398 struct drm_i915_private *dev_priv = signaller_req->i915;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01001399 struct intel_engine_cs *useless;
Dave Gordonc3232b12016-03-23 18:19:53 +00001400 enum intel_engine_id id;
1401 int ret, num_rings;
Ben Widawsky78325f22014-04-29 14:52:29 -07001402
Ben Widawskya1444b72014-06-30 09:53:35 -07001403#define MBOX_UPDATE_DWORDS 3
Chris Wilsonc0336662016-05-06 15:40:21 +01001404 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
Ben Widawskya1444b72014-06-30 09:53:35 -07001405 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
1406#undef MBOX_UPDATE_DWORDS
Ben Widawsky024a43e2014-04-29 14:52:30 -07001407
John Harrison5fb9de12015-05-29 17:44:07 +01001408 ret = intel_ring_begin(signaller_req, num_dwords);
Ben Widawsky024a43e2014-04-29 14:52:30 -07001409 if (ret)
1410 return ret;
Ben Widawsky024a43e2014-04-29 14:52:30 -07001411
Dave Gordonc3232b12016-03-23 18:19:53 +00001412 for_each_engine_id(useless, dev_priv, id) {
Chris Wilsonb5321f32016-08-02 22:50:18 +01001413 i915_reg_t mbox_reg =
1414 signaller_req->engine->semaphore.mbox.signal[id];
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001415
1416 if (i915_mmio_reg_valid(mbox_reg)) {
Ben Widawsky78325f22014-04-29 14:52:29 -07001417 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
Ville Syrjäläf92a9162015-11-04 23:20:07 +02001418 intel_ring_emit_reg(signaller, mbox_reg);
Chris Wilson04769652016-07-20 09:21:11 +01001419 intel_ring_emit(signaller, signaller_req->fence.seqno);
Ben Widawsky78325f22014-04-29 14:52:29 -07001420 }
1421 }
Ben Widawsky024a43e2014-04-29 14:52:30 -07001422
Ben Widawskya1444b72014-06-30 09:53:35 -07001423 /* If num_dwords was rounded, make sure the tail pointer is correct */
1424 if (num_rings % 2 == 0)
1425 intel_ring_emit(signaller, MI_NOOP);
1426
Ben Widawsky024a43e2014-04-29 14:52:30 -07001427 return 0;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001428}
1429
Ben Widawskyc8c99b02011-09-14 20:32:47 -07001430/**
Chris Wilsonddd66c52016-08-02 22:50:31 +01001431 * gen6_emit_request - Update the semaphore mailbox registers
John Harrisonee044a82015-05-29 17:44:00 +01001432 *
 1433 * @req: request to write to the ring
Ben Widawskyc8c99b02011-09-14 20:32:47 -07001434 *
1435 * Update the mailbox registers in the *other* rings with the current seqno.
1436 * This acts like a signal in the canonical semaphore.
1437 */
Chris Wilsonddd66c52016-08-02 22:50:31 +01001438static int gen6_emit_request(struct drm_i915_gem_request *req)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001439{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001440 struct intel_engine_cs *engine = req->engine;
Chris Wilson7e37f882016-08-02 22:50:21 +01001441 struct intel_ring *ring = req->ring;
Ben Widawsky024a43e2014-04-29 14:52:30 -07001442 int ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001443
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001444 if (engine->semaphore.signal)
1445 ret = engine->semaphore.signal(req, 4);
Ben Widawsky707d9cf2014-06-30 09:53:36 -07001446 else
John Harrison5fb9de12015-05-29 17:44:07 +01001447 ret = intel_ring_begin(req, 4);
Ben Widawsky707d9cf2014-06-30 09:53:36 -07001448
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001449 if (ret)
1450 return ret;
1451
Chris Wilsonb5321f32016-08-02 22:50:18 +01001452 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1453 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1454 intel_ring_emit(ring, req->fence.seqno);
1455 intel_ring_emit(ring, MI_USER_INTERRUPT);
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01001456 intel_ring_advance(ring);
1457
1458 req->tail = ring->tail;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001459
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001460 return 0;
1461}
1462
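/* gen8 render breadcrumb: after signalling any semaphores, write the seqno
 * to the hardware status page with a CS-stalled PIPE_CONTROL QW write and
 * raise MI_USER_INTERRUPT to wake up waiters.
 */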
Chris Wilsonddd66c52016-08-02 22:50:31 +01001463static int gen8_render_emit_request(struct drm_i915_gem_request *req)
Chris Wilsona58c01a2016-04-29 13:18:21 +01001464{
1465 struct intel_engine_cs *engine = req->engine;
Chris Wilson7e37f882016-08-02 22:50:21 +01001466 struct intel_ring *ring = req->ring;
Chris Wilsona58c01a2016-04-29 13:18:21 +01001467 int ret;
1468
1469 if (engine->semaphore.signal)
1470 ret = engine->semaphore.signal(req, 8);
1471 else
1472 ret = intel_ring_begin(req, 8);
1473 if (ret)
1474 return ret;
1475
Chris Wilsonb5321f32016-08-02 22:50:18 +01001476 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1477 intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
1478 PIPE_CONTROL_CS_STALL |
1479 PIPE_CONTROL_QW_WRITE));
1480 intel_ring_emit(ring, intel_hws_seqno_address(engine));
1481 intel_ring_emit(ring, 0);
1482 intel_ring_emit(ring, i915_gem_request_get_seqno(req));
Chris Wilsona58c01a2016-04-29 13:18:21 +01001483 /* We're thrashing one dword of HWS. */
Chris Wilsonb5321f32016-08-02 22:50:18 +01001484 intel_ring_emit(ring, 0);
1485 intel_ring_emit(ring, MI_USER_INTERRUPT);
1486 intel_ring_emit(ring, MI_NOOP);
Chris Wilsonddd66c52016-08-02 22:50:31 +01001487 intel_ring_advance(ring);
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01001488
1489 req->tail = ring->tail;
Chris Wilsona58c01a2016-04-29 13:18:21 +01001490
1491 return 0;
1492}
1493
Chris Wilsonc0336662016-05-06 15:40:21 +01001494static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02001495 u32 seqno)
1496{
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02001497 return dev_priv->last_seqno < seqno;
1498}
1499
Ben Widawskyc8c99b02011-09-14 20:32:47 -07001500/**
1501 * intel_ring_sync - sync the waiter to the signaller on seqno
1502 *
 1503 * @waiter_req: request on the ring that is waiting
 1504 * @signaller: ring which has, or will signal
 1505 * @seqno: seqno which the waiter will block on
1506 */
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001507
1508static int
John Harrison599d9242015-05-29 17:44:04 +01001509gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001510 struct intel_engine_cs *signaller,
1511 u32 seqno)
1512{
Chris Wilson7e37f882016-08-02 22:50:21 +01001513 struct intel_ring *waiter = waiter_req->ring;
Chris Wilsonc0336662016-05-06 15:40:21 +01001514 struct drm_i915_private *dev_priv = waiter_req->i915;
Chris Wilsonb5321f32016-08-02 22:50:18 +01001515 u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
Chris Wilson6ef48d72016-04-29 13:18:25 +01001516 struct i915_hw_ppgtt *ppgtt;
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001517 int ret;
1518
John Harrison5fb9de12015-05-29 17:44:07 +01001519 ret = intel_ring_begin(waiter_req, 4);
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001520 if (ret)
1521 return ret;
1522
1523 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
1524 MI_SEMAPHORE_GLOBAL_GTT |
1525 MI_SEMAPHORE_SAD_GTE_SDD);
1526 intel_ring_emit(waiter, seqno);
Tvrtko Ursulinc38c6512016-06-29 16:09:30 +01001527 intel_ring_emit(waiter, lower_32_bits(offset));
1528 intel_ring_emit(waiter, upper_32_bits(offset));
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001529 intel_ring_advance(waiter);
Chris Wilson6ef48d72016-04-29 13:18:25 +01001530
1531 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1532 * pagetables and we must reload them before executing the batch.
1533 * We do this on the i915_switch_context() following the wait and
1534 * before the dispatch.
1535 */
1536 ppgtt = waiter_req->ctx->ppgtt;
1537 if (ppgtt && waiter_req->engine->id != RCS)
1538 ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
Ben Widawsky5ee426c2014-06-30 09:53:38 -07001539 return 0;
1540}
1541
Ben Widawskyc8c99b02011-09-14 20:32:47 -07001542static int
John Harrison599d9242015-05-29 17:44:04 +01001543gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01001544 struct intel_engine_cs *signaller,
Daniel Vetter686cb5f2012-04-11 22:12:52 +02001545 u32 seqno)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001546{
Chris Wilson7e37f882016-08-02 22:50:21 +01001547 struct intel_ring *waiter = waiter_req->ring;
Ben Widawskyc8c99b02011-09-14 20:32:47 -07001548 u32 dw1 = MI_SEMAPHORE_MBOX |
1549 MI_SEMAPHORE_COMPARE |
1550 MI_SEMAPHORE_REGISTER;
Chris Wilsonb5321f32016-08-02 22:50:18 +01001551 u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
Ben Widawskyebc348b2014-04-29 14:52:28 -07001552 int ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001553
Ben Widawsky1500f7e2012-04-11 11:18:21 -07001554 /* Throughout all of the GEM code, seqno passed implies our current
1555 * seqno is >= the last seqno executed. However for hardware the
1556 * comparison is strictly greater than.
1557 */
1558 seqno -= 1;
1559
Ben Widawskyebc348b2014-04-29 14:52:28 -07001560 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
Daniel Vetter686cb5f2012-04-11 22:12:52 +02001561
John Harrison5fb9de12015-05-29 17:44:07 +01001562 ret = intel_ring_begin(waiter_req, 4);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001563 if (ret)
1564 return ret;
1565
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02001566 /* If seqno wrap happened, omit the wait with no-ops */
Chris Wilsonc0336662016-05-06 15:40:21 +01001567 if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
Ben Widawskyebc348b2014-04-29 14:52:28 -07001568 intel_ring_emit(waiter, dw1 | wait_mbox);
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02001569 intel_ring_emit(waiter, seqno);
1570 intel_ring_emit(waiter, 0);
1571 intel_ring_emit(waiter, MI_NOOP);
1572 } else {
1573 intel_ring_emit(waiter, MI_NOOP);
1574 intel_ring_emit(waiter, MI_NOOP);
1575 intel_ring_emit(waiter, MI_NOOP);
1576 intel_ring_emit(waiter, MI_NOOP);
1577 }
Ben Widawskyc8c99b02011-09-14 20:32:47 -07001578 intel_ring_advance(waiter);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001579
1580 return 0;
1581}
1582
Chris Wilsonf8973c22016-07-01 17:23:21 +01001583static void
Dave Gordon38a0f2d2016-07-20 18:16:06 +01001584gen5_seqno_barrier(struct intel_engine_cs *engine)
Chris Wilsonc6df5412010-12-15 09:56:50 +00001585{
Chris Wilsonf8973c22016-07-01 17:23:21 +01001586	/* MI_STORE commands are internally buffered by the GPU and not flushed
1587 * either by MI_FLUSH or SyncFlush or any other combination of
1588 * MI commands.
Chris Wilsonc6df5412010-12-15 09:56:50 +00001589 *
Chris Wilsonf8973c22016-07-01 17:23:21 +01001590 * "Only the submission of the store operation is guaranteed.
1591 * The write result will be complete (coherent) some time later
1592 * (this is practically a finite period but there is no guaranteed
1593 * latency)."
1594 *
1595 * Empirically, we observe that we need a delay of at least 75us to
1596 * be sure that the seqno write is visible by the CPU.
Chris Wilsonc6df5412010-12-15 09:56:50 +00001597 */
Chris Wilsonf8973c22016-07-01 17:23:21 +01001598 usleep_range(125, 250);
Chris Wilsonc6df5412010-12-15 09:56:50 +00001599}
1600
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001601static void
1602gen6_seqno_barrier(struct intel_engine_cs *engine)
Daniel Vetter4cd53c02012-12-14 16:01:25 +01001603{
Chris Wilsonc0336662016-05-06 15:40:21 +01001604 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilsonbcbdb6d2016-04-27 09:02:01 +01001605
Daniel Vetter4cd53c02012-12-14 16:01:25 +01001606 /* Workaround to force correct ordering between irq and seqno writes on
1607 * ivb (and maybe also on snb) by reading from a CS register (like
Chris Wilson9b9ed302016-04-09 10:57:53 +01001608 * ACTHD) before reading the status page.
1609 *
1610 * Note that this effectively stalls the read by the time it takes to
1611 * do a memory transaction, which more or less ensures that the write
1612 * from the GPU has sufficient time to invalidate the CPU cacheline.
1613 * Alternatively we could delay the interrupt from the CS ring to give
1614 * the write time to land, but that would incur a delay after every
1615 * batch i.e. much more frequent than a delay when waiting for the
1616 * interrupt (with the same net latency).
Chris Wilsonbcbdb6d2016-04-27 09:02:01 +01001617 *
1618 * Also note that to prevent whole machine hangs on gen7, we have to
1619 * take the spinlock to guard against concurrent cacheline access.
Chris Wilson9b9ed302016-04-09 10:57:53 +01001620 */
Chris Wilsonbcbdb6d2016-04-27 09:02:01 +01001621 spin_lock_irq(&dev_priv->uncore.lock);
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001622 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
Chris Wilsonbcbdb6d2016-04-27 09:02:01 +01001623 spin_unlock_irq(&dev_priv->uncore.lock);
Daniel Vetter4cd53c02012-12-14 16:01:25 +01001624}
1625
Chris Wilson31bb59c2016-07-01 17:23:27 +01001626static void
1627gen5_irq_enable(struct intel_engine_cs *engine)
Daniel Vettere48d8632012-04-11 22:12:54 +02001628{
Chris Wilson31bb59c2016-07-01 17:23:27 +01001629 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
Daniel Vettere48d8632012-04-11 22:12:54 +02001630}
1631
1632static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001633gen5_irq_disable(struct intel_engine_cs *engine)
Daniel Vettere48d8632012-04-11 22:12:54 +02001634{
Chris Wilson31bb59c2016-07-01 17:23:27 +01001635 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
Eric Anholt62fdfea2010-05-21 13:26:39 -07001636}
1637
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001638static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001639i9xx_irq_enable(struct intel_engine_cs *engine)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001640{
Chris Wilsonc0336662016-05-06 15:40:21 +01001641 struct drm_i915_private *dev_priv = engine->i915;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001642
Chris Wilson31bb59c2016-07-01 17:23:27 +01001643 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1644 I915_WRITE(IMR, dev_priv->irq_mask);
1645 POSTING_READ_FW(RING_IMR(engine->mmio_base));
Chris Wilsonc2798b12012-04-22 21:13:57 +01001646}
1647
1648static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001649i9xx_irq_disable(struct intel_engine_cs *engine)
Chris Wilsonc2798b12012-04-22 21:13:57 +01001650{
Chris Wilsonc0336662016-05-06 15:40:21 +01001651 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilsonc2798b12012-04-22 21:13:57 +01001652
Chris Wilson31bb59c2016-07-01 17:23:27 +01001653 dev_priv->irq_mask |= engine->irq_enable_mask;
1654 I915_WRITE(IMR, dev_priv->irq_mask);
1655}
1656
1657static void
1658i8xx_irq_enable(struct intel_engine_cs *engine)
1659{
1660 struct drm_i915_private *dev_priv = engine->i915;
1661
1662 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1663 I915_WRITE16(IMR, dev_priv->irq_mask);
1664 POSTING_READ16(RING_IMR(engine->mmio_base));
1665}
1666
1667static void
1668i8xx_irq_disable(struct intel_engine_cs *engine)
1669{
1670 struct drm_i915_private *dev_priv = engine->i915;
1671
1672 dev_priv->irq_mask |= engine->irq_enable_mask;
1673 I915_WRITE16(IMR, dev_priv->irq_mask);
Chris Wilsonc2798b12012-04-22 21:13:57 +01001674}
1675
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001676static int
Chris Wilson7c9cf4e2016-08-02 22:50:25 +01001677bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
Zou Nan haid1b851f2010-05-21 09:08:57 +08001678{
Chris Wilson7e37f882016-08-02 22:50:21 +01001679 struct intel_ring *ring = req->ring;
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001680 int ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001681
John Harrison5fb9de12015-05-29 17:44:07 +01001682 ret = intel_ring_begin(req, 2);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001683 if (ret)
1684 return ret;
1685
Chris Wilsonb5321f32016-08-02 22:50:18 +01001686 intel_ring_emit(ring, MI_FLUSH);
1687 intel_ring_emit(ring, MI_NOOP);
1688 intel_ring_advance(ring);
Chris Wilsonb72f3ac2011-01-04 17:34:02 +00001689 return 0;
Zou Nan haid1b851f2010-05-21 09:08:57 +08001690}
1691
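/* Legacy breadcrumb: store the seqno into the hardware status page with
 * MI_STORE_DWORD_INDEX and raise MI_USER_INTERRUPT.
 */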
Chris Wilsonddd66c52016-08-02 22:50:31 +01001692static int i9xx_emit_request(struct drm_i915_gem_request *req)
Zou Nan haid1b851f2010-05-21 09:08:57 +08001693{
Chris Wilson7e37f882016-08-02 22:50:21 +01001694 struct intel_ring *ring = req->ring;
Chris Wilson3cce4692010-10-27 16:11:02 +01001695 int ret;
1696
John Harrison5fb9de12015-05-29 17:44:07 +01001697 ret = intel_ring_begin(req, 4);
Chris Wilson3cce4692010-10-27 16:11:02 +01001698 if (ret)
1699 return ret;
Chris Wilson6f392d5482010-08-07 11:01:22 +01001700
Chris Wilsonb5321f32016-08-02 22:50:18 +01001701 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1702 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1703 intel_ring_emit(ring, req->fence.seqno);
1704 intel_ring_emit(ring, MI_USER_INTERRUPT);
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01001705 intel_ring_advance(ring);
1706
1707 req->tail = ring->tail;
Zou Nan haid1b851f2010-05-21 09:08:57 +08001708
Chris Wilson3cce4692010-10-27 16:11:02 +01001709 return 0;
Zou Nan haid1b851f2010-05-21 09:08:57 +08001710}
1711
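/* Submitting on a legacy ring is simply a write of the new tail offset into
 * the engine's TAIL register; the GPU then executes up to that point.
 */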
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01001712static void i9xx_submit_request(struct drm_i915_gem_request *request)
1713{
1714 struct drm_i915_private *dev_priv = request->i915;
1715
Chris Wilson8f942012016-08-02 22:50:30 +01001716 I915_WRITE_TAIL(request->engine,
1717 intel_ring_offset(request->ring, request->tail));
Chris Wilsonc5efa1a2016-08-02 22:50:29 +01001718}
1719
Chris Wilson0f468322011-01-04 17:35:21 +00001720static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001721gen6_irq_enable(struct intel_engine_cs *engine)
Chris Wilson0f468322011-01-04 17:35:21 +00001722{
Chris Wilsonc0336662016-05-06 15:40:21 +01001723 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson0f468322011-01-04 17:35:21 +00001724
Chris Wilson61ff75a2016-07-01 17:23:28 +01001725 I915_WRITE_IMR(engine,
1726 ~(engine->irq_enable_mask |
1727 engine->irq_keep_mask));
Chris Wilson31bb59c2016-07-01 17:23:27 +01001728 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
Ben Widawskya19d2932013-05-28 19:22:30 -07001729}
1730
1731static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001732gen6_irq_disable(struct intel_engine_cs *engine)
Ben Widawskya19d2932013-05-28 19:22:30 -07001733{
Chris Wilsonc0336662016-05-06 15:40:21 +01001734 struct drm_i915_private *dev_priv = engine->i915;
Ben Widawskya19d2932013-05-28 19:22:30 -07001735
Chris Wilson61ff75a2016-07-01 17:23:28 +01001736 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
Chris Wilson31bb59c2016-07-01 17:23:27 +01001737 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001738}
1739
1740static void
Chris Wilson31bb59c2016-07-01 17:23:27 +01001741hsw_vebox_irq_enable(struct intel_engine_cs *engine)
Ben Widawskyabd58f02013-11-02 21:07:09 -07001742{
Chris Wilsonc0336662016-05-06 15:40:21 +01001743 struct drm_i915_private *dev_priv = engine->i915;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001744
Chris Wilson31bb59c2016-07-01 17:23:27 +01001745 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1746 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
1747}
1748
1749static void
1750hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1751{
1752 struct drm_i915_private *dev_priv = engine->i915;
1753
1754 I915_WRITE_IMR(engine, ~0);
1755 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
1756}
1757
1758static void
1759gen8_irq_enable(struct intel_engine_cs *engine)
1760{
1761 struct drm_i915_private *dev_priv = engine->i915;
1762
Chris Wilson61ff75a2016-07-01 17:23:28 +01001763 I915_WRITE_IMR(engine,
1764 ~(engine->irq_enable_mask |
1765 engine->irq_keep_mask));
Chris Wilson31bb59c2016-07-01 17:23:27 +01001766 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1767}
1768
1769static void
1770gen8_irq_disable(struct intel_engine_cs *engine)
1771{
1772 struct drm_i915_private *dev_priv = engine->i915;
1773
Chris Wilson61ff75a2016-07-01 17:23:28 +01001774 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001775}
1776
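/* Start the batch with MI_BATCH_BUFFER_START using GTT addressing; the
 * non-secure bit is only omitted for I915_DISPATCH_SECURE batches.
 */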
Zou Nan haid1b851f2010-05-21 09:08:57 +08001777static int
Chris Wilson803688b2016-08-02 22:50:27 +01001778i965_emit_bb_start(struct drm_i915_gem_request *req,
1779 u64 offset, u32 length,
1780 unsigned int dispatch_flags)
Zou Nan haid1b851f2010-05-21 09:08:57 +08001781{
Chris Wilson7e37f882016-08-02 22:50:21 +01001782 struct intel_ring *ring = req->ring;
Chris Wilsone1f99ce2010-10-27 12:45:26 +01001783 int ret;
Chris Wilson78501ea2010-10-27 12:18:21 +01001784
John Harrison5fb9de12015-05-29 17:44:07 +01001785 ret = intel_ring_begin(req, 2);
Chris Wilsone1f99ce2010-10-27 12:45:26 +01001786 if (ret)
1787 return ret;
1788
Chris Wilsonb5321f32016-08-02 22:50:18 +01001789 intel_ring_emit(ring,
Chris Wilson65f56872012-04-17 16:38:12 +01001790 MI_BATCH_BUFFER_START |
1791 MI_BATCH_GTT |
John Harrison8e004ef2015-02-13 11:48:10 +00001792 (dispatch_flags & I915_DISPATCH_SECURE ?
1793 0 : MI_BATCH_NON_SECURE_I965));
Chris Wilsonb5321f32016-08-02 22:50:18 +01001794 intel_ring_emit(ring, offset);
1795 intel_ring_advance(ring);
Chris Wilson78501ea2010-10-27 12:18:21 +01001796
Zou Nan haid1b851f2010-05-21 09:08:57 +08001797 return 0;
1798}
1799
Daniel Vetterb45305f2012-12-17 16:21:27 +01001800/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1801#define I830_BATCH_LIMIT (256*1024)
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001802#define I830_TLB_ENTRIES (2)
1803#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001804static int
Chris Wilson803688b2016-08-02 22:50:27 +01001805i830_emit_bb_start(struct drm_i915_gem_request *req,
1806 u64 offset, u32 len,
1807 unsigned int dispatch_flags)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001808{
Chris Wilson7e37f882016-08-02 22:50:21 +01001809 struct intel_ring *ring = req->ring;
Chris Wilsonb5321f32016-08-02 22:50:18 +01001810 u32 cs_offset = req->engine->scratch.gtt_offset;
Chris Wilsonc4e7a412010-11-30 14:10:25 +00001811 int ret;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001812
John Harrison5fb9de12015-05-29 17:44:07 +01001813 ret = intel_ring_begin(req, 6);
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001814 if (ret)
1815 return ret;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001816
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001817 /* Evict the invalid PTE TLBs */
Chris Wilsonb5321f32016-08-02 22:50:18 +01001818 intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1819 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1820 intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1821 intel_ring_emit(ring, cs_offset);
1822 intel_ring_emit(ring, 0xdeadbeef);
1823 intel_ring_emit(ring, MI_NOOP);
1824 intel_ring_advance(ring);
Daniel Vetterb45305f2012-12-17 16:21:27 +01001825
John Harrison8e004ef2015-02-13 11:48:10 +00001826 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
Daniel Vetterb45305f2012-12-17 16:21:27 +01001827 if (len > I830_BATCH_LIMIT)
1828 return -ENOSPC;
1829
John Harrison5fb9de12015-05-29 17:44:07 +01001830 ret = intel_ring_begin(req, 6 + 2);
Daniel Vetterb45305f2012-12-17 16:21:27 +01001831 if (ret)
1832 return ret;
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001833
 1834	/* Blit the batch (which now has all relocs applied) to the
1835 * stable batch scratch bo area (so that the CS never
1836 * stumbles over its tlb invalidation bug) ...
1837 */
Chris Wilsonb5321f32016-08-02 22:50:18 +01001838 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1839 intel_ring_emit(ring,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001840 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
Chris Wilsonb5321f32016-08-02 22:50:18 +01001841 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1842 intel_ring_emit(ring, cs_offset);
1843 intel_ring_emit(ring, 4096);
1844 intel_ring_emit(ring, offset);
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001845
Chris Wilsonb5321f32016-08-02 22:50:18 +01001846 intel_ring_emit(ring, MI_FLUSH);
1847 intel_ring_emit(ring, MI_NOOP);
1848 intel_ring_advance(ring);
Daniel Vetterb45305f2012-12-17 16:21:27 +01001849
1850 /* ... and execute it. */
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001851 offset = cs_offset;
Daniel Vetterb45305f2012-12-17 16:21:27 +01001852 }
Chris Wilsone1f99ce2010-10-27 12:45:26 +01001853
Ville Syrjälä9d611c02015-12-14 18:23:49 +02001854 ret = intel_ring_begin(req, 2);
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001855 if (ret)
1856 return ret;
1857
Chris Wilsonb5321f32016-08-02 22:50:18 +01001858 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1859 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1860 0 : MI_BATCH_NON_SECURE));
1861 intel_ring_advance(ring);
Chris Wilsonc4d69da2014-09-08 14:25:41 +01001862
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001863 return 0;
1864}
1865
1866static int
Chris Wilson803688b2016-08-02 22:50:27 +01001867i915_emit_bb_start(struct drm_i915_gem_request *req,
1868 u64 offset, u32 len,
1869 unsigned int dispatch_flags)
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001870{
Chris Wilson7e37f882016-08-02 22:50:21 +01001871 struct intel_ring *ring = req->ring;
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001872 int ret;
1873
John Harrison5fb9de12015-05-29 17:44:07 +01001874 ret = intel_ring_begin(req, 2);
Daniel Vetterfb3256d2012-04-11 22:12:56 +02001875 if (ret)
1876 return ret;
1877
Chris Wilsonb5321f32016-08-02 22:50:18 +01001878 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1879 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1880 0 : MI_BATCH_NON_SECURE));
1881 intel_ring_advance(ring);
Eric Anholt62fdfea2010-05-21 13:26:39 -07001882
Eric Anholt62fdfea2010-05-21 13:26:39 -07001883 return 0;
1884}
1885
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001886static void cleanup_phys_status_page(struct intel_engine_cs *engine)
Ville Syrjälä7d3fdff2016-01-11 20:48:32 +02001887{
Chris Wilsonc0336662016-05-06 15:40:21 +01001888 struct drm_i915_private *dev_priv = engine->i915;
Ville Syrjälä7d3fdff2016-01-11 20:48:32 +02001889
1890 if (!dev_priv->status_page_dmah)
1891 return;
1892
Chris Wilson91c8a322016-07-05 10:40:23 +01001893 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001894 engine->status_page.page_addr = NULL;
Ville Syrjälä7d3fdff2016-01-11 20:48:32 +02001895}
1896
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001897static void cleanup_status_page(struct intel_engine_cs *engine)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001898{
Chris Wilson05394f32010-11-08 19:18:58 +00001899 struct drm_i915_gem_object *obj;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001900
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001901 obj = engine->status_page.obj;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001902 if (obj == NULL)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001903 return;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001904
Chris Wilson9da3da62012-06-01 15:20:22 +01001905 kunmap(sg_page(obj->pages->sgl));
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08001906 i915_gem_object_ggtt_unpin(obj);
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001907 i915_gem_object_put(obj);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001908 engine->status_page.obj = NULL;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001909}
1910
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001911static int init_status_page(struct intel_engine_cs *engine)
Eric Anholt62fdfea2010-05-21 13:26:39 -07001912{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001913 struct drm_i915_gem_object *obj = engine->status_page.obj;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001914
Ville Syrjälä7d3fdff2016-01-11 20:48:32 +02001915 if (obj == NULL) {
Chris Wilson1f767e02014-07-03 17:33:03 -04001916 unsigned flags;
Chris Wilsone3efda42014-04-09 09:19:41 +01001917 int ret;
1918
Chris Wilson91c8a322016-07-05 10:40:23 +01001919 obj = i915_gem_object_create(&engine->i915->drm, 4096);
Chris Wilsonfe3db792016-04-25 13:32:13 +01001920 if (IS_ERR(obj)) {
Chris Wilsone3efda42014-04-09 09:19:41 +01001921 DRM_ERROR("Failed to allocate status page\n");
Chris Wilsonfe3db792016-04-25 13:32:13 +01001922 return PTR_ERR(obj);
Chris Wilsone3efda42014-04-09 09:19:41 +01001923 }
1924
1925 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1926 if (ret)
1927 goto err_unref;
1928
Chris Wilson1f767e02014-07-03 17:33:03 -04001929 flags = 0;
Chris Wilsonc0336662016-05-06 15:40:21 +01001930 if (!HAS_LLC(engine->i915))
Chris Wilson1f767e02014-07-03 17:33:03 -04001931 /* On g33, we cannot place HWS above 256MiB, so
1932 * restrict its pinning to the low mappable arena.
1933 * Though this restriction is not documented for
1934 * gen4, gen5, or byt, they also behave similarly
1935 * and hang if the HWS is placed at the top of the
1936 * GTT. To generalise, it appears that all !llc
1937 * platforms have issues with us placing the HWS
1938 * above the mappable region (even though we never
 1939			 * actually map it).
1940 */
1941 flags |= PIN_MAPPABLE;
1942 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
Chris Wilsone3efda42014-04-09 09:19:41 +01001943 if (ret) {
1944err_unref:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001945 i915_gem_object_put(obj);
Chris Wilsone3efda42014-04-09 09:19:41 +01001946 return ret;
1947 }
1948
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001949 engine->status_page.obj = obj;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001950 }
Chris Wilsone4ffd172011-04-04 09:44:39 +01001951
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001952 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1953 engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1954 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
Eric Anholt62fdfea2010-05-21 13:26:39 -07001955
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001956 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001957 engine->name, engine->status_page.gfx_addr);
Eric Anholt62fdfea2010-05-21 13:26:39 -07001958
1959 return 0;
Eric Anholt62fdfea2010-05-21 13:26:39 -07001960}
1961
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001962static int init_phys_status_page(struct intel_engine_cs *engine)
Chris Wilson6b8294a2012-11-16 11:43:20 +00001963{
Chris Wilsonc0336662016-05-06 15:40:21 +01001964 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson6b8294a2012-11-16 11:43:20 +00001965
1966 if (!dev_priv->status_page_dmah) {
1967 dev_priv->status_page_dmah =
Chris Wilson91c8a322016-07-05 10:40:23 +01001968 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
Chris Wilson6b8294a2012-11-16 11:43:20 +00001969 if (!dev_priv->status_page_dmah)
1970 return -ENOMEM;
1971 }
1972
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001973 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1974 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
Chris Wilson6b8294a2012-11-16 11:43:20 +00001975
1976 return 0;
1977}
1978
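/* Pin the ring object into the GGTT and map it for CPU access: on LLC
 * platforms with non-stolen backing storage a regular kernel mapping of the
 * pages is used, otherwise the ring is mapped through the mappable GGTT
 * aperture, which requires the device to be awake.
 *
 * Typical usage, as in intel_init_ring_buffer() and intel_engine_cleanup():
 *
 *	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
 *	ret = intel_ring_pin(ring);
 *	...
 *	intel_ring_unpin(ring);
 *	intel_ring_free(ring);
 */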
Chris Wilsonaad29fb2016-08-02 22:50:23 +01001979int intel_ring_pin(struct intel_ring *ring)
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001980{
Chris Wilsonaad29fb2016-08-02 22:50:23 +01001981 struct drm_i915_private *dev_priv = ring->engine->i915;
Chris Wilson32c04f12016-08-02 22:50:22 +01001982 struct drm_i915_gem_object *obj = ring->obj;
Chris Wilsona687a432016-04-13 17:35:11 +01001983 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1984 unsigned flags = PIN_OFFSET_BIAS | 4096;
Dave Gordon83052162016-04-12 14:46:16 +01001985 void *addr;
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001986 int ret;
1987
Chris Wilsondef0c5f2015-10-08 13:39:54 +01001988 if (HAS_LLC(dev_priv) && !obj->stolen) {
Chris Wilsona687a432016-04-13 17:35:11 +01001989 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
Chris Wilsondef0c5f2015-10-08 13:39:54 +01001990 if (ret)
1991 return ret;
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001992
Chris Wilsondef0c5f2015-10-08 13:39:54 +01001993 ret = i915_gem_object_set_to_cpu_domain(obj, true);
Chris Wilsond2cad532016-04-08 12:11:10 +01001994 if (ret)
1995 goto err_unpin;
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001996
Dave Gordon83052162016-04-12 14:46:16 +01001997 addr = i915_gem_object_pin_map(obj);
1998 if (IS_ERR(addr)) {
1999 ret = PTR_ERR(addr);
Chris Wilsond2cad532016-04-08 12:11:10 +01002000 goto err_unpin;
Chris Wilsondef0c5f2015-10-08 13:39:54 +01002001 }
2002 } else {
Chris Wilsona687a432016-04-13 17:35:11 +01002003 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
2004 flags | PIN_MAPPABLE);
Chris Wilsondef0c5f2015-10-08 13:39:54 +01002005 if (ret)
2006 return ret;
2007
2008 ret = i915_gem_object_set_to_gtt_domain(obj, true);
Chris Wilsond2cad532016-04-08 12:11:10 +01002009 if (ret)
2010 goto err_unpin;
Chris Wilsondef0c5f2015-10-08 13:39:54 +01002011
Daniele Ceraolo Spurioff3dc082016-01-27 15:43:49 +00002012 /* Access through the GTT requires the device to be awake. */
2013 assert_rpm_wakelock_held(dev_priv);
2014
Chris Wilson406ea8d2016-07-20 13:31:55 +01002015 addr = (void __force *)
2016 i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
Chris Wilson3d77e9b2016-04-28 09:56:40 +01002017 if (IS_ERR(addr)) {
2018 ret = PTR_ERR(addr);
Chris Wilsond2cad532016-04-08 12:11:10 +01002019 goto err_unpin;
Chris Wilsondef0c5f2015-10-08 13:39:54 +01002020 }
Thomas Daniel7ba717c2014-11-13 10:28:56 +00002021 }
2022
Chris Wilson32c04f12016-08-02 22:50:22 +01002023 ring->vaddr = addr;
2024 ring->vma = i915_gem_obj_to_ggtt(obj);
Thomas Daniel7ba717c2014-11-13 10:28:56 +00002025 return 0;
Chris Wilsond2cad532016-04-08 12:11:10 +01002026
2027err_unpin:
2028 i915_gem_object_ggtt_unpin(obj);
2029 return ret;
Thomas Daniel7ba717c2014-11-13 10:28:56 +00002030}
2031
Chris Wilsonaad29fb2016-08-02 22:50:23 +01002032void intel_ring_unpin(struct intel_ring *ring)
2033{
2034 GEM_BUG_ON(!ring->vma);
2035 GEM_BUG_ON(!ring->vaddr);
2036
2037 if (HAS_LLC(ring->engine->i915) && !ring->obj->stolen)
2038 i915_gem_object_unpin_map(ring->obj);
2039 else
2040 i915_vma_unpin_iomap(ring->vma);
2041 ring->vaddr = NULL;
2042
2043 i915_gem_object_ggtt_unpin(ring->obj);
2044 ring->vma = NULL;
2045}
2046
Chris Wilson32c04f12016-08-02 22:50:22 +01002047static void intel_destroy_ringbuffer_obj(struct intel_ring *ring)
Chris Wilsone3efda42014-04-09 09:19:41 +01002048{
Chris Wilson32c04f12016-08-02 22:50:22 +01002049 i915_gem_object_put(ring->obj);
2050 ring->obj = NULL;
Oscar Mateo2919d292014-07-03 16:28:02 +01002051}
2052
Chris Wilson01101fa2015-09-03 13:01:39 +01002053static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
Chris Wilson32c04f12016-08-02 22:50:22 +01002054 struct intel_ring *ring)
Oscar Mateo2919d292014-07-03 16:28:02 +01002055{
Chris Wilsone3efda42014-04-09 09:19:41 +01002056 struct drm_i915_gem_object *obj;
Chris Wilsone3efda42014-04-09 09:19:41 +01002057
2058 obj = NULL;
2059 if (!HAS_LLC(dev))
Chris Wilson32c04f12016-08-02 22:50:22 +01002060 obj = i915_gem_object_create_stolen(dev, ring->size);
Chris Wilsone3efda42014-04-09 09:19:41 +01002061 if (obj == NULL)
Chris Wilson32c04f12016-08-02 22:50:22 +01002062 obj = i915_gem_object_create(dev, ring->size);
Chris Wilsonfe3db792016-04-25 13:32:13 +01002063 if (IS_ERR(obj))
2064 return PTR_ERR(obj);
Chris Wilsone3efda42014-04-09 09:19:41 +01002065
Akash Goel24f3a8c2014-06-17 10:59:42 +05302066 /* mark ring buffers as read-only from GPU side by default */
2067 obj->gt_ro = 1;
2068
Chris Wilson32c04f12016-08-02 22:50:22 +01002069 ring->obj = obj;
Chris Wilsone3efda42014-04-09 09:19:41 +01002070
Thomas Daniel7ba717c2014-11-13 10:28:56 +00002071 return 0;
Chris Wilsone3efda42014-04-09 09:19:41 +01002072}
2073
Chris Wilson7e37f882016-08-02 22:50:21 +01002074struct intel_ring *
2075intel_engine_create_ring(struct intel_engine_cs *engine, int size)
Chris Wilson01101fa2015-09-03 13:01:39 +01002076{
Chris Wilson7e37f882016-08-02 22:50:21 +01002077 struct intel_ring *ring;
Chris Wilson01101fa2015-09-03 13:01:39 +01002078 int ret;
2079
Chris Wilson8f942012016-08-02 22:50:30 +01002080 GEM_BUG_ON(!is_power_of_2(size));
2081
Chris Wilson01101fa2015-09-03 13:01:39 +01002082 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
Chris Wilson608c1a52015-09-03 13:01:40 +01002083 if (ring == NULL) {
2084 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
2085 engine->name);
Chris Wilson01101fa2015-09-03 13:01:39 +01002086 return ERR_PTR(-ENOMEM);
Chris Wilson608c1a52015-09-03 13:01:40 +01002087 }
Chris Wilson01101fa2015-09-03 13:01:39 +01002088
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00002089 ring->engine = engine;
Chris Wilson608c1a52015-09-03 13:01:40 +01002090 list_add(&ring->link, &engine->buffers);
Chris Wilson01101fa2015-09-03 13:01:39 +01002091
2092 ring->size = size;
2093 /* Workaround an erratum on the i830 which causes a hang if
2094 * the TAIL pointer points to within the last 2 cachelines
2095 * of the buffer.
2096 */
2097 ring->effective_size = size;
Chris Wilsonc0336662016-05-06 15:40:21 +01002098 if (IS_I830(engine->i915) || IS_845G(engine->i915))
Chris Wilson01101fa2015-09-03 13:01:39 +01002099 ring->effective_size -= 2 * CACHELINE_BYTES;
2100
2101 ring->last_retired_head = -1;
2102 intel_ring_update_space(ring);
2103
Chris Wilson91c8a322016-07-05 10:40:23 +01002104 ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
Chris Wilson01101fa2015-09-03 13:01:39 +01002105 if (ret) {
Chris Wilson608c1a52015-09-03 13:01:40 +01002106 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
2107 engine->name, ret);
2108 list_del(&ring->link);
Chris Wilson01101fa2015-09-03 13:01:39 +01002109 kfree(ring);
2110 return ERR_PTR(ret);
2111 }
2112
2113 return ring;
2114}
2115
2116void
Chris Wilson7e37f882016-08-02 22:50:21 +01002117intel_ring_free(struct intel_ring *ring)
Chris Wilson01101fa2015-09-03 13:01:39 +01002118{
2119 intel_destroy_ringbuffer_obj(ring);
Chris Wilson608c1a52015-09-03 13:01:40 +01002120 list_del(&ring->link);
Chris Wilson01101fa2015-09-03 13:01:39 +01002121 kfree(ring);
2122}
2123
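/* Pin the context state for this engine while requests may use it; pinning
 * is refcounted via ce->pin_count and holds a reference on the context so
 * it cannot be freed while pinned.
 */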
Chris Wilson0cb26a82016-06-24 14:55:53 +01002124static int intel_ring_context_pin(struct i915_gem_context *ctx,
2125 struct intel_engine_cs *engine)
2126{
2127 struct intel_context *ce = &ctx->engine[engine->id];
2128 int ret;
2129
Chris Wilson91c8a322016-07-05 10:40:23 +01002130 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002131
2132 if (ce->pin_count++)
2133 return 0;
2134
2135 if (ce->state) {
2136 ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
2137 if (ret)
2138 goto error;
2139 }
2140
Chris Wilsonc7c3c072016-06-24 14:55:54 +01002141 /* The kernel context is only used as a placeholder for flushing the
2142 * active context. It is never used for submitting user rendering and
2143 * as such never requires the golden render context, and so we can skip
2144 * emitting it when we switch to the kernel context. This is required
2145 * as during eviction we cannot allocate and pin the renderstate in
2146 * order to initialise the context.
2147 */
2148 if (ctx == ctx->i915->kernel_context)
2149 ce->initialised = true;
2150
Chris Wilson9a6feaf2016-07-20 13:31:50 +01002151 i915_gem_context_get(ctx);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002152 return 0;
2153
2154error:
2155 ce->pin_count = 0;
2156 return ret;
2157}
2158
2159static void intel_ring_context_unpin(struct i915_gem_context *ctx,
2160 struct intel_engine_cs *engine)
2161{
2162 struct intel_context *ce = &ctx->engine[engine->id];
2163
Chris Wilson91c8a322016-07-05 10:40:23 +01002164 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002165
2166 if (--ce->pin_count)
2167 return;
2168
2169 if (ce->state)
2170 i915_gem_object_ggtt_unpin(ce->state);
2171
Chris Wilson9a6feaf2016-07-20 13:31:50 +01002172 i915_gem_context_put(ctx);
Chris Wilson0cb26a82016-06-24 14:55:53 +01002173}
2174
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring;
	int ret;

	WARN_ON(engine->buffer);

	intel_engine_setup_common(engine);

	memset(engine->semaphore.sync_seqno, 0,
	       sizeof(engine->semaphore.sync_seqno));

	ret = intel_engine_init_common(engine);
	if (ret)
		goto error;

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
	if (ret)
		goto error;

	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error;
	}
	engine->buffer = ring;

	if (I915_NEED_GFX_HWS(dev_priv)) {
		ret = init_status_page(engine);
		if (ret)
			goto error;
	} else {
		WARN_ON(engine->id != RCS);
		ret = init_phys_status_page(engine);
		if (ret)
			goto error;
	}

	ret = intel_ring_pin(ring);
	if (ret) {
		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
			  engine->name, ret);
		intel_destroy_ringbuffer_obj(ring);
		goto error;
	}

	return 0;

error:
	intel_engine_cleanup(engine);
	return ret;
}

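/*
 * Undo intel_init_ring_buffer(): stop the engine, unpin and free the ring,
 * release the status page, command parser, batch pool and breadcrumbs, and
 * drop the kernel context pin taken at init time.
 */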
void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	if (!intel_engine_initialized(engine))
		return;

	dev_priv = engine->i915;

	if (engine->buffer) {
		intel_engine_stop(engine);
		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);

		intel_ring_unpin(engine->buffer);
		intel_ring_free(engine->buffer);
		engine->buffer = NULL;
	}

	if (engine->cleanup)
		engine->cleanup(engine);

	if (I915_NEED_GFX_HWS(dev_priv)) {
		cleanup_status_page(engine);
	} else {
		WARN_ON(engine->id != RCS);
		cleanup_phys_status_page(engine);
	}

	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);
	intel_engine_fini_breadcrumbs(engine);

	intel_ring_context_unpin(dev_priv->kernel_context, engine);

	engine->i915 = NULL;
}

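/* Wait for the most recently submitted request on this engine to complete,
 * without triggering any request retirement.
 */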
int intel_engine_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req;

	/* Wait upon the last request to be completed */
	if (list_empty(&engine->request_list))
		return 0;

	req = list_entry(engine->request_list.prev,
			 struct drm_i915_gem_request,
			 list);

	/* Make sure we do not trigger any retires */
	return __i915_wait_request(req,
				   req->i915->mm.interruptible,
				   NULL, NULL);
}

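/*
 * Called when a new legacy request is allocated: point the request at the
 * engine's ring and temporarily grow the reservation by LEGACY_REQUEST_SIZE
 * while flushing out space, so that building the request later is unlikely
 * to have to wait for ring space.
 */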
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	request->ring = request->engine->buffer;

	ret = intel_ring_begin(request, 0);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

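/*
 * Wait until at least @bytes of ring space is available: find the oldest
 * request still occupying this ring whose completion frees enough space and
 * wait for it.
 */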
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *target;

	intel_ring_update_space(ring);
	if (ring->space >= bytes)
		return 0;

	/*
	 * Space is reserved in the ringbuffer for finalising the request,
	 * as that cannot be allowed to fail. During request finalisation,
	 * reserved_space is set to 0 to stop the overallocation and the
	 * assumption is that then we never need to wait (which has the
	 * risk of failing with EINTR).
	 *
	 * See also i915_gem_request_alloc() and i915_add_request().
	 */
	GEM_BUG_ON(!req->reserved_space);

	list_for_each_entry(target, &engine->request_list, list) {
		unsigned space;

		/*
		 * The request queue is per-engine, so can contain requests
		 * from multiple ringbuffers. Here, we must ignore any that
		 * aren't from the ringbuffer we're considering.
		 */
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		space = __intel_ring_space(target->postfix, ring->tail,
					   ring->size);
		if (space >= bytes)
			break;
	}

	if (WARN_ON(&target->list == &engine->request_list))
		return -ENOSPC;

	return i915_wait_request(target);
}

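/*
 * Make room in the ring for @num_dwords on top of the request's reserved
 * space, waiting for old requests and/or wrapping the tail (the remainder
 * is cleared to zero, i.e. MI_NOOP) as necessary.
 */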
int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
	struct intel_ring *ring = req->ring;
	int remain_actual = ring->size - ring->tail;
	int remain_usable = ring->effective_size - ring->tail;
	int bytes = num_dwords * sizeof(u32);
	int total_bytes, wait_bytes;
	bool need_wrap = false;

	total_bytes = bytes + req->reserved_space;

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (unlikely(total_bytes > remain_usable)) {
		/*
		 * The base request will fit but the reserved space
		 * falls off the end. So we don't need an immediate wrap
		 * and only need to effectively wait for the reserved
		 * size space from the start of ringbuffer.
		 */
		wait_bytes = remain_actual + req->reserved_space;
	} else {
		/* No wrapping required, just waiting. */
		wait_bytes = total_bytes;
	}

	if (wait_bytes > ring->space) {
		int ret = wait_for_space(req, wait_bytes);
		if (unlikely(ret))
			return ret;

		intel_ring_update_space(ring);
		if (unlikely(ring->space < wait_bytes))
			return -EAGAIN;
	}

	if (unlikely(need_wrap)) {
		GEM_BUG_ON(remain_actual > ring->space);
		GEM_BUG_ON(ring->tail + remain_actual > ring->size);

		/* Fill the tail with MI_NOOP */
		memset(ring->vaddr + ring->tail, 0, remain_actual);
		ring->tail = 0;
		ring->space -= remain_actual;
	}

	ring->space -= bytes;
	GEM_BUG_ON(ring->space < 0);
	return 0;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int num_dwords =
		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(req, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return 0;
}

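/*
 * Re-initialise the engine's seqno bookkeeping to @seqno: clear the gen6/7
 * semaphore registers and the gen8 semaphore page, rewrite the status page,
 * and wake any waiters so they notice the new value.
 */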
void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore_obj) {
		struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
		struct page *page = i915_gem_object_get_dirty_page(obj, 0);
		void *semaphores = kmap(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap(page);
	}
	memset(engine->semaphore.sync_seqno, 0,
	       sizeof(engine->semaphore.sync_seqno));

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);
	engine->last_submitted_seqno = seqno;

	engine->hangcheck.seqno = seqno;

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	rcu_read_lock();
	intel_engine_wakeup(engine);
	rcu_read_unlock();
}

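/*
 * Gen6 BSD tail updates need a workaround: with forcewake held, disable the
 * ring's idle messaging, wait for the ring to wake up, write the new tail
 * and then let it sleep again.
 */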
static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_BSD_SLEEP_PSMI_CONTROL,
				       GEN6_BSD_SLEEP_INDICATOR,
				       0,
				       50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
		      intel_ring_offset(request->ring, request->tail));
	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

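/*
 * Emit a MI_FLUSH_DW on the BSD (video) ring, optionally invalidating the
 * TLBs when EMIT_INVALIDATE is requested. The post-sync write is directed
 * at the HWS scratch address via the GTT.
 */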
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_GEN(req->i915) >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}

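/*
 * Batch buffer dispatch: emit a MI_BATCH_BUFFER_START pointing at @offset.
 * The gen8, Haswell and gen6 variants below differ in how the PPGTT,
 * security and resource-streamer bits are encoded in the command.
 */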
static int
gen8_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	bool ppgtt = USES_PPGTT(req->i915) &&
			!(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
hsw_emit_bb_start(struct drm_i915_gem_request *req,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring,
			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_GEN(req->i915) >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	return 0;
}

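/*
 * Set up the inter-engine semaphore vfuncs and mailbox/offset tables. On
 * gen8+ this allocates (once) and pins a global semaphore page; if either
 * the allocation or the pin fails, semaphores are disabled altogether.
 */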
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	int ret, i;

	if (!i915.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
		obj = i915_gem_object_create(&dev_priv->drm, 4096);
		if (IS_ERR(obj)) {
			DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
			i915.semaphores = 0;
		} else {
			i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
			ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
			if (ret != 0) {
				i915_gem_object_put(obj);
				DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
				i915.semaphores = 0;
			} else {
				dev_priv->semaphore_obj = obj;
			}
		}
	}

	if (!i915.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8) {
		u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);

		engine->semaphore.sync_to = gen8_ring_sync;
		engine->semaphore.signal = gen8_xcs_signal;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			u64 ring_offset;

			if (i != engine->id)
				ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
			else
				ring_offset = MI_SEMAPHORE_SYNC_INVALID;

			engine->semaphore.signal_ggtt[i] = ring_offset;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->semaphore.sync_to = gen6_ring_sync;
		engine->semaphore.signal = gen6_signal;

		/*
		 * The current semaphore is only applied on pre-gen8
		 * platform. And there is no VCS2 ring on the pre-gen8
		 * platform. So the semaphore between RCS and VCS2 is
		 * initialized as INVALID. Gen8 will initialize the
		 * sema between VCS2 and RCS later.
		 */
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			static const struct {
				u32 wait_mbox;
				i915_reg_t mbox_reg;
			} sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
				[RCS] = {
					[VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
					[BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
					[VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
				},
				[VCS] = {
					[RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
					[BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
					[VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
				},
				[BCS] = {
					[RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
					[VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
					[VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
				},
				[VECS] = {
					[RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
					[VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
					[BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
				},
			};
			u32 wait_mbox;
			i915_reg_t mbox_reg;

			if (i == engine->id || i == VCS2) {
				wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
				mbox_reg = GEN6_NOSYNC;
			} else {
				wait_mbox = sem_data[engine->id][i].wait_mbox;
				mbox_reg = sem_data[engine->id][i].mbox_reg;
			}

			engine->semaphore.mbox.wait[i] = wait_mbox;
			engine->semaphore.mbox.signal[i] = mbox_reg;
		}
	}
}

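/*
 * Select the interrupt enable/disable and seqno-barrier callbacks for this
 * engine based on the hardware generation.
 */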
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->irq_enable = gen8_irq_enable;
		engine->irq_disable = gen8_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

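/*
 * Install the generation-appropriate defaults for request emission, batch
 * buffer dispatch, interrupts and semaphores. The per-engine init functions
 * below override individual hooks where an engine needs special handling.
 */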
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	engine->init_hw = init_ring_common;

	engine->emit_request = i9xx_emit_request;
	if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_request = gen6_emit_request;
	engine->submit_request = i9xx_submit_request;

	if (INTEL_GEN(dev_priv) >= 8)
		engine->emit_bb_start = gen8_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;

	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);
}

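/*
 * Render (RCS) engine setup: pick the per-generation flush and request
 * emission hooks, wire up Haswell's resource-streamer-aware batch start,
 * and allocate the pipe-control scratch page where the hardware needs it.
 */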
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_request = gen8_render_emit_request;
		engine->emit_flush = gen8_render_ring_flush;
		if (i915.semaphores)
			engine->semaphore.signal = gen8_rcs_signal;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_init_pipe_control(engine, 4096);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_init_pipe_control(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

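/*
 * BSD (video) engine setup: gen6+ uses the MI_FLUSH_DW based flush and, on
 * gen6 itself, the PSMI tail-write workaround; older parts fall back to the
 * legacy BSD ring at BSD_RING_BASE.
 */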
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->submit_request = gen6_bsd_submit_request;
		engine->emit_flush = gen6_bsd_ring_flush;
		if (INTEL_GEN(dev_priv) < 8)
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

/**
 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_bsd_ring_flush;

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}

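/*
 * Quiesce the engine before teardown: wait for the last request and then
 * stop the ring. Failure to idle is reported but does not abort the stop.
 */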
void intel_engine_stop(struct intel_engine_cs *engine)
{
	int ret;

	if (!intel_engine_initialized(engine))
		return;

	ret = intel_engine_idle(engine);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);

	stop_ring(engine);
}