/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/circ_buf.h>
#include "i915_drv.h"
#include "intel_uc.h"

/**
 * DOC: GuC-based command submission
 *
 * i915_guc_client:
 * We use the term client to avoid confusion with contexts. An i915_guc_client
 * corresponds to the GuC object guc_context_desc. This context descriptor is
 * allocated from a pool of 1024 entries. The kernel driver allocates a
 * doorbell and a workqueue for the client, along with the process descriptor
 * (guc_process_desc), which is mapped to client space so that the client can
 * write a Work Item and then ring the doorbell.
 *
 * To simplify the implementation, we allocate one gem object that contains all
 * pages for the doorbell, process descriptor and workqueue.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting at 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write (0xC4C8).
 * Firmware writes a success/fail code back to the action register after
 * processing the request. The kernel driver polls waiting for this update and
 * then proceeds.
 * See intel_guc_send().
 *
 * Doorbells:
 * Doorbells are interrupts to the uKernel. A doorbell is a single cache line
 * (QW) mapped into process space.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail pointer
 * and an ELSP context descriptor dword into each Work Item.
 * See guc_wq_item_append().
 *
 */
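
/*
 * Illustrative sketch only, not driver code: how the pieces above fit
 * together for one legacy submission. Field and constant names are taken
 * from guc_wq_item_append() below; this is the 4-DWord WQ_TYPE_INORDER
 * item mentioned in the DOC section.
 *
 *	struct guc_wq_item wqi = {
 *		.header       = WQ_TYPE_INORDER |
 *				(wqi_len << WQ_LEN_SHIFT) |
 *				(engine->guc_id << WQ_TARGET_SHIFT) |
 *				WQ_NO_WCFLUSH_WAIT,
 *		.context_desc = lower 32 bits of the ELSP context descriptor,
 *		.ring_tail    = (rq->tail >> 3) << WQ_RING_TAIL_SHIFT,
 *		.fence_id     = rq->global_seqno,
 *	};
 *
 * The item is copied into the client's shared workqueue, and the doorbell
 * is then rung to notify the GuC (see guc_ring_doorbell()).
 */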

/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int guc_allocate_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		client->ctx_index
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_release_doorbell(struct intel_guc *guc,
				struct i915_guc_client *client)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		client->ctx_index
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
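
/*
 * A note on the message format, generalising the two helpers above:
 * action[0] carries the opcode that intel_guc_send() writes to the
 * action register (SOFT_SCRATCH_0, per the DOC section), and any further
 * elements are parameters of that action - here just the index of the
 * context whose doorbell should be (de)allocated.
 */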

/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static int guc_update_doorbell_id(struct intel_guc *guc,
				  struct i915_guc_client *client,
				  u16 new_id)
{
	struct sg_table *sg = guc->ctx_pool_vma->pages;
	void *doorbell_bitmap = guc->doorbell_bitmap;
	struct guc_doorbell_info *doorbell;
	struct guc_context_desc desc;
	size_t len;

	doorbell = client->vaddr + client->doorbell_offset;

	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
	    test_bit(client->doorbell_id, doorbell_bitmap)) {
		/* Deactivate the old doorbell */
		doorbell->db_status = GUC_DOORBELL_DISABLED;
		(void)guc_release_doorbell(guc, client);
		__clear_bit(client->doorbell_id, doorbell_bitmap);
	}

	/* Update the GuC's idea of the doorbell ID */
	len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
				 sizeof(desc) * client->ctx_index);
	if (len != sizeof(desc))
		return -EFAULT;
	desc.db_id = new_id;
	len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
				   sizeof(desc) * client->ctx_index);
	if (len != sizeof(desc))
		return -EFAULT;

	client->doorbell_id = new_id;
	if (new_id == GUC_INVALID_DOORBELL_ID)
		return 0;

	/* Activate the new doorbell */
	__set_bit(new_id, doorbell_bitmap);
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	doorbell->cookie = client->doorbell_cookie;
	return guc_allocate_doorbell(guc, client);
}

static void guc_disable_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	(void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);

	/* XXX: wait for any interrupts */
	/* XXX: wait for workqueue to drain */
}

static uint16_t
select_doorbell_register(struct intel_guc *guc, uint32_t priority)
{
	/*
	 * The bitmap tracks which doorbell registers are currently in use.
	 * It is split into two halves; the first half is used for normal
	 * priority contexts, the second half for high-priority ones.
	 * Note that logically higher priorities are numerically less than
	 * normal ones, so the test below means "is it high-priority?"
	 */
	const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
	const uint16_t half = GUC_MAX_DOORBELLS / 2;
	const uint16_t start = hi_pri ? half : 0;
	const uint16_t end = start + half;
	uint16_t id;

	id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
	if (id == end)
		id = GUC_INVALID_DOORBELL_ID;

	DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
			 hi_pri ? "high" : "normal", id);

	return id;
}
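
/*
 * Worked example for the split above, assuming GUC_MAX_DOORBELLS is 256:
 * a normal-priority client searches IDs [0, 128) for a free doorbell,
 * while a high-priority one searches [128, 256). find_next_zero_bit()
 * returns 'end' when the relevant half is exhausted, which is mapped to
 * GUC_INVALID_DOORBELL_ID.
 */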

/*
 * Select, assign and release doorbell cachelines
 *
 * These functions track which doorbell cachelines are in use.
 * The data they manipulate is protected by the intel_guc_send lock.
 */

static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
{
	const uint32_t cacheline_size = cache_line_size();
	uint32_t offset;

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Moving to next cache line to reduce contention */
	guc->db_cacheline += cacheline_size;

	DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
			 offset, guc->db_cacheline, cacheline_size);

	return offset;
}
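
/*
 * Example of the rotation above, a sketch assuming cache_line_size()
 * returns 64 and 4096-byte pages: successive calls yield offsets 0x000,
 * 0x040, 0x080, ... 0xfc0, then wrap to 0x000, spreading doorbells over
 * the 64 cachelines of a page instead of contending on one line.
 */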

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_proc_desc_init(struct intel_guc *guc,
			       struct i915_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = client->vaddr + client->proc_desc_offset;

	memset(desc, 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->context_id = client->ctx_index;
	desc->wq_size_bytes = client->wq_size;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;
}

/*
 * Initialise/clear the context descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * write queue, etc).
 */

static void guc_ctx_desc_init(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx = client->owner;
	struct guc_context_desc desc;
	struct sg_table *sg;
	unsigned int tmp;
	u32 gfx_addr;

	memset(&desc, 0, sizeof(desc));

	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
	desc.context_id = client->ctx_index;
	desc.priority = client->priority;
	desc.db_id = client->doorbell_id;

	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
		struct intel_context *ce = &ctx->engine[engine->id];
		uint32_t guc_engine_id = engine->guc_id;
		struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];

		/* TODO: We have a design issue to be solved here. We only
		 * know which engine the user submits to when we receive the
		 * first batch, but the GuC expects the lrc and ring to be
		 * pinned here already. That is not an issue for the default
		 * context, which is currently the only owner of a GuC
		 * client, but any future owner must ensure the lrc is
		 * pinned before we get here.
		 */
		if (!ce->state)
			break;	/* XXX: continue? */

		lrc->context_desc = lower_32_bits(ce->lrc_desc);

		/* The state page is after PPHWSP */
		lrc->ring_lcra =
			guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
				  (guc_engine_id << GUC_ELC_ENGINE_OFFSET);

		lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;

		desc.engines_used |= (1 << guc_engine_id);
	}

	DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
			 client->engines, desc.engines_used);
	WARN_ON(desc.engines_used == 0);

	/*
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = guc_ggtt_offset(client->vma);
	desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc.db_trigger_cpu =
		(uintptr_t)client->vaddr + client->doorbell_offset;
	desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc.process_desc = gfx_addr + client->proc_desc_offset;
	desc.wq_addr = gfx_addr + client->wq_offset;
	desc.wq_size = client->wq_size;

	/*
	 * XXX: Take LRCs from an existing context if this is not an
	 * IsKMDCreatedContext client
	 */
	desc.desc_private = (uintptr_t)client;

	/* Pool context is pinned already */
	sg = guc->ctx_pool_vma->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}

static void guc_ctx_desc_fini(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct guc_context_desc desc;
	struct sg_table *sg;

	memset(&desc, 0, sizeof(desc));

	sg = guc->ctx_pool_vma->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}

/**
 * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
 * @request: request associated with the commands
 *
 * Return:	0 if space is available
 *		-EAGAIN if space is not currently available
 *
 * This function must be called (and must return 0) before a request
 * is submitted to the GuC via i915_guc_submit() below. Once a result
 * of 0 has been returned, it must be balanced by a corresponding
 * call to submit().
 *
 * Reservation allows the caller to determine in advance that space
 * will be available for the next submission before committing resources
 * to it, and helps avoid late failures with complicated recovery paths.
 */
int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
	const size_t wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *client = request->i915->guc.execbuf_client;
	struct guc_process_desc *desc = client->vaddr +
					client->proc_desc_offset;
	u32 freespace;
	int ret;

	spin_lock_irq(&client->wq_lock);
	freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
	freespace -= client->wq_rsvd;
	if (likely(freespace >= wqi_size)) {
		client->wq_rsvd += wqi_size;
		ret = 0;
	} else {
		client->no_wq_space++;
		ret = -EAGAIN;
	}
	spin_unlock_irq(&client->wq_lock);

	return ret;
}
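
/*
 * Sketch of the reserve/submit pairing described above (illustrative
 * only; the real caller lives in the request submission path):
 *
 *	if (i915_guc_wq_reserve(rq) == 0)
 *		i915_guc_submit(rq);	(consumes the reservation)
 *	else
 *		back off and retry later; nothing was reserved (-EAGAIN)
 *
 * Each successful reservation must be balanced by exactly one submit,
 * or released again via i915_guc_wq_unreserve() below.
 */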

static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&client->wq_lock, flags);
	client->wq_rsvd += size;
	spin_unlock_irqrestore(&client->wq_lock, flags);
}

void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
	const int wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *client = request->i915->guc.execbuf_client;

	GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
	guc_client_update_wq_rsvd(client, -wqi_size);
}

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct i915_guc_client *client,
			       struct drm_i915_gem_request *rq)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct intel_engine_cs *engine = rq->engine;
	struct guc_process_desc *desc;
	struct guc_wq_item *wqi;
	u32 freespace, tail, wq_off;

	desc = client->vaddr + client->proc_desc_offset;

	/* Free space is guaranteed, see i915_guc_wq_reserve() above */
	freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
	GEM_BUG_ON(freespace < wqi_size);

	/* The GuC firmware wants the tail index in QWords, not bytes */
	tail = rq->tail;
	GEM_BUG_ON(tail & 7);
	tail >>= 3;
	GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);

	/* For now a workqueue item is 4 DWs and the workqueue buffer is
	 * 2 pages, so a wqi can neither straddle a page boundary nor wrap
	 * around to the beginning. This simplifies the implementation
	 * below.
	 *
	 * XXX: if that ever changes, we would need to build the item in a
	 * temporary wqi and copy it into the workqueue buffer dword by
	 * dword.
	 */
	BUILD_BUG_ON(wqi_size != 16);
	GEM_BUG_ON(client->wq_rsvd < wqi_size);

	/* postincrement WQ tail for next time */
	wq_off = client->wq_tail;
	GEM_BUG_ON(wq_off & (wqi_size - 1));
	client->wq_tail += wqi_size;
	client->wq_tail &= client->wq_size - 1;
	client->wq_rsvd -= wqi_size;

	/* WQ starts from the page after doorbell / process_desc */
	wqi = client->vaddr + wq_off + GUC_DB_SIZE;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
		      (wqi_len << WQ_LEN_SHIFT) |
		      (engine->guc_id << WQ_TARGET_SHIFT) |
		      WQ_NO_WCFLUSH_WAIT;

	/* The GuC wants only the low-order word of the context descriptor */
	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);

	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
	wqi->fence_id = rq->global_seqno;
}
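
/*
 * Worked example of the tail conversion above: if rq->tail is 0x1a8
 * bytes into the ring, the GEM_BUG_ON(tail & 7) check passes (the value
 * is 8-byte aligned) and the QWord index handed to the GuC is
 * 0x1a8 >> 3 == 0x35.
 */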

static int guc_ring_doorbell(struct i915_guc_client *client)
{
	struct guc_process_desc *desc;
	union guc_doorbell_qw db_cmp, db_exc, db_ret;
	union guc_doorbell_qw *db;
	int attempt = 2, ret = -EAGAIN;

	desc = client->vaddr + client->proc_desc_offset;

	/* Update the tail so it is visible to GuC */
	desc->tail = client->wq_tail;

	/* current cookie */
	db_cmp.db_status = GUC_DOORBELL_ENABLED;
	db_cmp.cookie = client->doorbell_cookie;

	/* cookie to be updated */
	db_exc.db_status = GUC_DOORBELL_ENABLED;
	db_exc.cookie = client->doorbell_cookie + 1;
	if (db_exc.cookie == 0)
		db_exc.cookie = 1;

	/* pointer to the current doorbell cacheline */
	db = client->vaddr + client->doorbell_offset;

	while (attempt--) {
		/* let's ring the doorbell */
		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
						   db_cmp.value_qw, db_exc.value_qw);

		/* if the exchange was successfully executed */
		if (db_ret.value_qw == db_cmp.value_qw) {
			/* db was successfully rung */
			client->doorbell_cookie = db_exc.cookie;
			ret = 0;
			break;
		}

		/* XXX: doorbell was lost; we need to acquire it again */
		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
			break;

		DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
			 db_cmp.cookie, db_ret.cookie);

		/* update the cookie to the newly read cookie from the GuC */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)
			db_exc.cookie = 1;
	}

	return ret;
}
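
/*
 * Worked example of the cookie handshake above (illustrative values):
 * with the doorbell currently {ENABLED, cookie = 5}, we cmpxchg
 * expecting {ENABLED, 5} and writing {ENABLED, 6}. If the value read
 * back differs (say the cookie is already 7), the exchange did not
 * happen, so the retry compares against {ENABLED, 7} and writes
 * {ENABLED, 8}. A cookie of 0 is never written; the fixups above skip
 * it when the counter wraps.
 */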

/**
 * __i915_guc_submit() - Submit commands through GuC
 * @rq: request associated with the commands
 *
 * The caller must have already called i915_guc_wq_reserve() above with
 * a result of 0 (success), guaranteeing that there is space in the work
 * queue for the new request, so enqueuing the item cannot fail.
 *
 * Bad Things Will Happen if the caller violates this protocol, e.g. calls
 * submit() when _reserve() says there's no space, or calls _submit()
 * a different number of times from (successful) calls to _reserve().
 *
 * The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
 */
static void __i915_guc_submit(struct drm_i915_gem_request *rq)
{
	struct drm_i915_private *dev_priv = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	unsigned int engine_id = engine->id;
	struct intel_guc *guc = &rq->i915->guc;
	struct i915_guc_client *client = guc->execbuf_client;
	unsigned long flags;
	int b_ret;

	/* WA to flush out the pending GMADR writes to ring buffer. */
	if (i915_vma_is_map_and_fenceable(rq->ring->vma))
		POSTING_READ_FW(GUC_STATUS);

	trace_i915_gem_request_in(rq, 0);

	spin_lock_irqsave(&client->wq_lock, flags);

	guc_wq_item_append(client, rq);
	b_ret = guc_ring_doorbell(client);

	client->submissions[engine_id] += 1;
	client->retcode = b_ret;
	if (b_ret)
		client->b_fail += 1;

	guc->submissions[engine_id] += 1;
	guc->last_seqno[engine_id] = rq->global_seqno;

	spin_unlock_irqrestore(&client->wq_lock, flags);
}

static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
	i915_gem_request_submit(rq);
	__i915_guc_submit(rq);
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of i915_guc_submit() above.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so
 * we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC_WOPCM_TOP)
 * because that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}
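
/*
 * Typical usage, a sketch mirroring the callers later in this file
 * (guc_client_alloc() and i915_guc_submission_init()):
 *
 *	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	...
 *	i915_vma_unpin_and_release(&vma);	(teardown counterpart)
 */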

static void
guc_client_free(struct drm_i915_private *dev_priv,
		struct i915_guc_client *client)
{
	struct intel_guc *guc = &dev_priv->guc;

	if (!client)
		return;

	/*
	 * XXX: wait for any outstanding submissions before freeing memory.
	 * Be sure to drop any locks
	 */

	if (client->vaddr) {
		/*
		 * If we got as far as setting up a doorbell, make sure we
		 * shut it down before unmapping & deallocating the memory.
		 */
		guc_disable_doorbell(guc, client);

		i915_gem_object_unpin_map(client->vma->obj);
	}

	i915_vma_unpin_and_release(&client->vma);

	if (client->ctx_index != GUC_INVALID_CTX_ID) {
		guc_ctx_desc_fini(guc, client);
		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
	}

	kfree(client);
}

/* Check that a doorbell register is in the expected state */
static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	i915_reg_t drbreg = GEN8_DRBREGL(db_id);
	uint32_t value = I915_READ(drbreg);
	bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
	bool expected = test_bit(db_id, guc->doorbell_bitmap);

	if (enabled == expected)
		return true;

	DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
			 db_id, drbreg.reg, value,
			 expected ? "active" : "inactive");

	return false;
}

/*
 * Borrow the first client to set up & tear down each unused doorbell
 * in turn, to ensure that all doorbell h/w is (re)initialised.
 */
static void guc_init_doorbell_hw(struct intel_guc *guc)
{
	struct i915_guc_client *client = guc->execbuf_client;
	uint16_t db_id;
	int i, err;

	guc_disable_doorbell(guc, client);

	for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
		/* Skip if doorbell is OK */
		if (guc_doorbell_check(guc, i))
			continue;

		err = guc_update_doorbell_id(guc, client, i);
		if (err)
			DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
					 i, err);
	}

	db_id = select_doorbell_register(guc, client->priority);
	WARN_ON(db_id == GUC_INVALID_DOORBELL_ID);

	err = guc_update_doorbell_id(guc, client, db_id);
	if (err)
		DRM_WARN("Failed to restore doorbell to %d, err %d\n",
			 db_id, err);

	/* Read back & verify all doorbell registers */
	for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
		(void)guc_doorbell_check(guc, i);
}

/**
 * guc_client_alloc() - Allocate an i915_guc_client
 * @dev_priv:	driver private data structure
 * @engines:	The set of engines to enable for this client
 * @priority:	one of the four priority levels: _CRITICAL, _HIGH, _NORMAL
 *		and _LOW. The kernel client replacing ExecList submission is
 *		created with NORMAL priority. The priority of a client used
 *		by the scheduler can be HIGH, while a preemption context can
 *		use CRITICAL.
 * @ctx:	the context that owns the client (we use the default render
 *		context)
 *
 * Return:	An i915_guc_client object on success, else NULL.
 */
static struct i915_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
		 uint32_t engines,
		 uint32_t priority,
		 struct i915_gem_context *ctx)
{
	struct i915_guc_client *client;
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;
	void *vaddr;
	uint16_t db_id;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->owner = ctx;
	client->guc = guc;
	client->engines = engines;
	client->priority = priority;
	client->doorbell_id = GUC_INVALID_DOORBELL_ID;

	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
						     GUC_MAX_GPU_CONTEXTS,
						     GFP_KERNEL);
	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
		client->ctx_index = GUC_INVALID_CTX_ID;
		goto err;
	}

	/* The first page is doorbell/proc_desc. The two following pages are wq. */
	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma))
		goto err;

	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
	client->vma = vma;

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		goto err;

	client->vaddr = vaddr;

	spin_lock_init(&client->wq_lock);
	client->wq_offset = GUC_DB_SIZE;
	client->wq_size = GUC_WQ_SIZE;

	db_id = select_doorbell_register(guc, client->priority);
	if (db_id == GUC_INVALID_DOORBELL_ID)
		/* XXX: evict a doorbell instead? */
		goto err;

	client->doorbell_offset = select_doorbell_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

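	/*
	 * Resulting layout of the client object, a sketch based on the
	 * offsets chosen above (doorbell and process descriptor occupy
	 * opposite halves of the first page, whichever way round):
	 *
	 *	+--------------------------+ offset 0
	 *	| doorbell cacheline, etc. | first half of page 0
	 *	+--------------------------+ GUC_DB_SIZE / 2
	 *	| process descriptor       | second half of page 0
	 *	+--------------------------+ GUC_DB_SIZE (== wq_offset)
	 *	| work queue               | GUC_WQ_SIZE
	 *	+--------------------------+
	 */
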
	guc_proc_desc_init(guc, client);
	guc_ctx_desc_init(guc, client);

	/* For runtime client allocation we need to enable the doorbell. Not
	 * required yet for the static execbuf_client as this special kernel
	 * client is enabled from i915_guc_submission_enable().
	 *
	 * guc_update_doorbell_id(guc, client, db_id);
	 */

	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
			 priority, client, client->engines, client->ctx_index);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
			 client->doorbell_id, client->doorbell_offset);

	return client;

err:
	guc_client_free(dev_priv, client);
	return NULL;
}

static void guc_policies_init(struct guc_policies *policies)
{
	struct guc_policy *policy;
	u32 p, i;

	policies->dpc_promote_time = 500000;
	policies->max_num_work_items = POLICY_MAX_NUM_WI;

	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
			policy = &policies->policy[p][i];

			policy->execution_quantum = 1000000;
			policy->preemption_time = 500000;
			policy->fault_time = 250000;
			policy->policy_flags = 0;
		}
	}

	policies->is_valid = 1;
}

static void guc_addon_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct i915_vma *vma;
	struct guc_ads *ads;
	struct guc_policies *policies;
	struct guc_mmio_reg_state *reg_state;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct page *page;
	u32 size;

	/* The ads obj includes the struct itself and buffers passed to GuC */
	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
	       sizeof(struct guc_mmio_reg_state) +
	       GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;

	vma = guc->ads_vma;
	if (!vma) {
		vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(size));
		if (IS_ERR(vma))
			return;

		guc->ads_vma = vma;
	}

	page = i915_vma_first_page(vma);
	ads = kmap(page);

	/*
	 * The GuC requires a "Golden Context" when it reinitialises
	 * engines after a reset. Here we use the Render ring default
	 * context, which must already exist and be pinned in the GGTT,
	 * so its address won't change after we've told the GuC where
	 * to find it.
	 */
	engine = dev_priv->engine[RCS];
	ads->golden_context_lrca = engine->status_page.ggtt_offset;

	for_each_engine(engine, dev_priv, id)
		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);

	/* GuC scheduling policies */
	policies = (void *)ads + sizeof(struct guc_ads);
	guc_policies_init(policies);

	ads->scheduler_policies =
		guc_ggtt_offset(vma) + sizeof(struct guc_ads);

	/* MMIO reg state */
	reg_state = (void *)policies + sizeof(struct guc_policies);

	for_each_engine(engine, dev_priv, id) {
		reg_state->mmio_white_list[engine->guc_id].mmio_start =
			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;

		/* Nothing to be saved or restored for now. */
		reg_state->mmio_white_list[engine->guc_id].count = 0;
	}

	ads->reg_state_addr = ads->scheduler_policies +
			      sizeof(struct guc_policies);

	ads->reg_state_buffer = ads->reg_state_addr +
				sizeof(struct guc_mmio_reg_state);

	kunmap(page);
}

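/*
 * Layout of the ADS object built above, a sketch derived from the size
 * computation and pointer arithmetic in guc_addon_create():
 *
 *	+----------------------------+ guc_ggtt_offset(vma)
 *	| struct guc_ads             |
 *	+----------------------------+ ads->scheduler_policies
 *	| struct guc_policies        |
 *	+----------------------------+ ads->reg_state_addr
 *	| struct guc_mmio_reg_state  |
 *	+----------------------------+ ads->reg_state_buffer
 *	| GUC_S3_SAVE_SPACE_PAGES    |
 *	+----------------------------+
 */
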
/*
 * Set up the memory resources to be shared with the GuC. At this point,
 * we require just one object that can be mapped through the GGTT.
 */
int i915_guc_submission_init(struct drm_i915_private *dev_priv)
{
	const size_t ctxsize = sizeof(struct guc_context_desc);
	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;

	if (!HAS_GUC_SCHED(dev_priv))
		return 0;

	/* Wipe bitmap & delete client in case of reinitialisation */
	bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
	i915_guc_submission_disable(dev_priv);

	if (!i915.enable_guc_submission)
		return 0; /* not enabled */

	if (guc->ctx_pool_vma)
		return 0; /* already allocated */

	vma = intel_guc_allocate_vma(guc, gemsize);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	guc->ctx_pool_vma = vma;
	ida_init(&guc->ctx_ids);
	intel_guc_log_create(guc);
	guc_addon_create(guc);

	guc->execbuf_client = guc_client_alloc(dev_priv,
					       INTEL_INFO(dev_priv)->ring_mask,
					       GUC_CTX_PRIORITY_KMD_NORMAL,
					       dev_priv->kernel_context);
	if (!guc->execbuf_client) {
		DRM_ERROR("Failed to create GuC client for execbuf!\n");
		goto err;
	}

	return 0;

err:
	i915_guc_submission_fini(dev_priv);
	return -ENOMEM;
}

static void guc_reset_wq(struct i915_guc_client *client)
{
	struct guc_process_desc *desc = client->vaddr +
					client->proc_desc_offset;

	desc->head = 0;
	desc->tail = 0;

	client->wq_tail = 0;
}

static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/* tell all command streamers to forward interrupts (but not vblank) to GuC */
	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
}

int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!client)
		return -ENODEV;

	intel_guc_sample_forcewake(guc);

	guc_reset_wq(client);
	guc_init_doorbell_hw(guc);

	/* Take over from manual control of ELSP (execlists) */
	for_each_engine(engine, dev_priv, id) {
		engine->submit_request = i915_guc_submit;
		engine->schedule = NULL;
	}

	guc_interrupts_capture(dev_priv);

	/* Replay the current set of previously submitted requests */
	for_each_engine(engine, dev_priv, id) {
		const int wqi_size = sizeof(struct guc_wq_item);
		struct drm_i915_gem_request *rq;

		spin_lock_irq(&engine->timeline->lock);
		list_for_each_entry(rq, &engine->timeline->requests, link) {
			guc_client_update_wq_rsvd(client, wqi_size);
			__i915_guc_submit(rq);
		}
		spin_unlock_irq(&engine->timeline->lock);
	}

	return 0;
}

static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * tell all command streamers NOT to forward interrupts or vblank
	 * to GuC.
	 */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);
}

void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	guc_interrupts_release(dev_priv);

	if (!guc->execbuf_client)
		return;

	/* Revert back to manual ELSP submission */
	intel_execlists_enable_submission(dev_priv);
}

void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client;

	client = fetch_and_zero(&guc->execbuf_client);
	if (!client)
		return;

	guc_client_free(dev_priv, client);

	i915_vma_unpin_and_release(&guc->ads_vma);
	i915_vma_unpin_and_release(&guc->log.vma);

	if (guc->ctx_pool_vma)
		ida_destroy(&guc->ctx_ids);
	i915_vma_unpin_and_release(&guc->ctx_pool_vma);
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @dev_priv:	i915 device private
 */
int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];

	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
		return 0;

	gen9_disable_guc_interrupts(dev_priv);

	ctx = dev_priv->kernel_context;

	data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
	/* any value greater than GUC_POWER_D0 */
	data[1] = GUC_POWER_D1;
	/* first page is shared data with GuC */
	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @dev_priv:	i915 device private
 */
int intel_guc_resume(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];

	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
		return 0;

	if (i915.guc_log_level >= 0)
		gen9_enable_guc_interrupts(dev_priv);

	ctx = dev_priv->kernel_context;

	data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
	data[1] = GUC_POWER_D0;
	/* first page is shared data with GuC */
	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}