/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_

#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"

#include "i915_vma.h"

struct drm_i915_gem_request;

/*
 * This structure primarily describes the GEM object shared with the GuC.
 * The specs sometimes refer to this object as a "GuC context", but we use
 * the term "client" to avoid confusion with hardware contexts. This
 * GEM object is held for the entire lifetime of our interaction with
 * the GuC, being allocated before the GuC is loaded with its firmware.
 * Because there's no way to update the address used by the GuC after
 * initialisation, the shared object must stay pinned into the GGTT as
 * long as the GuC is in use. We also keep the first page (only) mapped
 * into kernel address space, as it includes shared data that must be
 * updated on every request submission.
 *
 * The single GEM object described here is actually made up of several
 * separate areas, as far as the GuC is concerned. The first page (kept
 * kmap'd) includes the "process descriptor" which holds sequence data for
 * the doorbell, and one cacheline which actually *is* the doorbell; a
 * write to this will "ring the doorbell" (i.e. send an interrupt to the
 * GuC). The subsequent pages of the client object constitute the work
 * queue (a circular array of work items), again described in the process
 * descriptor. Work queue pages are mapped momentarily as required.
 * (An illustrative doorbell-ring sketch follows the struct definition
 * below.)
 *
 * We also keep a few statistics on failures. Ideally, these should all
 * be zero!
 * no_wq_space:	times that the submission pre-check found no space was
 *		available in the work queue (note, the queue is shared,
 *		not per-engine). It is OK for this to be nonzero, but
 *		it should not be huge!
 * b_fail:	failed to ring the doorbell. This should never happen, unless
 *		somehow the hardware misbehaves, or maybe if the GuC firmware
 *		crashes? We probably need to reset the GPU to recover.
 * retcode:	errno from the last guc_submit()
 */
struct i915_guc_client {
	struct i915_vma *vma;
	void *vaddr;
	struct i915_gem_context *owner;
	struct intel_guc *guc;

	uint32_t engines;		/* bitmap of (host) engine ids	*/
	uint32_t priority;
	u32 ctx_index;
	uint32_t proc_desc_offset;

	u16 doorbell_id;
	unsigned long doorbell_offset;
	u32 doorbell_cookie;

	spinlock_t wq_lock;
	uint32_t wq_offset;
	uint32_t wq_size;
	uint32_t wq_tail;
	uint32_t wq_rsvd;
	uint32_t no_wq_space;
	uint32_t b_fail;
	int retcode;

	/* Per-engine counts of GuC submissions */
	uint64_t submissions[I915_NUM_ENGINES];
};
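
/*
 * Illustrative sketch only, not the driver's actual submission path (that
 * lives in i915_guc_submission.c): once a work item has been written to the
 * work queue, the doorbell is "rung" by updating the cookie in the doorbell
 * cacheline, which sits inside the permanently mapped first page of the
 * client object. The helper name is hypothetical; struct guc_doorbell_info
 * comes from intel_guc_fwif.h.
 */
static inline void example_guc_ring_doorbell(struct i915_guc_client *client)
{
	/* The doorbell cacheline lies within the always-kmap'd first page */
	struct guc_doorbell_info *db = client->vaddr + client->doorbell_offset;

	/* Writing a new cookie value is what interrupts ("rings") the GuC */
	db->cookie = ++client->doorbell_cookie;
}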

enum intel_uc_fw_status {
	INTEL_UC_FIRMWARE_FAIL = -1,
	INTEL_UC_FIRMWARE_NONE = 0,
	INTEL_UC_FIRMWARE_PENDING,
	INTEL_UC_FIRMWARE_SUCCESS
};

enum intel_uc_fw_type {
	INTEL_UC_FW_TYPE_GUC,
	INTEL_UC_FW_TYPE_HUC
};

/*
 * This structure encapsulates all the data needed during the process
 * of fetching, caching, and loading the firmware image into the GuC.
 */
struct intel_uc_fw {
	const char *path;
	size_t size;
	struct drm_i915_gem_object *obj;
	enum intel_uc_fw_status fetch_status;
	enum intel_uc_fw_status load_status;

	uint16_t major_ver_wanted;
	uint16_t minor_ver_wanted;
	uint16_t major_ver_found;
	uint16_t minor_ver_found;

	enum intel_uc_fw_type type;
	uint32_t header_size;
	uint32_t header_offset;
	uint32_t rsa_size;
	uint32_t rsa_offset;
	uint32_t ucode_size;
	uint32_t ucode_offset;
};
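
/*
 * Minimal sketch, assuming only what the fields above describe: the header,
 * uCode payload and RSA signature are each located by an (offset, size) pair
 * relative to the start of the firmware image. This hypothetical bounds
 * check is illustrative and is not the driver's fetch-time validation.
 */
static inline bool example_uc_fw_regions_fit(const struct intel_uc_fw *uc_fw)
{
	u64 end = uc_fw->size;

	/* Each region must lie entirely within the fetched image */
	return (u64)uc_fw->header_offset + uc_fw->header_size <= end &&
	       (u64)uc_fw->ucode_offset + uc_fw->ucode_size <= end &&
	       (u64)uc_fw->rsa_offset + uc_fw->rsa_size <= end;
}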

struct intel_guc_log {
	uint32_t flags;
	struct i915_vma *vma;
	/* These runtime members are created only when GuC logging is enabled */
	struct {
		void *buf_addr;
		struct workqueue_struct *flush_wq;
		struct work_struct flush_work;
		struct rchan *relay_chan;
	} runtime;
	/* logging related stats */
	u32 capture_miss_count;
	u32 flush_interrupt_count;
	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 flush_count[GUC_MAX_LOG_BUFFER];
};

struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;

	/* intel_guc_recv interrupt related state */
	bool interrupts_enabled;

	struct i915_vma *ads_vma;
	struct i915_vma *ctx_pool;
	void *ctx_pool_vaddr;
	struct ida ctx_ids;

	struct i915_guc_client *execbuf_client;

	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
	uint32_t db_cacheline;		/* Cyclic counter mod pagesize	*/

	/* Action status & statistics */
	uint64_t action_count;		/* Total commands issued	*/
	uint32_t action_cmd;		/* Last command word		*/
	uint32_t action_status;		/* Last return status		*/
	uint32_t action_fail;		/* Total number of failures	*/
	int32_t action_err;		/* Last error code		*/

	uint64_t submissions[I915_NUM_ENGINES];
	uint32_t last_seqno[I915_NUM_ENGINES];

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/* GuC firmware-specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
};

struct intel_huc {
	/* Generic uC firmware management */
	struct intel_uc_fw fw;

	/* HuC-specific additions */
};

/* intel_uc.c */
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
			 struct intel_uc_fw *uc_fw);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len);
}
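
/*
 * Usage sketch: an action is a small array of u32s whose first word is the
 * action opcode (see the GuC action enum in intel_guc_fwif.h) and whose
 * remaining words are parameters. guc->send dispatches to whichever backend
 * was installed (e.g. intel_guc_send_mmio). ACTION_OPCODE and param below
 * are placeholders, not real values:
 *
 *	u32 action[] = { ACTION_OPCODE, param };
 *	int err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 */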

/* intel_guc_loader.c */
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);

/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);

/* intel_guc_log.c */
int intel_guc_log_create(struct intel_guc *guc);
void intel_guc_log_destroy(struct intel_guc *guc);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);

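/*
 * GGTT addresses handed to the GuC must lie in the window it can actually
 * address: above the WOPCM region reserved at the bottom of the GGTT and
 * below GUC_GGTT_TOP. The assertions below catch objects pinned outside
 * that window.
 */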
static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);
	GEM_BUG_ON(offset < GUC_WOPCM_TOP);
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
	return offset;
}

/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
void intel_huc_fini(struct drm_i915_private *dev_priv);
int intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);

#endif