/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_

#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"
#include "intel_guc_ct.h"
#include "i915_vma.h"

struct drm_i915_gem_request;

/*
 * This structure primarily describes the GEM object shared with the GuC.
 * The specs sometimes refer to this object as a "GuC context", but we use
 * the term "client" to avoid confusion with hardware contexts. This
 * GEM object is held for the entire lifetime of our interaction with
 * the GuC, being allocated before the GuC is loaded with its firmware.
 * Because there's no way to update the address used by the GuC after
 * initialisation, the shared object must stay pinned into the GGTT as
 * long as the GuC is in use. We also keep the first page (only) mapped
 * into kernel address space, as it includes shared data that must be
 * updated on every request submission.
 *
 * The single GEM object described here is actually made up of several
 * separate areas, as far as the GuC is concerned. The first page (kept
 * kmap'd) includes the "process descriptor" which holds sequence data for
 * the doorbell, and one cacheline which actually *is* the doorbell; a
 * write to this will "ring the doorbell" (i.e. send an interrupt to the
 * GuC). The subsequent pages of the client object constitute the work
 * queue (a circular array of work items), again described in the process
 * descriptor. Work queue pages are mapped momentarily as required.
 */
struct i915_guc_client {
	struct i915_vma *vma;
	void *vaddr;
	struct i915_gem_context *owner;
	struct intel_guc *guc;

	uint32_t engines;		/* bitmap of (host) engine ids */
	uint32_t priority;
	u32 stage_id;
	uint32_t proc_desc_offset;

	u16 doorbell_id;
	unsigned long doorbell_offset;

	spinlock_t wq_lock;
	/* Per-engine counts of GuC submissions */
	uint64_t submissions[I915_NUM_ENGINES];
};

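/*
 * Illustrative sketch only, not part of this header: "ringing the doorbell"
 * described above struct i915_guc_client amounts to a write into the
 * permanently-mapped first page of the client object. The helper name, the
 * use of struct guc_doorbell_info (assumed to be defined in
 * intel_guc_fwif.h) and the simplified cookie update are assumptions for
 * illustration, not the driver's real submission path.
 */
static inline void example_guc_ring_doorbell(struct i915_guc_client *client)
{
	/* the doorbell cacheline lives at doorbell_offset in the kmap'd page */
	struct guc_doorbell_info *db = client->vaddr + client->doorbell_offset;

	/* updating the doorbell cookie is what raises the interrupt in the GuC */
	db->cookie++;
}
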
enum intel_uc_fw_status {
	INTEL_UC_FIRMWARE_FAIL = -1,
	INTEL_UC_FIRMWARE_NONE = 0,
	INTEL_UC_FIRMWARE_PENDING,
	INTEL_UC_FIRMWARE_SUCCESS
};

/* User-friendly representation of an enum */
static inline
const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
{
	switch (status) {
	case INTEL_UC_FIRMWARE_FAIL:
		return "FAIL";
	case INTEL_UC_FIRMWARE_NONE:
		return "NONE";
	case INTEL_UC_FIRMWARE_PENDING:
		return "PENDING";
	case INTEL_UC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	}
	return "<invalid>";
}

enum intel_uc_fw_type {
	INTEL_UC_FW_TYPE_GUC,
	INTEL_UC_FW_TYPE_HUC
};

/* User-friendly representation of an enum */
static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
{
	switch (type) {
	case INTEL_UC_FW_TYPE_GUC:
		return "GuC";
	case INTEL_UC_FW_TYPE_HUC:
		return "HuC";
	}
	return "uC";
}

/*
 * This structure encapsulates all the data needed during the process
 * of fetching, caching, and loading the firmware image into the GuC.
 */
struct intel_uc_fw {
	const char *path;
	size_t size;
	struct drm_i915_gem_object *obj;
	enum intel_uc_fw_status fetch_status;
	enum intel_uc_fw_status load_status;

	uint16_t major_ver_wanted;
	uint16_t minor_ver_wanted;
	uint16_t major_ver_found;
	uint16_t minor_ver_found;

	enum intel_uc_fw_type type;
	uint32_t header_size;
	uint32_t header_offset;
	uint32_t rsa_size;
	uint32_t rsa_offset;
	uint32_t ucode_size;
	uint32_t ucode_offset;
};

struct intel_guc_log {
	uint32_t flags;
	struct i915_vma *vma;
	/* The runtime stuff gets created only when GuC logging gets enabled */
	struct {
		void *buf_addr;
		struct workqueue_struct *flush_wq;
		struct work_struct flush_work;
		struct rchan *relay_chan;
	} runtime;
	/* logging related stats */
	u32 capture_miss_count;
	u32 flush_interrupt_count;
	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 flush_count[GUC_MAX_LOG_BUFFER];
};

struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;
	struct intel_guc_ct ct;

	/* Log snapshot if GuC errors during load */
	struct drm_i915_gem_object *load_err_log;

	/* intel_guc_recv interrupt related state */
	bool interrupts_enabled;

	struct i915_vma *ads_vma;
	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;
	struct ida stage_ids;

	struct i915_guc_client *execbuf_client;

	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
	uint32_t db_cacheline;		/* Cyclic counter mod pagesize */

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/* GuC's FW specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len);

	/* GuC's FW specific notify function */
	void (*notify)(struct intel_guc *guc);
};

struct intel_huc {
	/* Generic uC firmware management */
	struct intel_uc_fw fw;

	/* HuC-specific additions */
};

/* intel_uc.c */
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);

static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len);
}

static inline void intel_guc_notify(struct intel_guc *guc)
{
	guc->notify(guc);
}

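/*
 * Usage sketch, not a declaration from this header: callers of
 * intel_guc_send() build a short array of u32s whose first element is an
 * action opcode (the INTEL_GUC_ACTION_* values from intel_guc_fwif.h) and
 * whose remaining elements are the action's payload. The wrapper below is
 * hypothetical and the payload value is illustrative only; dispatch happens
 * through guc->send, i.e. over MMIO or the CT buffer depending on setup.
 */
static inline int example_guc_sample_forcewake(struct intel_guc *guc)
{
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	action[1] = 0;	/* payload; the real value is illustrative here */

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
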
/* intel_guc_loader.c */
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);

/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);

/* intel_guc_log.c */
int intel_guc_log_create(struct intel_guc *guc);
void intel_guc_log_destroy(struct intel_guc *guc);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);

static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);
	GEM_BUG_ON(offset < GUC_WOPCM_TOP);
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
	return offset;
}

/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
void intel_huc_init_hw(struct intel_huc *huc);
void intel_huc_auth(struct intel_huc *huc);

#endif