/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_

#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"
#include "intel_guc_ct.h"
#include "i915_vma.h"

/*
 * This structure primarily describes the GEM object shared with the GuC.
 * The specs sometimes refer to this object as a "GuC context", but we use
 * the term "client" to avoid confusion with hardware contexts. This
 * GEM object is held for the entire lifetime of our interaction with
 * the GuC, being allocated before the GuC is loaded with its firmware.
 * Because there's no way to update the address used by the GuC after
 * initialisation, the shared object must stay pinned into the GGTT as
 * long as the GuC is in use. We also keep the first page (only) mapped
 * into kernel address space, as it includes shared data that must be
 * updated on every request submission.
 *
 * The single GEM object described here is actually made up of several
 * separate areas, as far as the GuC is concerned. The first page (kept
 * kmap'd) includes the "process descriptor" which holds sequence data for
 * the doorbell, and one cacheline which actually *is* the doorbell; a
 * write to this will "ring the doorbell" (i.e. send an interrupt to the
 * GuC). The subsequent pages of the client object constitute the work
 * queue (a circular array of work items), again described in the process
 * descriptor. Work queue pages are mapped momentarily as required.
 */
struct i915_guc_client {
	struct i915_vma *vma;
	void *vaddr;
	struct i915_gem_context *owner;
	struct intel_guc *guc;

	uint32_t engines;		/* bitmap of (host) engine ids */
	uint32_t priority;
	u32 stage_id;
	uint32_t proc_desc_offset;

	u16 doorbell_id;
	unsigned long doorbell_offset;

	spinlock_t wq_lock;
	/* Per-engine counts of GuC submissions */
	uint64_t submissions[I915_NUM_ENGINES];
};

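/*
 * Illustrative sketch only, not part of the driver API: one way the doorbell
 * described above could be rung from the permanently kmap'd first page of
 * the client object. It assumes the guc_doorbell_info layout declared in
 * intel_guc_fwif.h; the __example_ name is hypothetical and the real
 * submission path lives in i915_guc_submission.c.
 */
static inline void __example_guc_ring_doorbell(struct i915_guc_client *client)
{
	/* the doorbell cacheline sits inside the first (kmap'd) page,
	 * at client->doorbell_offset */
	struct guc_doorbell_info *db = client->vaddr + client->doorbell_offset;

	/* writing the cacheline raises an interrupt to the GuC; bumping
	 * the cookie is how the submission code signals a new work item */
	WRITE_ONCE(db->cookie, READ_ONCE(db->cookie) + 1);
}
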
enum intel_uc_fw_status {
	INTEL_UC_FIRMWARE_FAIL = -1,
	INTEL_UC_FIRMWARE_NONE = 0,
	INTEL_UC_FIRMWARE_PENDING,
	INTEL_UC_FIRMWARE_SUCCESS
};

/* User-friendly representation of an enum */
static inline
const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
{
	switch (status) {
	case INTEL_UC_FIRMWARE_FAIL:
		return "FAIL";
	case INTEL_UC_FIRMWARE_NONE:
		return "NONE";
	case INTEL_UC_FIRMWARE_PENDING:
		return "PENDING";
	case INTEL_UC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	}
	return "<invalid>";
}

enum intel_uc_fw_type {
	INTEL_UC_FW_TYPE_GUC,
	INTEL_UC_FW_TYPE_HUC
};

/* User-friendly representation of an enum */
static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
{
	switch (type) {
	case INTEL_UC_FW_TYPE_GUC:
		return "GuC";
	case INTEL_UC_FW_TYPE_HUC:
		return "HuC";
	}
	return "uC";
}

/*
 * This structure encapsulates all the data needed during the process
 * of fetching, caching, and loading the firmware image into the GuC.
 */
struct intel_uc_fw {
	const char *path;
	size_t size;
	struct drm_i915_gem_object *obj;
	enum intel_uc_fw_status fetch_status;
	enum intel_uc_fw_status load_status;

	uint16_t major_ver_wanted;
	uint16_t minor_ver_wanted;
	uint16_t major_ver_found;
	uint16_t minor_ver_found;

	enum intel_uc_fw_type type;
	uint32_t header_size;
	uint32_t header_offset;
	uint32_t rsa_size;
	uint32_t rsa_offset;
	uint32_t ucode_size;
	uint32_t ucode_offset;
};

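/*
 * Illustrative sketch only: the struct above tracks fetching and loading as
 * two separate stages, each reported through intel_uc_fw_status. A caller
 * that only cares whether a blob actually ended up running on the uC might
 * test both, roughly as below (the helper name is hypothetical and not part
 * of the driver).
 */
static inline bool __example_uc_fw_is_running(const struct intel_uc_fw *fw)
{
	return fw->fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
	       fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
}
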
struct intel_guc_log {
	uint32_t flags;
	struct i915_vma *vma;
	/* The runtime stuff gets created only when GuC logging gets enabled */
	struct {
		void *buf_addr;
		struct workqueue_struct *flush_wq;
		struct work_struct flush_work;
		struct rchan *relay_chan;
	} runtime;
	/* logging related stats */
	u32 capture_miss_count;
	u32 flush_interrupt_count;
	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 flush_count[GUC_MAX_LOG_BUFFER];
};

struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;
	struct intel_guc_ct ct;

	/* Log snapshot if GuC errors during load */
	struct drm_i915_gem_object *load_err_log;

	/* intel_guc_recv interrupt related state */
	bool interrupts_enabled;

	struct i915_vma *ads_vma;
	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;
	struct ida stage_ids;

	struct i915_guc_client *execbuf_client;

	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
	uint32_t db_cacheline;		/* Cyclic counter mod pagesize */

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/* GuC's FW specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len);

	/* GuC's FW specific notify function */
	void (*notify)(struct intel_guc *guc);
};

struct intel_huc {
	/* Generic uC firmware management */
	struct intel_uc_fw fw;

	/* HuC-specific additions */
};

/* intel_uc.c */
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);

static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len);
}

static inline void intel_guc_notify(struct intel_guc *guc)
{
	guc->notify(guc);
}

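/*
 * Illustrative sketch only: how a host-to-GuC request is expressed through
 * the send() vfunc wrapped above - an array of u32s whose first element is
 * the action opcode. This assumes the INTEL_GUC_ACTION_SAMPLE_FORCEWAKE
 * opcode from intel_guc_fwif.h; it is not the driver's implementation of
 * intel_guc_sample_forcewake(), and the __example_ name is hypothetical.
 */
static inline int __example_guc_send_action(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_SAMPLE_FORCEWAKE,
		0,	/* action-specific payload follows the opcode */
	};

	/* the backend serializes concurrent callers via guc->send_mutex */
	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
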
/* intel_guc_loader.c */
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);

/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);

/* intel_guc_log.c */
int intel_guc_log_create(struct intel_guc *guc);
void intel_guc_log_destroy(struct intel_guc *guc);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);

static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);
	GEM_BUG_ON(offset < GUC_WOPCM_TOP);
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
	return offset;
}

/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
void intel_huc_init_hw(struct intel_huc *huc);
void intel_huc_auth(struct intel_huc *huc);

#endif