/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register, however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

#define MAX_MMIO_BASES 3
struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS4] = {
		.hw_id = VCS4_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS2] = {
		.hw_id = VECS2_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

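/*
 * Look up the MMIO base for an engine on the running device. The per-engine
 * mmio_bases[] tables are sorted in descending gen order, so the first entry
 * whose gen does not exceed the device's gen is the one to use.
 */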
static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

static void __sprint_engine_name(char *name, const struct engine_info *info)
{
	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
			 intel_engine_classes[info->class].name,
			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}

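/*
 * Allocate and minimally initialise the software state for a single engine
 * from its intel_engines[] entry; no hardware access happens at this stage.
 */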
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	__sprint_engine_name(engine->name, info);
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = intel_engine_classes[info->class].uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	spin_lock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
	return 0;
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
	i915_gem_batch_pool_init(&engine->batch_pool, engine);
}

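/*
 * Decide whether the context-status buffer (CSB) has to be read back via
 * MMIO rather than from the HWSP written by the GPU.
 */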
static bool csb_force_mmio(struct drm_i915_private *i915)
{
	/*
	 * IOMMU adds unpredictable latency causing the CSB write (from the
	 * GPU into the HWSP) to only be visible some time after the interrupt
	 * (missed breadcrumb syndrome).
	 */
	if (intel_vtd_active())
		return true;

	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
		return true;

	if (IS_CANNONLAKE(i915))
		return true;

	return false;
}

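/*
 * Software-only execlists setup: select the CSB access mode, start with the
 * default number of ELSP ports (execlists_num_ports() == port_mask + 1) and
 * an empty priority queue.
 */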
static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue_priority = INT_MIN;
	execlists->queue = RB_ROOT;
	execlists->first = NULL;
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	intel_engine_init_execlist(engine);
	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ring = engine->context_pin(engine, engine->i915->kernel_context);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (engine->i915->preempt_context) {
		ring = engine->context_pin(engine,
					   engine->i915->preempt_context);
		if (IS_ERR(ring)) {
			ret = PTR_ERR(ring);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_breadcrumbs;

	return 0;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (engine->i915->preempt_context)
		engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (engine->i915->preempt_context)
		engine->context_unpin(engine, engine->i915->preempt_context);
	engine->context_unpin(engine, engine->i915->kernel_context);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

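/*
 * Read a single slice/subslice specific register by steering the MCR
 * selector to it. The read is performed under forcewake and the uncore lock,
 * and the selector is restored to 0 afterwards as the HW expects.
 */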
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr_slice_subslice_mask;
	uint32_t mcr_slice_subslice_select;
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
					    GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
					    GEN8_MCR_SUBSLICE(subslice);
	}

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
	mcr &= ~mcr_slice_subslice_mask;
	mcr |= mcr_slice_subslice_select;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~mcr_slice_subslice_mask;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	/* If the whole device is asleep, the engine must be idle */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active))
		return false;

	/* ELSP is empty, but there are ready requests? */
	if (READ_ONCE(engine->execlists.first))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the context currently being executed on this engine, or
 * the last context executed if the engine is already idle, is the kernel
 * context (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct i915_gem_context * const kernel_context =
		engine->i915->kernel_context;
	struct i915_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline->last_request);
	if (rq)
		return rq->ctx == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		/* Must be reset upon idling, or we may miss the busy wakeup. */
		GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);

		if (engine->park)
			engine->park(engine);

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

Chris Wilson | aba5e27 | 2017-10-25 15:39:41 +0100 | [diff] [blame] | 1070 | /** |
| 1071 | * intel_engines_unpark: called when the GT is transitioning from idle->busy |
| 1072 | * @i915: the i915 device |
| 1073 | * |
| 1074 | * The GT was idle and now about to fire up with some new user requests. |
| 1075 | */ |
| 1076 | void intel_engines_unpark(struct drm_i915_private *i915) |
| 1077 | { |
| 1078 | struct intel_engine_cs *engine; |
| 1079 | enum intel_engine_id id; |
| 1080 | |
| 1081 | for_each_engine(engine, i915, id) { |
| 1082 | if (engine->unpark) |
| 1083 | engine->unpark(engine); |
| 1084 | } |
| 1085 | } |
| 1086 | |
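/*
 * Report whether this engine can reliably write a dword to a virtual (GGTT)
 * address with MI_STORE_DWORD_IMM: the oldest platforms only accept
 * physical addresses, and the gen6 video ring is known to be unreliable
 * here. Mainly of interest to selftests and debug code that want the GPU
 * to write breadcrumbs to a known location.
 */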
Chris Wilson | 90cad09 | 2017-09-06 16:28:59 +0100 | [diff] [blame] | 1087 | bool intel_engine_can_store_dword(struct intel_engine_cs *engine) |
| 1088 | { |
| 1089 | switch (INTEL_GEN(engine->i915)) { |
| 1090 | case 2: |
| 1091 | return false; /* uses physical not virtual addresses */ |
| 1092 | case 3: |
| 1093 | /* maybe only uses physical not virtual addresses */ |
| 1094 | return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); |
| 1095 | case 6: |
| 1096 | return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ |
| 1097 | default: |
| 1098 | return true; |
| 1099 | } |
| 1100 | } |
| 1101 | |
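/**
 * intel_engines_has_context_isolation() - report classes with default context state
 * @i915: the i915 device
 *
 * Returns a bitmask, indexed by engine uabi class, of the classes whose
 * engines have recorded a default (clean) context state, i.e. whose new
 * contexts start from known register values rather than inheriting whatever
 * the previous context left behind. This is the value reported to userspace
 * through the I915_PARAM_HAS_CONTEXT_ISOLATION getparam.
 */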
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1102 | unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915) |
| 1103 | { |
| 1104 | struct intel_engine_cs *engine; |
| 1105 | enum intel_engine_id id; |
| 1106 | unsigned int which; |
| 1107 | |
| 1108 | which = 0; |
| 1109 | for_each_engine(engine, i915, id) |
| 1110 | if (engine->default_state) |
| 1111 | which |= BIT(engine->uabi_class); |
| 1112 | |
| 1113 | return which; |
| 1114 | } |
| 1115 | |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1116 | static void print_request(struct drm_printer *m, |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1117 | struct i915_request *rq, |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1118 | const char *prefix) |
| 1119 | { |
Chris Wilson | ab26815 | 2018-03-14 10:16:30 +0000 | [diff] [blame] | 1120 | const char *name = rq->fence.ops->get_timeline_name(&rq->fence); |
| 1121 | |
Chris Wilson | 367a35a | 2018-02-28 09:47:32 +0000 | [diff] [blame] | 1122 | drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix, |
Chris Wilson | a27d5a4 | 2017-10-15 21:43:10 +0100 | [diff] [blame] | 1123 | rq->global_seqno, |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1124 | i915_request_completed(rq) ? "!" : "", |
Chris Wilson | 367a35a | 2018-02-28 09:47:32 +0000 | [diff] [blame] | 1125 | rq->fence.context, rq->fence.seqno, |
Chris Wilson | 0c7112a | 2018-04-18 19:40:51 +0100 | [diff] [blame^] | 1126 | rq->sched.priority, |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1127 | jiffies_to_msecs(jiffies - rq->emitted_jiffies), |
Chris Wilson | ab26815 | 2018-03-14 10:16:30 +0000 | [diff] [blame] | 1128 | name); |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1129 | } |
| 1130 | |
Chris Wilson | c1bf272 | 2017-12-22 18:25:21 +0000 | [diff] [blame] | 1131 | static void hexdump(struct drm_printer *m, const void *buf, size_t len) |
| 1132 | { |
| 1133 | const size_t rowsize = 8 * sizeof(u32); |
| 1134 | const void *prev = NULL; |
| 1135 | bool skip = false; |
| 1136 | size_t pos; |
| 1137 | |
| 1138 | for (pos = 0; pos < len; pos += rowsize) { |
| 1139 | char line[128]; |
| 1140 | |
| 1141 | if (prev && !memcmp(prev, buf + pos, rowsize)) { |
| 1142 | if (!skip) { |
| 1143 | drm_printf(m, "*\n"); |
| 1144 | skip = true; |
| 1145 | } |
| 1146 | continue; |
| 1147 | } |
| 1148 | |
| 1149 | WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos, |
| 1150 | rowsize, sizeof(u32), |
| 1151 | line, sizeof(line), |
| 1152 | false) >= sizeof(line)); |
| 1153 | drm_printf(m, "%08zx %s\n", pos, line); |
| 1154 | |
| 1155 | prev = buf + pos; |
| 1156 | skip = false; |
| 1157 | } |
| 1158 | } |
| 1159 | |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1160 | static void intel_engine_print_registers(const struct intel_engine_cs *engine, |
| 1161 | struct drm_printer *m) |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1162 | { |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1163 | struct drm_i915_private *dev_priv = engine->i915; |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1164 | const struct intel_engine_execlists * const execlists = |
| 1165 | &engine->execlists; |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1166 | u64 addr; |
| 1167 | |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1168 | drm_printf(m, "\tRING_START: 0x%08x\n", |
| 1169 | I915_READ(RING_START(engine->mmio_base))); |
| 1170 | drm_printf(m, "\tRING_HEAD: 0x%08x\n", |
| 1171 | I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR); |
| 1172 | drm_printf(m, "\tRING_TAIL: 0x%08x\n", |
| 1173 | I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR); |
Chris Wilson | 3c75de5 | 2017-10-26 12:50:48 +0100 | [diff] [blame] | 1174 | drm_printf(m, "\tRING_CTL: 0x%08x%s\n", |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1175 | I915_READ(RING_CTL(engine->mmio_base)), |
Chris Wilson | 3c75de5 | 2017-10-26 12:50:48 +0100 | [diff] [blame] | 1176 | I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); |
| 1177 | if (INTEL_GEN(engine->i915) > 2) { |
| 1178 | drm_printf(m, "\tRING_MODE: 0x%08x%s\n", |
| 1179 | I915_READ(RING_MI_MODE(engine->mmio_base)), |
| 1180 | I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : ""); |
| 1181 | } |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1182 | |
| 1183 | if (INTEL_GEN(dev_priv) >= 6) |
| 1184 | drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine)); |
| 1186 | |
Chris Wilson | 93c6e96 | 2017-11-20 20:55:04 +0000 | [diff] [blame] | 1187 | if (HAS_LEGACY_SEMAPHORES(dev_priv)) { |
Chris Wilson | af9ff6c | 2017-11-20 20:55:03 +0000 | [diff] [blame] | 1188 | drm_printf(m, "\tSYNC_0: 0x%08x\n", |
| 1189 | I915_READ(RING_SYNC_0(engine->mmio_base))); |
| 1190 | drm_printf(m, "\tSYNC_1: 0x%08x\n", |
| 1191 | I915_READ(RING_SYNC_1(engine->mmio_base))); |
| 1192 | if (HAS_VEBOX(dev_priv)) |
| 1193 | drm_printf(m, "\tSYNC_2: 0x%08x\n", |
| 1194 | I915_READ(RING_SYNC_2(engine->mmio_base))); |
| 1195 | } |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1196 | |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1197 | addr = intel_engine_get_active_head(engine); |
| 1198 | drm_printf(m, "\tACTHD: 0x%08x_%08x\n", |
| 1199 | upper_32_bits(addr), lower_32_bits(addr)); |
| 1200 | addr = intel_engine_get_last_batch_head(engine); |
| 1201 | drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", |
| 1202 | upper_32_bits(addr), lower_32_bits(addr)); |
Chris Wilson | a0cf579 | 2017-12-18 12:39:14 +0000 | [diff] [blame] | 1203 | if (INTEL_GEN(dev_priv) >= 8) |
| 1204 | addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base), |
| 1205 | RING_DMA_FADD_UDW(engine->mmio_base)); |
| 1206 | else if (INTEL_GEN(dev_priv) >= 4) |
| 1207 | addr = I915_READ(RING_DMA_FADD(engine->mmio_base)); |
| 1208 | else |
| 1209 | addr = I915_READ(DMA_FADD_I8XX); |
| 1210 | drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n", |
| 1211 | upper_32_bits(addr), lower_32_bits(addr)); |
| 1212 | if (INTEL_GEN(dev_priv) >= 4) { |
| 1213 | drm_printf(m, "\tIPEIR: 0x%08x\n", |
| 1214 | I915_READ(RING_IPEIR(engine->mmio_base))); |
| 1215 | drm_printf(m, "\tIPEHR: 0x%08x\n", |
| 1216 | I915_READ(RING_IPEHR(engine->mmio_base))); |
| 1217 | } else { |
| 1218 | drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR)); |
| 1219 | drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR)); |
| 1220 | } |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1221 | |
Chris Wilson | fb5c551 | 2017-11-20 20:55:00 +0000 | [diff] [blame] | 1222 | if (HAS_EXECLISTS(dev_priv)) { |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1223 | const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1224 | u32 ptr, read, write; |
| 1225 | unsigned int idx; |
| 1226 | |
| 1227 | drm_printf(m, "\tExeclist status: 0x%08x %08x\n", |
| 1228 | I915_READ(RING_EXECLIST_STATUS_LO(engine)), |
| 1229 | I915_READ(RING_EXECLIST_STATUS_HI(engine))); |
| 1230 | |
| 1231 | ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); |
| 1232 | read = GEN8_CSB_READ_PTR(ptr); |
| 1233 | write = GEN8_CSB_WRITE_PTR(ptr); |
Chris Wilson | 9040871 | 2018-03-26 12:50:36 +0100 | [diff] [blame] | 1234 | drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n", |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1235 | read, execlists->csb_head, |
| 1236 | write, |
| 1237 | intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), |
| 1238 | yesno(test_bit(ENGINE_IRQ_EXECLIST, |
Chris Wilson | 9040871 | 2018-03-26 12:50:36 +0100 | [diff] [blame] | 1239 | &engine->irq_posted)), |
| 1240 | yesno(test_bit(TASKLET_STATE_SCHED, |
| 1241 | &engine->execlists.tasklet.state)), |
| 1242 | enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1243 | if (read >= GEN8_CSB_ENTRIES) |
| 1244 | read = 0; |
| 1245 | if (write >= GEN8_CSB_ENTRIES) |
| 1246 | write = 0; |
| 1247 | if (read > write) |
| 1248 | write += GEN8_CSB_ENTRIES; |
| 1249 | while (read < write) { |
| 1250 | idx = ++read % GEN8_CSB_ENTRIES; |
| 1251 | drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n", |
| 1252 | idx, |
| 1253 | I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), |
| 1254 | hws[idx * 2], |
| 1255 | I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)), |
| 1256 | hws[idx * 2 + 1]); |
| 1257 | } |
| 1258 | |
| 1259 | rcu_read_lock(); |
| 1260 | for (idx = 0; idx < execlists_num_ports(execlists); idx++) { |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1261 | struct i915_request *rq; |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1262 | unsigned int count; |
| 1263 | |
| 1264 | rq = port_unpack(&execlists->port[idx], &count); |
| 1265 | if (rq) { |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1266 | char hdr[80]; |
| 1267 | |
Chris Wilson | e8a70ca | 2017-12-08 01:22:59 +0000 | [diff] [blame] | 1268 | snprintf(hdr, sizeof(hdr), |
| 1269 | "\t\tELSP[%d] count=%d, rq: ", |
| 1270 | idx, count); |
| 1271 | print_request(m, rq, hdr); |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1272 | } else { |
Chris Wilson | e8a70ca | 2017-12-08 01:22:59 +0000 | [diff] [blame] | 1273 | drm_printf(m, "\t\tELSP[%d] idle\n", idx); |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1274 | } |
| 1275 | } |
Chris Wilson | 4a118ec | 2017-10-23 22:32:36 +0100 | [diff] [blame] | 1276 | drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active); |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1277 | rcu_read_unlock(); |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1278 | } else if (INTEL_GEN(dev_priv) > 6) { |
| 1279 | drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", |
| 1280 | I915_READ(RING_PP_DIR_BASE(engine))); |
| 1281 | drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", |
| 1282 | I915_READ(RING_PP_DIR_BASE_READ(engine))); |
| 1283 | drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", |
| 1284 | I915_READ(RING_PP_DIR_DCLV(engine))); |
| 1285 | } |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1286 | } |
| 1287 | |
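/**
 * intel_engine_dump - print the current state of an engine for debugging
 * @engine: the engine to dump
 * @m: the &drm_printer receiving the dump
 * @header: optional printf-style header, followed by its arguments
 *
 * Emits the engine's seqno/hangcheck bookkeeping, the first, last and
 * active requests on its timeline, a register snapshot (when the device is
 * awake), the pending execlists/priolist queue, the breadcrumb waiters and
 * a hexdump of the HWSP through @m.
 */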
| 1288 | void intel_engine_dump(struct intel_engine_cs *engine, |
| 1289 | struct drm_printer *m, |
| 1290 | const char *header, ...) |
| 1291 | { |
| 1292 | struct intel_breadcrumbs * const b = &engine->breadcrumbs; |
| 1293 | const struct intel_engine_execlists * const execlists = &engine->execlists; |
| 1294 | struct i915_gpu_error * const error = &engine->i915->gpu_error; |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1295 | struct i915_request *rq; |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1296 | struct rb_node *rb; |
| 1297 | |
| 1298 | if (header) { |
| 1299 | va_list ap; |
| 1300 | |
| 1301 | va_start(ap, header); |
| 1302 | drm_vprintf(m, header, &ap); |
| 1303 | va_end(ap); |
| 1304 | } |
| 1305 | |
| 1306 | if (i915_terminally_wedged(&engine->i915->gpu_error)) |
| 1307 | drm_printf(m, "*** WEDGED ***\n"); |
| 1308 | |
| 1309 | drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n", |
| 1310 | intel_engine_get_seqno(engine), |
| 1311 | intel_engine_last_submit(engine), |
| 1312 | engine->hangcheck.seqno, |
| 1313 | jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp), |
| 1314 | engine->timeline->inflight_seqnos); |
| 1315 | drm_printf(m, "\tReset count: %d (global %d)\n", |
| 1316 | i915_reset_engine_count(error, engine), |
| 1317 | i915_reset_count(error)); |
| 1318 | |
| 1319 | rcu_read_lock(); |
| 1320 | |
| 1321 | drm_printf(m, "\tRequests:\n"); |
| 1322 | |
| 1323 | rq = list_first_entry(&engine->timeline->requests, |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1324 | struct i915_request, link); |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1325 | if (&rq->link != &engine->timeline->requests) |
| 1326 | print_request(m, rq, "\t\tfirst "); |
| 1327 | |
| 1328 | rq = list_last_entry(&engine->timeline->requests, |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1329 | struct i915_request, link); |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1330 | if (&rq->link != &engine->timeline->requests) |
| 1331 | print_request(m, rq, "\t\tlast "); |
| 1332 | |
| 1333 | rq = i915_gem_find_active_request(engine); |
| 1334 | if (rq) { |
| 1335 | print_request(m, rq, "\t\tactive "); |
| 1336 | drm_printf(m, |
| 1337 | "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n", |
| 1338 | rq->head, rq->postfix, rq->tail, |
| 1339 | rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, |
| 1340 | rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); |
Chris Wilson | ef5032a | 2018-03-07 13:42:24 +0000 | [diff] [blame] | 1341 | drm_printf(m, "\t\tring->start: 0x%08x\n", |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1342 | i915_ggtt_offset(rq->ring->vma)); |
Chris Wilson | ef5032a | 2018-03-07 13:42:24 +0000 | [diff] [blame] | 1343 | drm_printf(m, "\t\tring->head: 0x%08x\n", |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1344 | rq->ring->head); |
Chris Wilson | ef5032a | 2018-03-07 13:42:24 +0000 | [diff] [blame] | 1345 | drm_printf(m, "\t\tring->tail: 0x%08x\n", |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1346 | rq->ring->tail); |
Chris Wilson | ef5032a | 2018-03-07 13:42:24 +0000 | [diff] [blame] | 1347 | drm_printf(m, "\t\tring->emit: 0x%08x\n", |
| 1348 | rq->ring->emit); |
| 1349 | drm_printf(m, "\t\tring->space: 0x%08x\n", |
| 1350 | rq->ring->space); |
Chris Wilson | 3ceda3a | 2018-02-12 10:24:15 +0000 | [diff] [blame] | 1351 | } |
| 1352 | |
| 1353 | rcu_read_unlock(); |
| 1354 | |
| 1355 | if (intel_runtime_pm_get_if_in_use(engine->i915)) { |
| 1356 | intel_engine_print_registers(engine, m); |
| 1357 | intel_runtime_pm_put(engine->i915); |
| 1358 | } else { |
| 1359 | drm_printf(m, "\tDevice is asleep; skipping register dump\n"); |
| 1360 | } |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1361 | |
Chris Wilson | a27d5a4 | 2017-10-15 21:43:10 +0100 | [diff] [blame] | 1362 | spin_lock_irq(&engine->timeline->lock); |
| 1363 | list_for_each_entry(rq, &engine->timeline->requests, link) |
| 1364 | print_request(m, rq, "\t\tE "); |
Chris Wilson | f6322ed | 2018-02-22 14:22:29 +0000 | [diff] [blame] | 1365 | drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); |
Chris Wilson | a27d5a4 | 2017-10-15 21:43:10 +0100 | [diff] [blame] | 1366 | for (rb = execlists->first; rb; rb = rb_next(rb)) { |
| 1367 | struct i915_priolist *p = |
| 1368 | rb_entry(rb, typeof(*p), node); |
| 1369 | |
Chris Wilson | 0c7112a | 2018-04-18 19:40:51 +0100 | [diff] [blame^] | 1370 | list_for_each_entry(rq, &p->requests, sched.link) |
Chris Wilson | a27d5a4 | 2017-10-15 21:43:10 +0100 | [diff] [blame] | 1371 | print_request(m, rq, "\t\tQ "); |
| 1372 | } |
| 1373 | spin_unlock_irq(&engine->timeline->lock); |
| 1374 | |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1375 | spin_lock_irq(&b->rb_lock); |
| 1376 | for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { |
| 1377 | struct intel_wait *w = rb_entry(rb, typeof(*w), node); |
| 1378 | |
| 1379 | drm_printf(m, "\t%s [%d] waiting for %x\n", |
| 1380 | w->tsk->comm, w->tsk->pid, w->seqno); |
| 1381 | } |
| 1382 | spin_unlock_irq(&b->rb_lock); |
| 1383 | |
Chris Wilson | 832265d | 2017-12-08 01:23:01 +0000 | [diff] [blame] | 1384 | drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n", |
| 1385 | engine->irq_posted, |
| 1386 | yesno(test_bit(ENGINE_IRQ_BREADCRUMB, |
| 1387 | &engine->irq_posted)), |
| 1388 | yesno(test_bit(ENGINE_IRQ_EXECLIST, |
| 1389 | &engine->irq_posted))); |
Chris Wilson | c1bf272 | 2017-12-22 18:25:21 +0000 | [diff] [blame] | 1390 | |
| 1391 | drm_printf(m, "HWSP:\n"); |
| 1392 | hexdump(m, engine->status_page.page_addr, PAGE_SIZE); |
| 1393 | |
Chris Wilson | c400cc2 | 2017-11-07 15:22:11 +0000 | [diff] [blame] | 1394 | drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine))); |
Chris Wilson | f636edb | 2017-10-09 12:02:57 +0100 | [diff] [blame] | 1395 | } |
| 1396 | |
Tvrtko Ursulin | b46a33e | 2017-11-21 18:18:45 +0000 | [diff] [blame] | 1397 | static u8 user_class_map[] = { |
| 1398 | [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS, |
| 1399 | [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS, |
| 1400 | [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS, |
| 1401 | [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS, |
| 1402 | }; |
| 1403 | |
| 1404 | struct intel_engine_cs * |
| 1405 | intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) |
| 1406 | { |
| 1407 | if (class >= ARRAY_SIZE(user_class_map)) |
| 1408 | return NULL; |
| 1409 | |
| 1410 | class = user_class_map[class]; |
| 1411 | |
| 1412 | GEM_BUG_ON(class > MAX_ENGINE_CLASS); |
| 1413 | |
| 1414 | if (instance > MAX_ENGINE_INSTANCE) |
| 1415 | return NULL; |
| 1416 | |
| 1417 | return i915->engine_class[class][instance]; |
| 1418 | } |
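
/*
 * Illustrative sketch (hypothetical caller): resolving the uabi
 * (class, instance) pair supplied by userspace, as the engine-busyness PMU
 * does, into an engine:
 *
 *	engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 0);
 *	if (!engine)
 *		return -ENOENT;
 */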
| 1419 | |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1420 | /** |
| 1421 | * intel_enable_engine_stats() - Enable engine busy tracking on engine |
| 1422 | * @engine: engine to enable stats collection |
| 1423 | * |
| 1424 | * Start collecting the engine busyness data for @engine. |
| 1425 | * |
| 1426 | * Returns 0 on success or a negative error code. |
| 1427 | */ |
| 1428 | int intel_enable_engine_stats(struct intel_engine_cs *engine) |
| 1429 | { |
Chris Wilson | 99e48bf | 2018-01-15 09:20:41 +0000 | [diff] [blame] | 1430 | struct intel_engine_execlists *execlists = &engine->execlists; |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1431 | unsigned long flags; |
Chris Wilson | 99e48bf | 2018-01-15 09:20:41 +0000 | [diff] [blame] | 1432 | int err = 0; |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1433 | |
Tvrtko Ursulin | cf669b4 | 2017-11-29 10:28:05 +0000 | [diff] [blame] | 1434 | if (!intel_engine_supports_stats(engine)) |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1435 | return -ENODEV; |
| 1436 | |
Chris Wilson | 99e48bf | 2018-01-15 09:20:41 +0000 | [diff] [blame] | 1437 | tasklet_disable(&execlists->tasklet); |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1438 | spin_lock_irqsave(&engine->stats.lock, flags); |
Chris Wilson | 99e48bf | 2018-01-15 09:20:41 +0000 | [diff] [blame] | 1439 | |
| 1440 | if (unlikely(engine->stats.enabled == ~0)) { |
| 1441 | err = -EBUSY; |
| 1442 | goto unlock; |
| 1443 | } |
| 1444 | |
Chris Wilson | 4900727 | 2018-01-11 07:30:31 +0000 | [diff] [blame] | 1445 | if (engine->stats.enabled++ == 0) { |
Chris Wilson | 4900727 | 2018-01-11 07:30:31 +0000 | [diff] [blame] | 1446 | const struct execlist_port *port = execlists->port; |
| 1447 | unsigned int num_ports = execlists_num_ports(execlists); |
| 1448 | |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1449 | engine->stats.enabled_at = ktime_get(); |
Chris Wilson | 4900727 | 2018-01-11 07:30:31 +0000 | [diff] [blame] | 1450 | |
| 1451 | /* XXX submission method oblivious? */ |
| 1452 | while (num_ports-- && port_isset(port)) { |
| 1453 | engine->stats.active++; |
| 1454 | port++; |
| 1455 | } |
| 1456 | |
| 1457 | if (engine->stats.active) |
| 1458 | engine->stats.start = engine->stats.enabled_at; |
| 1459 | } |
Chris Wilson | 99e48bf | 2018-01-15 09:20:41 +0000 | [diff] [blame] | 1460 | |
| 1461 | unlock: |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1462 | spin_unlock_irqrestore(&engine->stats.lock, flags); |
Chris Wilson | 99e48bf | 2018-01-15 09:20:41 +0000 | [diff] [blame] | 1463 | tasklet_enable(&execlists->tasklet); |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1464 | |
Chris Wilson | 99e48bf | 2018-01-15 09:20:41 +0000 | [diff] [blame] | 1465 | return err; |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1466 | } |
| 1467 | |
| 1468 | static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) |
| 1469 | { |
| 1470 | ktime_t total = engine->stats.total; |
| 1471 | |
| 1472 | /* |
| 1473 | * If the engine is executing something at the moment, |
| 1474 | * add that time to the total. |
| 1475 | */ |
| 1476 | if (engine->stats.active) |
| 1477 | total = ktime_add(total, |
| 1478 | ktime_sub(ktime_get(), engine->stats.start)); |
| 1479 | |
| 1480 | return total; |
| 1481 | } |
| 1482 | |
| 1483 | /** |
| 1484 | * intel_engine_get_busy_time() - Return current accumulated engine busyness |
| 1485 | * @engine: engine to report on |
| 1486 | * |
| 1487 | * Returns accumulated time @engine was busy since engine stats were enabled. |
| 1488 | */ |
| 1489 | ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine) |
| 1490 | { |
| 1491 | ktime_t total; |
| 1492 | unsigned long flags; |
| 1493 | |
| 1494 | spin_lock_irqsave(&engine->stats.lock, flags); |
| 1495 | total = __intel_engine_get_busy_time(engine); |
| 1496 | spin_unlock_irqrestore(&engine->stats.lock, flags); |
| 1497 | |
| 1498 | return total; |
| 1499 | } |
| 1500 | |
| 1501 | /** |
| 1502 | * intel_disable_engine_stats() - Disable engine busy tracking on engine |
| 1503 | * @engine: engine to disable stats collection |
| 1504 | * |
| 1505 | * Stops collecting the engine busyness data for @engine. |
| 1506 | */ |
| 1507 | void intel_disable_engine_stats(struct intel_engine_cs *engine) |
| 1508 | { |
| 1509 | unsigned long flags; |
| 1510 | |
Tvrtko Ursulin | cf669b4 | 2017-11-29 10:28:05 +0000 | [diff] [blame] | 1511 | if (!intel_engine_supports_stats(engine)) |
Tvrtko Ursulin | 30e17b7 | 2017-11-21 18:18:48 +0000 | [diff] [blame] | 1512 | return; |
| 1513 | |
| 1514 | spin_lock_irqsave(&engine->stats.lock, flags); |
| 1515 | WARN_ON_ONCE(engine->stats.enabled == 0); |
| 1516 | if (--engine->stats.enabled == 0) { |
| 1517 | engine->stats.total = __intel_engine_get_busy_time(engine); |
| 1518 | engine->stats.active = 0; |
| 1519 | } |
| 1520 | spin_unlock_irqrestore(&engine->stats.lock, flags); |
| 1521 | } |
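
/*
 * Illustrative sketch of the busyness API above (not a caller in this
 * file); the PMU follows roughly this pattern, assuming an engine whose
 * backend supports stats (execlists):
 *
 *	if (intel_enable_engine_stats(engine) == 0) {
 *		ktime_t busy;
 *
 *		... some time later ...
 *
 *		busy = intel_engine_get_busy_time(engine);
 *		intel_disable_engine_stats(engine);
 *	}
 */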
| 1522 | |
Chris Wilson | f97fbf9 | 2017-02-13 17:15:14 +0000 | [diff] [blame] | 1523 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
| 1524 | #include "selftests/mock_engine.c" |
Daniele Ceraolo Spurio | 74419da | 2018-03-14 11:26:51 -0700 | [diff] [blame] | 1525 | #include "selftests/intel_engine_cs.c" |
Chris Wilson | f97fbf9 | 2017-02-13 17:15:14 +0000 | [diff] [blame] | 1526 | #endif |