Daniel Vetter | 76aaf22 | 2010-11-05 22:23:30 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright © 2010 Daniel Vetter |
Ben Widawsky | c4ac524 | 2014-02-19 22:05:47 -0800 | [diff] [blame] | 3 | * Copyright © 2011-2014 Intel Corporation |
Daniel Vetter | 76aaf22 | 2010-11-05 22:23:30 +0100 | [diff] [blame] | 4 | * |
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 6 | * copy of this software and associated documentation files (the "Software"), |
| 7 | * to deal in the Software without restriction, including without limitation |
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 9 | * and/or sell copies of the Software, and to permit persons to whom the |
| 10 | * Software is furnished to do so, subject to the following conditions: |
| 11 | * |
| 12 | * The above copyright notice and this permission notice (including the next |
| 13 | * paragraph) shall be included in all copies or substantial portions of the |
| 14 | * Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
| 22 | * IN THE SOFTWARE. |
| 23 | * |
| 24 | */ |
| 25 | |
Chris Wilson | aae4a3d | 2017-02-13 17:15:44 +0000 | [diff] [blame] | 26 | #include <linux/slab.h> /* fault-inject.h is not standalone! */ |
| 27 | |
| 28 | #include <linux/fault-inject.h> |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 29 | #include <linux/log2.h> |
Chris Wilson | 606fec9 | 2017-01-11 11:23:12 +0000 | [diff] [blame] | 30 | #include <linux/random.h> |
Daniel Vetter | 0e46ce2 | 2014-01-08 16:10:27 +0100 | [diff] [blame] | 31 | #include <linux/seq_file.h> |
Chris Wilson | 5bab6f6 | 2015-10-23 18:43:32 +0100 | [diff] [blame] | 32 | #include <linux/stop_machine.h> |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 33 | |
Laura Abbott | ed3ba07 | 2017-05-08 15:58:17 -0700 | [diff] [blame] | 34 | #include <asm/set_memory.h> |
| 35 | |
David Howells | 760285e | 2012-10-02 18:01:07 +0100 | [diff] [blame] | 36 | #include <drm/drmP.h> |
| 37 | #include <drm/i915_drm.h> |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 38 | |
Daniel Vetter | 76aaf22 | 2010-11-05 22:23:30 +0100 | [diff] [blame] | 39 | #include "i915_drv.h" |
Yu Zhang | 5dda8fa | 2015-02-10 19:05:48 +0800 | [diff] [blame] | 40 | #include "i915_vgpu.h" |
Daniel Vetter | 76aaf22 | 2010-11-05 22:23:30 +0100 | [diff] [blame] | 41 | #include "i915_trace.h" |
| 42 | #include "intel_drv.h" |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 43 | #include "intel_frontbuffer.h" |
Daniel Vetter | 76aaf22 | 2010-11-05 22:23:30 +0100 | [diff] [blame] | 44 | |
Chris Wilson | bb8f9cf | 2016-08-22 08:44:31 +0100 | [diff] [blame] | 45 | #define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM) |
| 46 | |
Tvrtko Ursulin | 45f8f69 | 2014-12-10 17:27:59 +0000 | [diff] [blame] | 47 | /** |
| 48 | * DOC: Global GTT views |
| 49 | * |
| 50 | * Background and previous state |
| 51 | * |
| 52 | * Historically objects could exists (be bound) in global GTT space only as |
| 53 | * singular instances with a view representing all of the object's backing pages |
| 54 | * in a linear fashion. This view will be called a normal view. |
| 55 | * |
| 56 | * To support multiple views of the same object, where the number of mapped |
| 57 | * pages is not equal to the backing store, or where the layout of the pages |
| 58 | * is not linear, concept of a GGTT view was added. |
| 59 | * |
| 60 | * One example of an alternative view is a stereo display driven by a single |
| 61 | * image. In this case we would have a framebuffer looking like this |
| 62 | * (2x2 pages): |
| 63 | * |
| 64 | * 12 |
| 65 | * 34 |
| 66 | * |
| 67 | * Above would represent a normal GGTT view as normally mapped for GPU or CPU |
| 68 | * rendering. In contrast, fed to the display engine would be an alternative |
| 69 | * view which could look something like this: |
| 70 | * |
| 71 | * 1212 |
| 72 | * 3434 |
| 73 | * |
| 74 | * In this example both the size and layout of pages in the alternative view is |
| 75 | * different from the normal view. |
| 76 | * |
| 77 | * Implementation and usage |
| 78 | * |
| 79 | * GGTT views are implemented using VMAs and are distinguished via enum |
| 80 | * i915_ggtt_view_type and struct i915_ggtt_view. |
| 81 | * |
| 82 | * A new flavour of core GEM functions which work with GGTT bound objects were |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 83 | * added with the _ggtt_ infix, and sometimes with _view postfix to avoid |
| 84 | * renaming in large amounts of code. They take the struct i915_ggtt_view |
| 85 | * parameter encapsulating all metadata required to implement a view. |
Tvrtko Ursulin | 45f8f69 | 2014-12-10 17:27:59 +0000 | [diff] [blame] | 86 | * |
| 87 | * As a helper for callers which are only interested in the normal view, |
| 88 | * globally const i915_ggtt_view_normal singleton instance exists. All old core |
| 89 | * GEM API functions, the ones not taking the view parameter, are operating on, |
| 90 | * or with the normal GGTT view. |
| 91 | * |
| 92 | * Code wanting to add or use a new GGTT view needs to: |
| 93 | * |
| 94 | * 1. Add a new enum with a suitable name. |
| 95 | * 2. Extend the metadata in the i915_ggtt_view structure if required. |
| 96 | * 3. Add support to i915_get_vma_pages(). |
| 97 | * |
| 98 | * New views are required to build a scatter-gather table from within the |
| 99 | * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and |
| 100 | * exists for the lifetime of an VMA. |
| 101 | * |
| 102 | * Core API is designed to have copy semantics which means that passed in |
| 103 | * struct i915_ggtt_view does not need to be persistent (left around after |
| 104 | * calling the core API functions). |
| 105 | * |
| 106 | */ |
| 107 | |
Daniel Vetter | 70b9f6f | 2015-04-14 17:35:27 +0200 | [diff] [blame] | 108 | static int |
| 109 | i915_get_ggtt_vma_pages(struct i915_vma *vma); |
| 110 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 111 | static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv) |
| 112 | { |
| 113 | /* Note that as an uncached mmio write, this should flush the |
| 114 | * WCB of the writes into the GGTT before it triggers the invalidate. |
| 115 | */ |
| 116 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
| 117 | } |
| 118 | |
| 119 | static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv) |
| 120 | { |
| 121 | gen6_ggtt_invalidate(dev_priv); |
| 122 | I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); |
| 123 | } |
| 124 | |
| 125 | static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv) |
| 126 | { |
| 127 | intel_gtt_chipset_flush(); |
| 128 | } |
| 129 | |
| 130 | static inline void i915_ggtt_invalidate(struct drm_i915_private *i915) |
| 131 | { |
| 132 | i915->ggtt.invalidate(i915); |
| 133 | } |
| 134 | |
Chris Wilson | c033666 | 2016-05-06 15:40:21 +0100 | [diff] [blame] | 135 | int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, |
| 136 | int enable_ppgtt) |
Daniel Vetter | cfa7c86 | 2014-04-29 11:53:58 +0200 | [diff] [blame] | 137 | { |
Chris Wilson | 1893a71 | 2014-09-19 11:56:27 +0100 | [diff] [blame] | 138 | bool has_aliasing_ppgtt; |
| 139 | bool has_full_ppgtt; |
Michel Thierry | 1f9a99e | 2015-09-30 15:36:19 +0100 | [diff] [blame] | 140 | bool has_full_48bit_ppgtt; |
Chris Wilson | 1893a71 | 2014-09-19 11:56:27 +0100 | [diff] [blame] | 141 | |
Michel Thierry | 9e1d0e6 | 2016-12-05 17:57:03 -0800 | [diff] [blame] | 142 | has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt; |
| 143 | has_full_ppgtt = dev_priv->info.has_full_ppgtt; |
| 144 | has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt; |
Chris Wilson | 1893a71 | 2014-09-19 11:56:27 +0100 | [diff] [blame] | 145 | |
Zhi Wang | e320d40 | 2016-09-06 12:04:12 +0800 | [diff] [blame] | 146 | if (intel_vgpu_active(dev_priv)) { |
Tina Zhang | 8a4ab66 | 2017-08-14 15:20:46 +0800 | [diff] [blame] | 147 | /* GVT-g has no support for 32bit ppgtt */ |
Zhi Wang | e320d40 | 2016-09-06 12:04:12 +0800 | [diff] [blame] | 148 | has_full_ppgtt = false; |
Tina Zhang | 8a4ab66 | 2017-08-14 15:20:46 +0800 | [diff] [blame] | 149 | has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv); |
Zhi Wang | e320d40 | 2016-09-06 12:04:12 +0800 | [diff] [blame] | 150 | } |
Yu Zhang | 71ba2d6 | 2015-02-10 19:05:54 +0800 | [diff] [blame] | 151 | |
Chris Wilson | 0e4ca10 | 2016-04-29 13:18:22 +0100 | [diff] [blame] | 152 | if (!has_aliasing_ppgtt) |
| 153 | return 0; |
| 154 | |
Damien Lespiau | 70ee45e | 2014-11-14 15:05:59 +0000 | [diff] [blame] | 155 | /* |
| 156 | * We don't allow disabling PPGTT for gen9+ as it's a requirement for |
| 157 | * execlists, the sole mechanism available to submit work. |
| 158 | */ |
Chris Wilson | c033666 | 2016-05-06 15:40:21 +0100 | [diff] [blame] | 159 | if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) |
Daniel Vetter | cfa7c86 | 2014-04-29 11:53:58 +0200 | [diff] [blame] | 160 | return 0; |
| 161 | |
| 162 | if (enable_ppgtt == 1) |
| 163 | return 1; |
| 164 | |
Chris Wilson | 1893a71 | 2014-09-19 11:56:27 +0100 | [diff] [blame] | 165 | if (enable_ppgtt == 2 && has_full_ppgtt) |
Daniel Vetter | cfa7c86 | 2014-04-29 11:53:58 +0200 | [diff] [blame] | 166 | return 2; |
| 167 | |
Michel Thierry | 1f9a99e | 2015-09-30 15:36:19 +0100 | [diff] [blame] | 168 | if (enable_ppgtt == 3 && has_full_48bit_ppgtt) |
| 169 | return 3; |
| 170 | |
Daniel Vetter | 93a25a9 | 2014-03-06 09:40:43 +0100 | [diff] [blame] | 171 | /* Disable ppgtt on SNB if VT-d is on. */ |
Chris Wilson | 80debff | 2017-05-25 13:16:12 +0100 | [diff] [blame] | 172 | if (IS_GEN6(dev_priv) && intel_vtd_active()) { |
Daniel Vetter | 93a25a9 | 2014-03-06 09:40:43 +0100 | [diff] [blame] | 173 | DRM_INFO("Disabling PPGTT because VT-d is on\n"); |
Daniel Vetter | cfa7c86 | 2014-04-29 11:53:58 +0200 | [diff] [blame] | 174 | return 0; |
Daniel Vetter | 93a25a9 | 2014-03-06 09:40:43 +0100 | [diff] [blame] | 175 | } |
Daniel Vetter | 93a25a9 | 2014-03-06 09:40:43 +0100 | [diff] [blame] | 176 | |
Jesse Barnes | 62942ed | 2014-06-13 09:28:33 -0700 | [diff] [blame] | 177 | /* Early VLV doesn't have this */ |
Chris Wilson | 91c8a32 | 2016-07-05 10:40:23 +0100 | [diff] [blame] | 178 | if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) { |
Jesse Barnes | 62942ed | 2014-06-13 09:28:33 -0700 | [diff] [blame] | 179 | DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); |
| 180 | return 0; |
| 181 | } |
| 182 | |
Michal Wajdeczko | 4f044a8 | 2017-09-19 19:38:44 +0000 | [diff] [blame^] | 183 | if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) { |
Joonas Lahtinen | 4fc0506 | 2017-08-11 12:51:26 +0300 | [diff] [blame] | 184 | if (has_full_48bit_ppgtt) |
| 185 | return 3; |
| 186 | |
| 187 | if (has_full_ppgtt) |
| 188 | return 2; |
| 189 | } |
| 190 | |
| 191 | return has_aliasing_ppgtt ? 1 : 0; |
Daniel Vetter | 93a25a9 | 2014-03-06 09:40:43 +0100 | [diff] [blame] | 192 | } |
| 193 | |
Daniel Vetter | 70b9f6f | 2015-04-14 17:35:27 +0200 | [diff] [blame] | 194 | static int ppgtt_bind_vma(struct i915_vma *vma, |
| 195 | enum i915_cache_level cache_level, |
| 196 | u32 unused) |
Daniel Vetter | 4755265 | 2015-04-14 17:35:24 +0200 | [diff] [blame] | 197 | { |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 198 | u32 pte_flags; |
| 199 | int ret; |
| 200 | |
Matthew Auld | 1f23475 | 2017-05-12 10:14:23 +0100 | [diff] [blame] | 201 | if (!(vma->flags & I915_VMA_LOCAL_BIND)) { |
| 202 | ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, |
| 203 | vma->size); |
| 204 | if (ret) |
| 205 | return ret; |
| 206 | } |
Daniel Vetter | 4755265 | 2015-04-14 17:35:24 +0200 | [diff] [blame] | 207 | |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 208 | vma->pages = vma->obj->mm.pages; |
Chris Wilson | 247177d | 2016-08-15 10:48:47 +0100 | [diff] [blame] | 209 | |
Daniel Vetter | 4755265 | 2015-04-14 17:35:24 +0200 | [diff] [blame] | 210 | /* Currently applicable only to VLV */ |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 211 | pte_flags = 0; |
Daniel Vetter | 4755265 | 2015-04-14 17:35:24 +0200 | [diff] [blame] | 212 | if (vma->obj->gt_ro) |
| 213 | pte_flags |= PTE_READ_ONLY; |
| 214 | |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 215 | vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); |
Daniel Vetter | 70b9f6f | 2015-04-14 17:35:27 +0200 | [diff] [blame] | 216 | |
| 217 | return 0; |
Daniel Vetter | 4755265 | 2015-04-14 17:35:24 +0200 | [diff] [blame] | 218 | } |
| 219 | |
| 220 | static void ppgtt_unbind_vma(struct i915_vma *vma) |
| 221 | { |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 222 | vma->vm->clear_range(vma->vm, vma->node.start, vma->size); |
Daniel Vetter | 4755265 | 2015-04-14 17:35:24 +0200 | [diff] [blame] | 223 | } |
Ben Widawsky | 6f65e29 | 2013-12-06 14:10:56 -0800 | [diff] [blame] | 224 | |
Daniel Vetter | 2c642b0 | 2015-04-14 17:35:26 +0200 | [diff] [blame] | 225 | static gen8_pte_t gen8_pte_encode(dma_addr_t addr, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 226 | enum i915_cache_level level) |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 227 | { |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 228 | gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW; |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 229 | pte |= addr; |
Ben Widawsky | 63c42e5 | 2014-04-18 18:04:27 -0300 | [diff] [blame] | 230 | |
| 231 | switch (level) { |
| 232 | case I915_CACHE_NONE: |
Zhi Wang | c095b97 | 2017-09-14 20:39:41 +0800 | [diff] [blame] | 233 | pte |= PPAT_UNCACHED; |
Ben Widawsky | 63c42e5 | 2014-04-18 18:04:27 -0300 | [diff] [blame] | 234 | break; |
| 235 | case I915_CACHE_WT: |
Zhi Wang | c095b97 | 2017-09-14 20:39:41 +0800 | [diff] [blame] | 236 | pte |= PPAT_DISPLAY_ELLC; |
Ben Widawsky | 63c42e5 | 2014-04-18 18:04:27 -0300 | [diff] [blame] | 237 | break; |
| 238 | default: |
Zhi Wang | c095b97 | 2017-09-14 20:39:41 +0800 | [diff] [blame] | 239 | pte |= PPAT_CACHED; |
Ben Widawsky | 63c42e5 | 2014-04-18 18:04:27 -0300 | [diff] [blame] | 240 | break; |
| 241 | } |
| 242 | |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 243 | return pte; |
| 244 | } |
| 245 | |
Mika Kuoppala | fe36f55 | 2015-06-25 18:35:16 +0300 | [diff] [blame] | 246 | static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, |
| 247 | const enum i915_cache_level level) |
Ben Widawsky | b1fe667 | 2013-11-04 21:20:14 -0800 | [diff] [blame] | 248 | { |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 249 | gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; |
Ben Widawsky | b1fe667 | 2013-11-04 21:20:14 -0800 | [diff] [blame] | 250 | pde |= addr; |
| 251 | if (level != I915_CACHE_NONE) |
Zhi Wang | c095b97 | 2017-09-14 20:39:41 +0800 | [diff] [blame] | 252 | pde |= PPAT_CACHED_PDE; |
Ben Widawsky | b1fe667 | 2013-11-04 21:20:14 -0800 | [diff] [blame] | 253 | else |
Zhi Wang | c095b97 | 2017-09-14 20:39:41 +0800 | [diff] [blame] | 254 | pde |= PPAT_UNCACHED; |
Ben Widawsky | b1fe667 | 2013-11-04 21:20:14 -0800 | [diff] [blame] | 255 | return pde; |
| 256 | } |
| 257 | |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 258 | #define gen8_pdpe_encode gen8_pde_encode |
| 259 | #define gen8_pml4e_encode gen8_pde_encode |
| 260 | |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 261 | static gen6_pte_t snb_pte_encode(dma_addr_t addr, |
| 262 | enum i915_cache_level level, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 263 | u32 unused) |
Ben Widawsky | 54d1252 | 2012-09-24 16:44:32 -0700 | [diff] [blame] | 264 | { |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 265 | gen6_pte_t pte = GEN6_PTE_VALID; |
Ben Widawsky | 54d1252 | 2012-09-24 16:44:32 -0700 | [diff] [blame] | 266 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
Ben Widawsky | e7210c3 | 2012-10-19 09:33:22 -0700 | [diff] [blame] | 267 | |
| 268 | switch (level) { |
Chris Wilson | 350ec88 | 2013-08-06 13:17:02 +0100 | [diff] [blame] | 269 | case I915_CACHE_L3_LLC: |
| 270 | case I915_CACHE_LLC: |
| 271 | pte |= GEN6_PTE_CACHE_LLC; |
| 272 | break; |
| 273 | case I915_CACHE_NONE: |
| 274 | pte |= GEN6_PTE_UNCACHED; |
| 275 | break; |
| 276 | default: |
Daniel Vetter | 5f77eeb | 2014-12-08 16:40:10 +0100 | [diff] [blame] | 277 | MISSING_CASE(level); |
Chris Wilson | 350ec88 | 2013-08-06 13:17:02 +0100 | [diff] [blame] | 278 | } |
| 279 | |
| 280 | return pte; |
| 281 | } |
| 282 | |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 283 | static gen6_pte_t ivb_pte_encode(dma_addr_t addr, |
| 284 | enum i915_cache_level level, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 285 | u32 unused) |
Chris Wilson | 350ec88 | 2013-08-06 13:17:02 +0100 | [diff] [blame] | 286 | { |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 287 | gen6_pte_t pte = GEN6_PTE_VALID; |
Chris Wilson | 350ec88 | 2013-08-06 13:17:02 +0100 | [diff] [blame] | 288 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
| 289 | |
| 290 | switch (level) { |
| 291 | case I915_CACHE_L3_LLC: |
| 292 | pte |= GEN7_PTE_CACHE_L3_LLC; |
Ben Widawsky | e7210c3 | 2012-10-19 09:33:22 -0700 | [diff] [blame] | 293 | break; |
| 294 | case I915_CACHE_LLC: |
| 295 | pte |= GEN6_PTE_CACHE_LLC; |
| 296 | break; |
| 297 | case I915_CACHE_NONE: |
Kenneth Graunke | 9119708 | 2013-04-22 00:53:51 -0700 | [diff] [blame] | 298 | pte |= GEN6_PTE_UNCACHED; |
Ben Widawsky | e7210c3 | 2012-10-19 09:33:22 -0700 | [diff] [blame] | 299 | break; |
| 300 | default: |
Daniel Vetter | 5f77eeb | 2014-12-08 16:40:10 +0100 | [diff] [blame] | 301 | MISSING_CASE(level); |
Ben Widawsky | e7210c3 | 2012-10-19 09:33:22 -0700 | [diff] [blame] | 302 | } |
| 303 | |
Ben Widawsky | 54d1252 | 2012-09-24 16:44:32 -0700 | [diff] [blame] | 304 | return pte; |
| 305 | } |
| 306 | |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 307 | static gen6_pte_t byt_pte_encode(dma_addr_t addr, |
| 308 | enum i915_cache_level level, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 309 | u32 flags) |
Kenneth Graunke | 93c34e7 | 2013-04-22 00:53:50 -0700 | [diff] [blame] | 310 | { |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 311 | gen6_pte_t pte = GEN6_PTE_VALID; |
Kenneth Graunke | 93c34e7 | 2013-04-22 00:53:50 -0700 | [diff] [blame] | 312 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
| 313 | |
Akash Goel | 24f3a8c | 2014-06-17 10:59:42 +0530 | [diff] [blame] | 314 | if (!(flags & PTE_READ_ONLY)) |
| 315 | pte |= BYT_PTE_WRITEABLE; |
Kenneth Graunke | 93c34e7 | 2013-04-22 00:53:50 -0700 | [diff] [blame] | 316 | |
| 317 | if (level != I915_CACHE_NONE) |
| 318 | pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; |
| 319 | |
| 320 | return pte; |
| 321 | } |
| 322 | |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 323 | static gen6_pte_t hsw_pte_encode(dma_addr_t addr, |
| 324 | enum i915_cache_level level, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 325 | u32 unused) |
Kenneth Graunke | 9119708 | 2013-04-22 00:53:51 -0700 | [diff] [blame] | 326 | { |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 327 | gen6_pte_t pte = GEN6_PTE_VALID; |
Ben Widawsky | 0d8ff15 | 2013-07-04 11:02:03 -0700 | [diff] [blame] | 328 | pte |= HSW_PTE_ADDR_ENCODE(addr); |
Kenneth Graunke | 9119708 | 2013-04-22 00:53:51 -0700 | [diff] [blame] | 329 | |
| 330 | if (level != I915_CACHE_NONE) |
Ben Widawsky | 87a6b68 | 2013-08-04 23:47:29 -0700 | [diff] [blame] | 331 | pte |= HSW_WB_LLC_AGE3; |
Kenneth Graunke | 9119708 | 2013-04-22 00:53:51 -0700 | [diff] [blame] | 332 | |
| 333 | return pte; |
| 334 | } |
| 335 | |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 336 | static gen6_pte_t iris_pte_encode(dma_addr_t addr, |
| 337 | enum i915_cache_level level, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 338 | u32 unused) |
Ben Widawsky | 4d15c14 | 2013-07-04 11:02:06 -0700 | [diff] [blame] | 339 | { |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 340 | gen6_pte_t pte = GEN6_PTE_VALID; |
Ben Widawsky | 4d15c14 | 2013-07-04 11:02:06 -0700 | [diff] [blame] | 341 | pte |= HSW_PTE_ADDR_ENCODE(addr); |
| 342 | |
Chris Wilson | 651d794 | 2013-08-08 14:41:10 +0100 | [diff] [blame] | 343 | switch (level) { |
| 344 | case I915_CACHE_NONE: |
| 345 | break; |
| 346 | case I915_CACHE_WT: |
Chris Wilson | c51e970 | 2013-11-22 10:37:53 +0000 | [diff] [blame] | 347 | pte |= HSW_WT_ELLC_LLC_AGE3; |
Chris Wilson | 651d794 | 2013-08-08 14:41:10 +0100 | [diff] [blame] | 348 | break; |
| 349 | default: |
Chris Wilson | c51e970 | 2013-11-22 10:37:53 +0000 | [diff] [blame] | 350 | pte |= HSW_WB_ELLC_LLC_AGE3; |
Chris Wilson | 651d794 | 2013-08-08 14:41:10 +0100 | [diff] [blame] | 351 | break; |
| 352 | } |
Ben Widawsky | 4d15c14 | 2013-07-04 11:02:06 -0700 | [diff] [blame] | 353 | |
| 354 | return pte; |
| 355 | } |
| 356 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 357 | static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 358 | { |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 359 | struct pagevec *pvec = &vm->free_pages; |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 360 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 361 | if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) |
| 362 | i915_gem_shrink_all(vm->i915); |
Chris Wilson | aae4a3d | 2017-02-13 17:15:44 +0000 | [diff] [blame] | 363 | |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 364 | if (likely(pvec->nr)) |
| 365 | return pvec->pages[--pvec->nr]; |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 366 | |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 367 | if (!vm->pt_kmap_wc) |
| 368 | return alloc_page(gfp); |
| 369 | |
| 370 | /* A placeholder for a specific mutex to guard the WC stash */ |
| 371 | lockdep_assert_held(&vm->i915->drm.struct_mutex); |
| 372 | |
| 373 | /* Look in our global stash of WC pages... */ |
| 374 | pvec = &vm->i915->mm.wc_stash; |
| 375 | if (likely(pvec->nr)) |
| 376 | return pvec->pages[--pvec->nr]; |
| 377 | |
| 378 | /* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */ |
| 379 | do { |
| 380 | struct page *page; |
| 381 | |
| 382 | page = alloc_page(gfp); |
| 383 | if (unlikely(!page)) |
| 384 | break; |
| 385 | |
| 386 | pvec->pages[pvec->nr++] = page; |
| 387 | } while (pagevec_space(pvec)); |
| 388 | |
| 389 | if (unlikely(!pvec->nr)) |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 390 | return NULL; |
| 391 | |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 392 | set_pages_array_wc(pvec->pages, pvec->nr); |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 393 | |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 394 | return pvec->pages[--pvec->nr]; |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 395 | } |
| 396 | |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 397 | static void vm_free_pages_release(struct i915_address_space *vm, |
| 398 | bool immediate) |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 399 | { |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 400 | struct pagevec *pvec = &vm->free_pages; |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 401 | |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 402 | GEM_BUG_ON(!pagevec_count(pvec)); |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 403 | |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 404 | if (vm->pt_kmap_wc) { |
| 405 | struct pagevec *stash = &vm->i915->mm.wc_stash; |
| 406 | |
| 407 | /* When we use WC, first fill up the global stash and then |
| 408 | * only if full immediately free the overflow. |
| 409 | */ |
| 410 | |
| 411 | lockdep_assert_held(&vm->i915->drm.struct_mutex); |
| 412 | if (pagevec_space(stash)) { |
| 413 | do { |
| 414 | stash->pages[stash->nr++] = |
| 415 | pvec->pages[--pvec->nr]; |
| 416 | if (!pvec->nr) |
| 417 | return; |
| 418 | } while (pagevec_space(stash)); |
| 419 | |
| 420 | /* As we have made some room in the VM's free_pages, |
| 421 | * we can wait for it to fill again. Unless we are |
| 422 | * inside i915_address_space_fini() and must |
| 423 | * immediately release the pages! |
| 424 | */ |
| 425 | if (!immediate) |
| 426 | return; |
| 427 | } |
| 428 | |
| 429 | set_pages_array_wb(pvec->pages, pvec->nr); |
| 430 | } |
| 431 | |
| 432 | __pagevec_release(pvec); |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 433 | } |
| 434 | |
| 435 | static void vm_free_page(struct i915_address_space *vm, struct page *page) |
| 436 | { |
| 437 | if (!pagevec_add(&vm->free_pages, page)) |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 438 | vm_free_pages_release(vm, false); |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 439 | } |
| 440 | |
| 441 | static int __setup_page_dma(struct i915_address_space *vm, |
| 442 | struct i915_page_dma *p, |
| 443 | gfp_t gfp) |
| 444 | { |
| 445 | p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY); |
| 446 | if (unlikely(!p->page)) |
Michel Thierry | 1266cdb | 2015-03-24 17:06:33 +0000 | [diff] [blame] | 447 | return -ENOMEM; |
| 448 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 449 | p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE, |
| 450 | PCI_DMA_BIDIRECTIONAL); |
| 451 | if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { |
| 452 | vm_free_page(vm, p->page); |
| 453 | return -ENOMEM; |
Mika Kuoppala | 44159dd | 2015-06-25 18:35:07 +0300 | [diff] [blame] | 454 | } |
| 455 | |
Michel Thierry | 1266cdb | 2015-03-24 17:06:33 +0000 | [diff] [blame] | 456 | return 0; |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 457 | } |
| 458 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 459 | static int setup_page_dma(struct i915_address_space *vm, |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 460 | struct i915_page_dma *p) |
Mika Kuoppala | c114f76 | 2015-06-25 18:35:13 +0300 | [diff] [blame] | 461 | { |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 462 | return __setup_page_dma(vm, p, I915_GFP_DMA); |
Mika Kuoppala | c114f76 | 2015-06-25 18:35:13 +0300 | [diff] [blame] | 463 | } |
| 464 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 465 | static void cleanup_page_dma(struct i915_address_space *vm, |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 466 | struct i915_page_dma *p) |
Mika Kuoppala | 44159dd | 2015-06-25 18:35:07 +0300 | [diff] [blame] | 467 | { |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 468 | dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
| 469 | vm_free_page(vm, p->page); |
Mika Kuoppala | 44159dd | 2015-06-25 18:35:07 +0300 | [diff] [blame] | 470 | } |
| 471 | |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 472 | #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) |
Mika Kuoppala | d1c54ac | 2015-06-25 18:35:11 +0300 | [diff] [blame] | 473 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 474 | #define setup_px(vm, px) setup_page_dma((vm), px_base(px)) |
| 475 | #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px)) |
| 476 | #define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v)) |
| 477 | #define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v)) |
Mika Kuoppala | 567047b | 2015-06-25 18:35:12 +0300 | [diff] [blame] | 478 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 479 | static void fill_page_dma(struct i915_address_space *vm, |
| 480 | struct i915_page_dma *p, |
| 481 | const u64 val) |
Mika Kuoppala | d1c54ac | 2015-06-25 18:35:11 +0300 | [diff] [blame] | 482 | { |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 483 | u64 * const vaddr = kmap_atomic(p->page); |
Mika Kuoppala | d1c54ac | 2015-06-25 18:35:11 +0300 | [diff] [blame] | 484 | int i; |
Mika Kuoppala | d1c54ac | 2015-06-25 18:35:11 +0300 | [diff] [blame] | 485 | |
| 486 | for (i = 0; i < 512; i++) |
| 487 | vaddr[i] = val; |
| 488 | |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 489 | kunmap_atomic(vaddr); |
Mika Kuoppala | d1c54ac | 2015-06-25 18:35:11 +0300 | [diff] [blame] | 490 | } |
| 491 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 492 | static void fill_page_dma_32(struct i915_address_space *vm, |
| 493 | struct i915_page_dma *p, |
| 494 | const u32 v) |
Mika Kuoppala | 73eeea5 | 2015-06-25 18:35:10 +0300 | [diff] [blame] | 495 | { |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 496 | fill_page_dma(vm, p, (u64)v << 32 | v); |
Mika Kuoppala | 73eeea5 | 2015-06-25 18:35:10 +0300 | [diff] [blame] | 497 | } |
| 498 | |
Chris Wilson | 8bcdd0f7 | 2016-08-22 08:44:30 +0100 | [diff] [blame] | 499 | static int |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 500 | setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) |
Mika Kuoppala | 4ad2af1 | 2015-06-30 18:16:39 +0300 | [diff] [blame] | 501 | { |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 502 | struct page *page; |
| 503 | dma_addr_t addr; |
| 504 | |
| 505 | page = alloc_page(gfp | __GFP_ZERO); |
| 506 | if (unlikely(!page)) |
| 507 | return -ENOMEM; |
| 508 | |
| 509 | addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE, |
| 510 | PCI_DMA_BIDIRECTIONAL); |
| 511 | if (unlikely(dma_mapping_error(vm->dma, addr))) { |
| 512 | __free_page(page); |
| 513 | return -ENOMEM; |
| 514 | } |
| 515 | |
| 516 | vm->scratch_page.page = page; |
| 517 | vm->scratch_page.daddr = addr; |
| 518 | return 0; |
Mika Kuoppala | 4ad2af1 | 2015-06-30 18:16:39 +0300 | [diff] [blame] | 519 | } |
| 520 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 521 | static void cleanup_scratch_page(struct i915_address_space *vm) |
Mika Kuoppala | 4ad2af1 | 2015-06-30 18:16:39 +0300 | [diff] [blame] | 522 | { |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 523 | struct i915_page_dma *p = &vm->scratch_page; |
| 524 | |
| 525 | dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
| 526 | __free_page(p->page); |
Mika Kuoppala | 4ad2af1 | 2015-06-30 18:16:39 +0300 | [diff] [blame] | 527 | } |
| 528 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 529 | static struct i915_page_table *alloc_pt(struct i915_address_space *vm) |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 530 | { |
Michel Thierry | ec565b3 | 2015-04-08 12:13:23 +0100 | [diff] [blame] | 531 | struct i915_page_table *pt; |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 532 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 533 | pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN); |
| 534 | if (unlikely(!pt)) |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 535 | return ERR_PTR(-ENOMEM); |
| 536 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 537 | if (unlikely(setup_px(vm, pt))) { |
| 538 | kfree(pt); |
| 539 | return ERR_PTR(-ENOMEM); |
| 540 | } |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 541 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 542 | pt->used_ptes = 0; |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 543 | return pt; |
| 544 | } |
| 545 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 546 | static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt) |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 547 | { |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 548 | cleanup_px(vm, pt); |
Mika Kuoppala | 2e906be | 2015-06-30 18:16:37 +0300 | [diff] [blame] | 549 | kfree(pt); |
| 550 | } |
| 551 | |
| 552 | static void gen8_initialize_pt(struct i915_address_space *vm, |
| 553 | struct i915_page_table *pt) |
| 554 | { |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 555 | fill_px(vm, pt, |
| 556 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC)); |
Mika Kuoppala | 2e906be | 2015-06-30 18:16:37 +0300 | [diff] [blame] | 557 | } |
| 558 | |
| 559 | static void gen6_initialize_pt(struct i915_address_space *vm, |
| 560 | struct i915_page_table *pt) |
| 561 | { |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 562 | fill32_px(vm, pt, |
| 563 | vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0)); |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 564 | } |
| 565 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 566 | static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 567 | { |
Michel Thierry | ec565b3 | 2015-04-08 12:13:23 +0100 | [diff] [blame] | 568 | struct i915_page_directory *pd; |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 569 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 570 | pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN); |
| 571 | if (unlikely(!pd)) |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 572 | return ERR_PTR(-ENOMEM); |
| 573 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 574 | if (unlikely(setup_px(vm, pd))) { |
| 575 | kfree(pd); |
| 576 | return ERR_PTR(-ENOMEM); |
| 577 | } |
Michel Thierry | 33c8819 | 2015-04-08 12:13:33 +0100 | [diff] [blame] | 578 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 579 | pd->used_pdes = 0; |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 580 | return pd; |
| 581 | } |
| 582 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 583 | static void free_pd(struct i915_address_space *vm, |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 584 | struct i915_page_directory *pd) |
Mika Kuoppala | 2e906be | 2015-06-30 18:16:37 +0300 | [diff] [blame] | 585 | { |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 586 | cleanup_px(vm, pd); |
| 587 | kfree(pd); |
Mika Kuoppala | 2e906be | 2015-06-30 18:16:37 +0300 | [diff] [blame] | 588 | } |
| 589 | |
| 590 | static void gen8_initialize_pd(struct i915_address_space *vm, |
| 591 | struct i915_page_directory *pd) |
| 592 | { |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 593 | unsigned int i; |
Mika Kuoppala | 2e906be | 2015-06-30 18:16:37 +0300 | [diff] [blame] | 594 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 595 | fill_px(vm, pd, |
| 596 | gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC)); |
| 597 | for (i = 0; i < I915_PDES; i++) |
| 598 | pd->page_table[i] = vm->scratch_pt; |
Mika Kuoppala | 2e906be | 2015-06-30 18:16:37 +0300 | [diff] [blame] | 599 | } |
| 600 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 601 | static int __pdp_init(struct i915_address_space *vm, |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 602 | struct i915_page_directory_pointer *pdp) |
| 603 | { |
Mika Kuoppala | 3e49004 | 2017-02-28 17:28:07 +0200 | [diff] [blame] | 604 | const unsigned int pdpes = i915_pdpes_per_pdp(vm); |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 605 | unsigned int i; |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 606 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 607 | pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory), |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 608 | GFP_KERNEL | __GFP_NOWARN); |
| 609 | if (unlikely(!pdp->page_directory)) |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 610 | return -ENOMEM; |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 611 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 612 | for (i = 0; i < pdpes; i++) |
| 613 | pdp->page_directory[i] = vm->scratch_pd; |
| 614 | |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 615 | return 0; |
| 616 | } |
| 617 | |
| 618 | static void __pdp_fini(struct i915_page_directory_pointer *pdp) |
| 619 | { |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 620 | kfree(pdp->page_directory); |
| 621 | pdp->page_directory = NULL; |
| 622 | } |
| 623 | |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 624 | static inline bool use_4lvl(const struct i915_address_space *vm) |
| 625 | { |
| 626 | return i915_vm_is_48bit(vm); |
| 627 | } |
| 628 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 629 | static struct i915_page_directory_pointer * |
| 630 | alloc_pdp(struct i915_address_space *vm) |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 631 | { |
| 632 | struct i915_page_directory_pointer *pdp; |
| 633 | int ret = -ENOMEM; |
| 634 | |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 635 | WARN_ON(!use_4lvl(vm)); |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 636 | |
| 637 | pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); |
| 638 | if (!pdp) |
| 639 | return ERR_PTR(-ENOMEM); |
| 640 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 641 | ret = __pdp_init(vm, pdp); |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 642 | if (ret) |
| 643 | goto fail_bitmap; |
| 644 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 645 | ret = setup_px(vm, pdp); |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 646 | if (ret) |
| 647 | goto fail_page_m; |
| 648 | |
| 649 | return pdp; |
| 650 | |
| 651 | fail_page_m: |
| 652 | __pdp_fini(pdp); |
| 653 | fail_bitmap: |
| 654 | kfree(pdp); |
| 655 | |
| 656 | return ERR_PTR(ret); |
| 657 | } |
| 658 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 659 | static void free_pdp(struct i915_address_space *vm, |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 660 | struct i915_page_directory_pointer *pdp) |
| 661 | { |
| 662 | __pdp_fini(pdp); |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 663 | |
| 664 | if (!use_4lvl(vm)) |
| 665 | return; |
| 666 | |
| 667 | cleanup_px(vm, pdp); |
| 668 | kfree(pdp); |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 669 | } |
| 670 | |
Michel Thierry | 69ab76f | 2015-07-29 17:23:55 +0100 | [diff] [blame] | 671 | static void gen8_initialize_pdp(struct i915_address_space *vm, |
| 672 | struct i915_page_directory_pointer *pdp) |
| 673 | { |
| 674 | gen8_ppgtt_pdpe_t scratch_pdpe; |
| 675 | |
| 676 | scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC); |
| 677 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 678 | fill_px(vm, pdp, scratch_pdpe); |
Michel Thierry | 69ab76f | 2015-07-29 17:23:55 +0100 | [diff] [blame] | 679 | } |
| 680 | |
| 681 | static void gen8_initialize_pml4(struct i915_address_space *vm, |
| 682 | struct i915_pml4 *pml4) |
| 683 | { |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 684 | unsigned int i; |
Michel Thierry | 69ab76f | 2015-07-29 17:23:55 +0100 | [diff] [blame] | 685 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 686 | fill_px(vm, pml4, |
| 687 | gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC)); |
| 688 | for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) |
| 689 | pml4->pdps[i] = vm->scratch_pdp; |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 690 | } |
| 691 | |
Ben Widawsky | 94e409c | 2013-11-04 22:29:36 -0800 | [diff] [blame] | 692 | /* Broadwell Page Directory Pointer Descriptors */ |
John Harrison | e85b26d | 2015-05-29 17:43:56 +0100 | [diff] [blame] | 693 | static int gen8_write_pdp(struct drm_i915_gem_request *req, |
Michel Thierry | 7cb6d7a | 2015-04-08 12:13:29 +0100 | [diff] [blame] | 694 | unsigned entry, |
| 695 | dma_addr_t addr) |
Ben Widawsky | 94e409c | 2013-11-04 22:29:36 -0800 | [diff] [blame] | 696 | { |
Tvrtko Ursulin | 4a570db | 2016-03-16 11:00:38 +0000 | [diff] [blame] | 697 | struct intel_engine_cs *engine = req->engine; |
Tvrtko Ursulin | 73dec95 | 2017-02-14 11:32:42 +0000 | [diff] [blame] | 698 | u32 *cs; |
Ben Widawsky | 94e409c | 2013-11-04 22:29:36 -0800 | [diff] [blame] | 699 | |
| 700 | BUG_ON(entry >= 4); |
| 701 | |
Tvrtko Ursulin | 73dec95 | 2017-02-14 11:32:42 +0000 | [diff] [blame] | 702 | cs = intel_ring_begin(req, 6); |
| 703 | if (IS_ERR(cs)) |
| 704 | return PTR_ERR(cs); |
Ben Widawsky | 94e409c | 2013-11-04 22:29:36 -0800 | [diff] [blame] | 705 | |
Tvrtko Ursulin | 73dec95 | 2017-02-14 11:32:42 +0000 | [diff] [blame] | 706 | *cs++ = MI_LOAD_REGISTER_IMM(1); |
| 707 | *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry)); |
| 708 | *cs++ = upper_32_bits(addr); |
| 709 | *cs++ = MI_LOAD_REGISTER_IMM(1); |
| 710 | *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry)); |
| 711 | *cs++ = lower_32_bits(addr); |
| 712 | intel_ring_advance(req, cs); |
Ben Widawsky | 94e409c | 2013-11-04 22:29:36 -0800 | [diff] [blame] | 713 | |
| 714 | return 0; |
| 715 | } |
| 716 | |
Mika Kuoppala | e716776 | 2017-02-28 17:28:10 +0200 | [diff] [blame] | 717 | static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt, |
| 718 | struct drm_i915_gem_request *req) |
Ben Widawsky | 94e409c | 2013-11-04 22:29:36 -0800 | [diff] [blame] | 719 | { |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 720 | int i, ret; |
Ben Widawsky | 94e409c | 2013-11-04 22:29:36 -0800 | [diff] [blame] | 721 | |
Mika Kuoppala | e716776 | 2017-02-28 17:28:10 +0200 | [diff] [blame] | 722 | for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) { |
Mika Kuoppala | d852c7b | 2015-06-25 18:35:06 +0300 | [diff] [blame] | 723 | const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); |
| 724 | |
John Harrison | e85b26d | 2015-05-29 17:43:56 +0100 | [diff] [blame] | 725 | ret = gen8_write_pdp(req, i, pd_daddr); |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 726 | if (ret) |
| 727 | return ret; |
Ben Widawsky | 94e409c | 2013-11-04 22:29:36 -0800 | [diff] [blame] | 728 | } |
Ben Widawsky | d595bd4 | 2013-11-25 09:54:32 -0800 | [diff] [blame] | 729 | |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 730 | return 0; |
Ben Widawsky | 94e409c | 2013-11-04 22:29:36 -0800 | [diff] [blame] | 731 | } |
| 732 | |
Mika Kuoppala | e716776 | 2017-02-28 17:28:10 +0200 | [diff] [blame] | 733 | static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt, |
| 734 | struct drm_i915_gem_request *req) |
Michel Thierry | 2dba323 | 2015-07-30 11:06:23 +0100 | [diff] [blame] | 735 | { |
| 736 | return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4)); |
| 737 | } |
| 738 | |
Mika Kuoppala | fce9375 | 2016-10-31 17:24:46 +0200 | [diff] [blame] | 739 | /* PDE TLBs are a pain to invalidate on GEN8+. When we modify |
| 740 | * the page table structures, we mark them dirty so that |
| 741 | * context switching/execlist queuing code takes extra steps |
| 742 | * to ensure that tlbs are flushed. |
| 743 | */ |
| 744 | static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) |
| 745 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 746 | ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask; |
Mika Kuoppala | fce9375 | 2016-10-31 17:24:46 +0200 | [diff] [blame] | 747 | } |
| 748 | |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 749 | /* Removes entries from a single page table, releasing it if it's empty. |
| 750 | * Caller can use the return value to update higher-level entries. |
| 751 | */ |
| 752 | static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 753 | struct i915_page_table *pt, |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 754 | u64 start, u64 length) |
Ben Widawsky | 459108b | 2013-11-02 21:07:23 -0700 | [diff] [blame] | 755 | { |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 756 | unsigned int num_entries = gen8_pte_count(start, length); |
Mika Kuoppala | 37c6393 | 2016-11-01 15:27:36 +0200 | [diff] [blame] | 757 | unsigned int pte = gen8_pte_index(start); |
| 758 | unsigned int pte_end = pte + num_entries; |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 759 | const gen8_pte_t scratch_pte = |
| 760 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); |
| 761 | gen8_pte_t *vaddr; |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 762 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 763 | GEM_BUG_ON(num_entries > pt->used_ptes); |
Ben Widawsky | 459108b | 2013-11-02 21:07:23 -0700 | [diff] [blame] | 764 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 765 | pt->used_ptes -= num_entries; |
| 766 | if (!pt->used_ptes) |
| 767 | return true; |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 768 | |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 769 | vaddr = kmap_atomic_px(pt); |
Mika Kuoppala | 37c6393 | 2016-11-01 15:27:36 +0200 | [diff] [blame] | 770 | while (pte < pte_end) |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 771 | vaddr[pte++] = scratch_pte; |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 772 | kunmap_atomic(vaddr); |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 773 | |
| 774 | return false; |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 775 | } |
| 776 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 777 | static void gen8_ppgtt_set_pde(struct i915_address_space *vm, |
| 778 | struct i915_page_directory *pd, |
| 779 | struct i915_page_table *pt, |
| 780 | unsigned int pde) |
| 781 | { |
| 782 | gen8_pde_t *vaddr; |
| 783 | |
| 784 | pd->page_table[pde] = pt; |
| 785 | |
| 786 | vaddr = kmap_atomic_px(pd); |
| 787 | vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC); |
| 788 | kunmap_atomic(vaddr); |
| 789 | } |
| 790 | |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 791 | static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 792 | struct i915_page_directory *pd, |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 793 | u64 start, u64 length) |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 794 | { |
| 795 | struct i915_page_table *pt; |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 796 | u32 pde; |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 797 | |
| 798 | gen8_for_each_pde(pt, pd, start, length, pde) { |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 799 | GEM_BUG_ON(pt == vm->scratch_pt); |
| 800 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 801 | if (!gen8_ppgtt_clear_pt(vm, pt, start, length)) |
| 802 | continue; |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 803 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 804 | gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde); |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 805 | GEM_BUG_ON(!pd->used_pdes); |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 806 | pd->used_pdes--; |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 807 | |
| 808 | free_pt(vm, pt); |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 809 | } |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 810 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 811 | return !pd->used_pdes; |
| 812 | } |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 813 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 814 | static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm, |
| 815 | struct i915_page_directory_pointer *pdp, |
| 816 | struct i915_page_directory *pd, |
| 817 | unsigned int pdpe) |
| 818 | { |
| 819 | gen8_ppgtt_pdpe_t *vaddr; |
| 820 | |
| 821 | pdp->page_directory[pdpe] = pd; |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 822 | if (!use_4lvl(vm)) |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 823 | return; |
| 824 | |
| 825 | vaddr = kmap_atomic_px(pdp); |
| 826 | vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC); |
| 827 | kunmap_atomic(vaddr); |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 828 | } |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 829 | |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 830 | /* Removes entries from a single page dir pointer, releasing it if it's empty. |
| 831 | * Caller can use the return value to update higher-level entries |
| 832 | */ |
| 833 | static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 834 | struct i915_page_directory_pointer *pdp, |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 835 | u64 start, u64 length) |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 836 | { |
| 837 | struct i915_page_directory *pd; |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 838 | unsigned int pdpe; |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 839 | |
| 840 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 841 | GEM_BUG_ON(pd == vm->scratch_pd); |
| 842 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 843 | if (!gen8_ppgtt_clear_pd(vm, pd, start, length)) |
| 844 | continue; |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 845 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 846 | gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 847 | GEM_BUG_ON(!pdp->used_pdpes); |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 848 | pdp->used_pdpes--; |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 849 | |
| 850 | free_pd(vm, pd); |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 851 | } |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 852 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 853 | return !pdp->used_pdpes; |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 854 | } |
Ben Widawsky | 459108b | 2013-11-02 21:07:23 -0700 | [diff] [blame] | 855 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 856 | static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm, |
| 857 | u64 start, u64 length) |
| 858 | { |
| 859 | gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length); |
| 860 | } |
| 861 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 862 | static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4, |
| 863 | struct i915_page_directory_pointer *pdp, |
| 864 | unsigned int pml4e) |
| 865 | { |
| 866 | gen8_ppgtt_pml4e_t *vaddr; |
| 867 | |
| 868 | pml4->pdps[pml4e] = pdp; |
| 869 | |
| 870 | vaddr = kmap_atomic_px(pml4); |
| 871 | vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC); |
| 872 | kunmap_atomic(vaddr); |
| 873 | } |
| 874 | |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 875 | /* Removes entries from a single pml4. |
| 876 | * This is the top-level structure in 4-level page tables used on gen8+. |
| 877 | * Empty entries are always scratch pml4e. |
| 878 | */ |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 879 | static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, |
| 880 | u64 start, u64 length) |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 881 | { |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 882 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
| 883 | struct i915_pml4 *pml4 = &ppgtt->pml4; |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 884 | struct i915_page_directory_pointer *pdp; |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 885 | unsigned int pml4e; |
Michał Winiarski | 2ce5179 | 2016-10-13 14:02:42 +0200 | [diff] [blame] | 886 | |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 887 | GEM_BUG_ON(!use_4lvl(vm)); |
Ben Widawsky | 459108b | 2013-11-02 21:07:23 -0700 | [diff] [blame] | 888 | |
Michał Winiarski | d209b9c | 2016-10-13 14:02:41 +0200 | [diff] [blame] | 889 | gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 890 | GEM_BUG_ON(pdp == vm->scratch_pdp); |
| 891 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 892 | if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length)) |
| 893 | continue; |
Ben Widawsky | 459108b | 2013-11-02 21:07:23 -0700 | [diff] [blame] | 894 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 895 | gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 896 | |
| 897 | free_pdp(vm, pdp); |
Ben Widawsky | 459108b | 2013-11-02 21:07:23 -0700 | [diff] [blame] | 898 | } |
| 899 | } |
| 900 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 901 | struct sgt_dma { |
| 902 | struct scatterlist *sg; |
| 903 | dma_addr_t dma, max; |
| 904 | }; |
| 905 | |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 906 | struct gen8_insert_pte { |
| 907 | u16 pml4e; |
| 908 | u16 pdpe; |
| 909 | u16 pde; |
| 910 | u16 pte; |
| 911 | }; |
| 912 | |
| 913 | static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start) |
| 914 | { |
| 915 | return (struct gen8_insert_pte) { |
| 916 | gen8_pml4e_index(start), |
| 917 | gen8_pdpe_index(start), |
| 918 | gen8_pde_index(start), |
| 919 | gen8_pte_index(start), |
| 920 | }; |
| 921 | } |
| 922 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 923 | static __always_inline bool |
| 924 | gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, |
Michel Thierry | f9b5b78 | 2015-07-30 11:02:49 +0100 | [diff] [blame] | 925 | struct i915_page_directory_pointer *pdp, |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 926 | struct sgt_dma *iter, |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 927 | struct gen8_insert_pte *idx, |
Michel Thierry | f9b5b78 | 2015-07-30 11:02:49 +0100 | [diff] [blame] | 928 | enum i915_cache_level cache_level) |
| 929 | { |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 930 | struct i915_page_directory *pd; |
| 931 | const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level); |
| 932 | gen8_pte_t *vaddr; |
| 933 | bool ret; |
Ben Widawsky | 9df15b4 | 2013-11-02 21:07:24 -0700 | [diff] [blame] | 934 | |
Mika Kuoppala | 3e49004 | 2017-02-28 17:28:07 +0200 | [diff] [blame] | 935 | GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base)); |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 936 | pd = pdp->page_directory[idx->pdpe]; |
| 937 | vaddr = kmap_atomic_px(pd->page_table[idx->pde]); |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 938 | do { |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 939 | vaddr[idx->pte] = pte_encode | iter->dma; |
| 940 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 941 | iter->dma += PAGE_SIZE; |
| 942 | if (iter->dma >= iter->max) { |
| 943 | iter->sg = __sg_next(iter->sg); |
| 944 | if (!iter->sg) { |
| 945 | ret = false; |
| 946 | break; |
| 947 | } |
Ben Widawsky | 9df15b4 | 2013-11-02 21:07:24 -0700 | [diff] [blame] | 948 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 949 | iter->dma = sg_dma_address(iter->sg); |
| 950 | iter->max = iter->dma + iter->sg->length; |
Ben Widawsky | d7b3de9 | 2015-02-24 16:22:34 +0000 | [diff] [blame] | 951 | } |
Ben Widawsky | 7ad47cf | 2014-02-20 11:51:21 -0800 | [diff] [blame] | 952 | |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 953 | if (++idx->pte == GEN8_PTES) { |
| 954 | idx->pte = 0; |
| 955 | |
| 956 | if (++idx->pde == I915_PDES) { |
| 957 | idx->pde = 0; |
| 958 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 959 | /* Limited by sg length for 3lvl */ |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 960 | if (++idx->pdpe == GEN8_PML4ES_PER_PML4) { |
| 961 | idx->pdpe = 0; |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 962 | ret = true; |
Michel Thierry | de5ba8e | 2015-08-03 09:53:27 +0100 | [diff] [blame] | 963 | break; |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 964 | } |
| 965 | |
Mika Kuoppala | 3e49004 | 2017-02-28 17:28:07 +0200 | [diff] [blame] | 966 | GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base)); |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 967 | pd = pdp->page_directory[idx->pdpe]; |
Ben Widawsky | 7ad47cf | 2014-02-20 11:51:21 -0800 | [diff] [blame] | 968 | } |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 969 | |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 970 | kunmap_atomic(vaddr); |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 971 | vaddr = kmap_atomic_px(pd->page_table[idx->pde]); |
Ben Widawsky | 9df15b4 | 2013-11-02 21:07:24 -0700 | [diff] [blame] | 972 | } |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 973 | } while (1); |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 974 | kunmap_atomic(vaddr); |
Mika Kuoppala | d1c54ac | 2015-06-25 18:35:11 +0300 | [diff] [blame] | 975 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 976 | return ret; |
Ben Widawsky | 9df15b4 | 2013-11-02 21:07:24 -0700 | [diff] [blame] | 977 | } |
| 978 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 979 | static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 980 | struct i915_vma *vma, |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 981 | enum i915_cache_level cache_level, |
| 982 | u32 unused) |
Michel Thierry | f9b5b78 | 2015-07-30 11:02:49 +0100 | [diff] [blame] | 983 | { |
Chuanxiao Dong | 17369ba | 2017-07-07 17:50:59 +0800 | [diff] [blame] | 984 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 985 | struct sgt_dma iter = { |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 986 | .sg = vma->pages->sgl, |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 987 | .dma = sg_dma_address(iter.sg), |
| 988 | .max = iter.dma + iter.sg->length, |
| 989 | }; |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 990 | struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); |
Michel Thierry | f9b5b78 | 2015-07-30 11:02:49 +0100 | [diff] [blame] | 991 | |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 992 | gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx, |
| 993 | cache_level); |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 994 | } |
Michel Thierry | de5ba8e | 2015-08-03 09:53:27 +0100 | [diff] [blame] | 995 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 996 | static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 997 | struct i915_vma *vma, |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 998 | enum i915_cache_level cache_level, |
| 999 | u32 unused) |
| 1000 | { |
| 1001 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
| 1002 | struct sgt_dma iter = { |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 1003 | .sg = vma->pages->sgl, |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 1004 | .dma = sg_dma_address(iter.sg), |
| 1005 | .max = iter.dma + iter.sg->length, |
| 1006 | }; |
| 1007 | struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps; |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 1008 | struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); |
Michel Thierry | de5ba8e | 2015-08-03 09:53:27 +0100 | [diff] [blame] | 1009 | |
Chris Wilson | 9e89f9e | 2017-02-25 18:11:22 +0000 | [diff] [blame] | 1010 | while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter, |
| 1011 | &idx, cache_level)) |
| 1012 | GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); |
Michel Thierry | f9b5b78 | 2015-07-30 11:02:49 +0100 | [diff] [blame] | 1013 | } |
| 1014 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1015 | static void gen8_free_page_tables(struct i915_address_space *vm, |
Michel Thierry | f37c050 | 2015-06-10 17:46:39 +0100 | [diff] [blame] | 1016 | struct i915_page_directory *pd) |
Ben Widawsky | b45a671 | 2014-02-12 14:28:44 -0800 | [diff] [blame] | 1017 | { |
| 1018 | int i; |
| 1019 | |
Mika Kuoppala | 567047b | 2015-06-25 18:35:12 +0300 | [diff] [blame] | 1020 | if (!px_page(pd)) |
Ben Widawsky | 7ad47cf | 2014-02-20 11:51:21 -0800 | [diff] [blame] | 1021 | return; |
Ben Widawsky | b45a671 | 2014-02-12 14:28:44 -0800 | [diff] [blame] | 1022 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1023 | for (i = 0; i < I915_PDES; i++) { |
| 1024 | if (pd->page_table[i] != vm->scratch_pt) |
| 1025 | free_pt(vm, pd->page_table[i]); |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 1026 | } |
Ben Widawsky | d7b3de9 | 2015-02-24 16:22:34 +0000 | [diff] [blame] | 1027 | } |
| 1028 | |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1029 | static int gen8_init_scratch(struct i915_address_space *vm) |
| 1030 | { |
Matthew Auld | 64c050d | 2016-04-27 13:19:25 +0100 | [diff] [blame] | 1031 | int ret; |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1032 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1033 | ret = setup_scratch_page(vm, I915_GFP_DMA); |
Chris Wilson | 8bcdd0f7 | 2016-08-22 08:44:30 +0100 | [diff] [blame] | 1034 | if (ret) |
| 1035 | return ret; |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1036 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1037 | vm->scratch_pt = alloc_pt(vm); |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1038 | if (IS_ERR(vm->scratch_pt)) { |
Matthew Auld | 64c050d | 2016-04-27 13:19:25 +0100 | [diff] [blame] | 1039 | ret = PTR_ERR(vm->scratch_pt); |
| 1040 | goto free_scratch_page; |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1041 | } |
| 1042 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1043 | vm->scratch_pd = alloc_pd(vm); |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1044 | if (IS_ERR(vm->scratch_pd)) { |
Matthew Auld | 64c050d | 2016-04-27 13:19:25 +0100 | [diff] [blame] | 1045 | ret = PTR_ERR(vm->scratch_pd); |
| 1046 | goto free_pt; |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1047 | } |
| 1048 | |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1049 | if (use_4lvl(vm)) { |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1050 | vm->scratch_pdp = alloc_pdp(vm); |
Michel Thierry | 69ab76f | 2015-07-29 17:23:55 +0100 | [diff] [blame] | 1051 | if (IS_ERR(vm->scratch_pdp)) { |
Matthew Auld | 64c050d | 2016-04-27 13:19:25 +0100 | [diff] [blame] | 1052 | ret = PTR_ERR(vm->scratch_pdp); |
| 1053 | goto free_pd; |
Michel Thierry | 69ab76f | 2015-07-29 17:23:55 +0100 | [diff] [blame] | 1054 | } |
| 1055 | } |
| 1056 | |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1057 | gen8_initialize_pt(vm, vm->scratch_pt); |
| 1058 | gen8_initialize_pd(vm, vm->scratch_pd); |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1059 | if (use_4lvl(vm)) |
Michel Thierry | 69ab76f | 2015-07-29 17:23:55 +0100 | [diff] [blame] | 1060 | gen8_initialize_pdp(vm, vm->scratch_pdp); |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1061 | |
| 1062 | return 0; |
Matthew Auld | 64c050d | 2016-04-27 13:19:25 +0100 | [diff] [blame] | 1063 | |
| 1064 | free_pd: |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1065 | free_pd(vm, vm->scratch_pd); |
Matthew Auld | 64c050d | 2016-04-27 13:19:25 +0100 | [diff] [blame] | 1066 | free_pt: |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1067 | free_pt(vm, vm->scratch_pt); |
Matthew Auld | 64c050d | 2016-04-27 13:19:25 +0100 | [diff] [blame] | 1068 | free_scratch_page: |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1069 | cleanup_scratch_page(vm); |
Matthew Auld | 64c050d | 2016-04-27 13:19:25 +0100 | [diff] [blame] | 1070 | |
| 1071 | return ret; |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1072 | } |
| 1073 | |
Zhiyuan Lv | 650da34 | 2015-08-28 15:41:18 +0800 | [diff] [blame] | 1074 | static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) |
| 1075 | { |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1076 | struct i915_address_space *vm = &ppgtt->base; |
| 1077 | struct drm_i915_private *dev_priv = vm->i915; |
Zhiyuan Lv | 650da34 | 2015-08-28 15:41:18 +0800 | [diff] [blame] | 1078 | enum vgt_g2v_type msg; |
Zhiyuan Lv | 650da34 | 2015-08-28 15:41:18 +0800 | [diff] [blame] | 1079 | int i; |
| 1080 | |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1081 | if (use_4lvl(vm)) { |
| 1082 | const u64 daddr = px_dma(&ppgtt->pml4); |
Zhiyuan Lv | 650da34 | 2015-08-28 15:41:18 +0800 | [diff] [blame] | 1083 | |
Ville Syrjälä | ab75bb5 | 2015-11-04 23:20:12 +0200 | [diff] [blame] | 1084 | I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); |
| 1085 | I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); |
Zhiyuan Lv | 650da34 | 2015-08-28 15:41:18 +0800 | [diff] [blame] | 1086 | |
| 1087 | msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : |
| 1088 | VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); |
| 1089 | } else { |
Mika Kuoppala | e716776 | 2017-02-28 17:28:10 +0200 | [diff] [blame] | 1090 | for (i = 0; i < GEN8_3LVL_PDPES; i++) { |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1091 | const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); |
Zhiyuan Lv | 650da34 | 2015-08-28 15:41:18 +0800 | [diff] [blame] | 1092 | |
Ville Syrjälä | ab75bb5 | 2015-11-04 23:20:12 +0200 | [diff] [blame] | 1093 | I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); |
| 1094 | I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); |
Zhiyuan Lv | 650da34 | 2015-08-28 15:41:18 +0800 | [diff] [blame] | 1095 | } |
| 1096 | |
| 1097 | msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : |
| 1098 | VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); |
| 1099 | } |
| 1100 | |
| 1101 | I915_WRITE(vgtif_reg(g2v_notify), msg); |
| 1102 | |
| 1103 | return 0; |
| 1104 | } |
| 1105 | |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1106 | static void gen8_free_scratch(struct i915_address_space *vm) |
| 1107 | { |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1108 | if (use_4lvl(vm)) |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1109 | free_pdp(vm, vm->scratch_pdp); |
| 1110 | free_pd(vm, vm->scratch_pd); |
| 1111 | free_pt(vm, vm->scratch_pt); |
| 1112 | cleanup_scratch_page(vm); |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1113 | } |
| 1114 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1115 | static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1116 | struct i915_page_directory_pointer *pdp) |
Ben Widawsky | 7ad47cf | 2014-02-20 11:51:21 -0800 | [diff] [blame] | 1117 | { |
Mika Kuoppala | 3e49004 | 2017-02-28 17:28:07 +0200 | [diff] [blame] | 1118 | const unsigned int pdpes = i915_pdpes_per_pdp(vm); |
Ben Widawsky | 7ad47cf | 2014-02-20 11:51:21 -0800 | [diff] [blame] | 1119 | int i; |
| 1120 | |
Mika Kuoppala | 3e49004 | 2017-02-28 17:28:07 +0200 | [diff] [blame] | 1121 | for (i = 0; i < pdpes; i++) { |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1122 | if (pdp->page_directory[i] == vm->scratch_pd) |
Ben Widawsky | 06fda60 | 2015-02-24 16:22:36 +0000 | [diff] [blame] | 1123 | continue; |
| 1124 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1125 | gen8_free_page_tables(vm, pdp->page_directory[i]); |
| 1126 | free_pd(vm, pdp->page_directory[i]); |
Ben Widawsky | 7ad47cf | 2014-02-20 11:51:21 -0800 | [diff] [blame] | 1127 | } |
Michel Thierry | 69876be | 2015-04-08 12:13:27 +0100 | [diff] [blame] | 1128 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1129 | free_pdp(vm, pdp); |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1130 | } |
| 1131 | |
| 1132 | static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) |
| 1133 | { |
| 1134 | int i; |
| 1135 | |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1136 | for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { |
| 1137 | if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp) |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1138 | continue; |
| 1139 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1140 | gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]); |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1141 | } |
| 1142 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1143 | cleanup_px(&ppgtt->base, &ppgtt->pml4); |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1144 | } |
| 1145 | |
| 1146 | static void gen8_ppgtt_cleanup(struct i915_address_space *vm) |
| 1147 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 1148 | struct drm_i915_private *dev_priv = vm->i915; |
Joonas Lahtinen | e5716f5 | 2016-04-07 11:08:03 +0300 | [diff] [blame] | 1149 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1150 | |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 1151 | if (intel_vgpu_active(dev_priv)) |
Zhiyuan Lv | 650da34 | 2015-08-28 15:41:18 +0800 | [diff] [blame] | 1152 | gen8_ppgtt_notify_vgt(ppgtt, false); |
| 1153 | |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1154 | if (use_4lvl(vm)) |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1155 | gen8_ppgtt_cleanup_4lvl(ppgtt); |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1156 | else |
| 1157 | gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp); |
Michel Thierry | d4ec9da | 2015-07-30 11:02:03 +0100 | [diff] [blame] | 1158 | |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1159 | gen8_free_scratch(vm); |
Ben Widawsky | b45a671 | 2014-02-12 14:28:44 -0800 | [diff] [blame] | 1160 | } |
| 1161 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1162 | static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, |
| 1163 | struct i915_page_directory *pd, |
| 1164 | u64 start, u64 length) |
Ben Widawsky | d7b3de9 | 2015-02-24 16:22:34 +0000 | [diff] [blame] | 1165 | { |
Michel Thierry | d7b2633 | 2015-04-08 12:13:34 +0100 | [diff] [blame] | 1166 | struct i915_page_table *pt; |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1167 | u64 from = start; |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1168 | unsigned int pde; |
Ben Widawsky | d7b3de9 | 2015-02-24 16:22:34 +0000 | [diff] [blame] | 1169 | |
Dave Gordon | e8ebd8e | 2015-12-08 13:30:51 +0000 | [diff] [blame] | 1170 | gen8_for_each_pde(pt, pd, start, length, pde) { |
Chris Wilson | 1482667 | 2017-09-08 19:16:22 +0100 | [diff] [blame] | 1171 | int count = gen8_pte_count(start, length); |
| 1172 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1173 | if (pt == vm->scratch_pt) { |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1174 | pt = alloc_pt(vm); |
| 1175 | if (IS_ERR(pt)) |
| 1176 | goto unwind; |
| 1177 | |
Chris Wilson | 1482667 | 2017-09-08 19:16:22 +0100 | [diff] [blame] | 1178 | if (count < GEN8_PTES) |
| 1179 | gen8_initialize_pt(vm, pt); |
Ben Widawsky | d7b3de9 | 2015-02-24 16:22:34 +0000 | [diff] [blame] | 1180 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1181 | gen8_ppgtt_set_pde(vm, pd, pt, pde); |
| 1182 | pd->used_pdes++; |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 1183 | GEM_BUG_ON(pd->used_pdes > I915_PDES); |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1184 | } |
| 1185 | |
Chris Wilson | 1482667 | 2017-09-08 19:16:22 +0100 | [diff] [blame] | 1186 | pt->used_ptes += count; |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1187 | } |
Ben Widawsky | d7b3de9 | 2015-02-24 16:22:34 +0000 | [diff] [blame] | 1188 | return 0; |
| 1189 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1190 | unwind: |
| 1191 | gen8_ppgtt_clear_pd(vm, pd, from, start - from); |
Ben Widawsky | d7b3de9 | 2015-02-24 16:22:34 +0000 | [diff] [blame] | 1192 | return -ENOMEM; |
| 1193 | } |
| 1194 | |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1195 | static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, |
| 1196 | struct i915_page_directory_pointer *pdp, |
| 1197 | u64 start, u64 length) |
Ben Widawsky | bf2b4ed | 2014-02-19 22:05:43 -0800 | [diff] [blame] | 1198 | { |
Michel Thierry | 5441f0c | 2015-04-08 12:13:28 +0100 | [diff] [blame] | 1199 | struct i915_page_directory *pd; |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1200 | u64 from = start; |
| 1201 | unsigned int pdpe; |
Ben Widawsky | bf2b4ed | 2014-02-19 22:05:43 -0800 | [diff] [blame] | 1202 | int ret; |
| 1203 | |
Dave Gordon | e8ebd8e | 2015-12-08 13:30:51 +0000 | [diff] [blame] | 1204 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1205 | if (pd == vm->scratch_pd) { |
| 1206 | pd = alloc_pd(vm); |
| 1207 | if (IS_ERR(pd)) |
| 1208 | goto unwind; |
Michel Thierry | 5441f0c | 2015-04-08 12:13:28 +0100 | [diff] [blame] | 1209 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1210 | gen8_initialize_pd(vm, pd); |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1211 | gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1212 | pdp->used_pdpes++; |
Mika Kuoppala | 3e49004 | 2017-02-28 17:28:07 +0200 | [diff] [blame] | 1213 | GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm)); |
Chris Wilson | 75afcf7 | 2017-02-15 08:43:51 +0000 | [diff] [blame] | 1214 | |
| 1215 | mark_tlbs_dirty(i915_vm_to_ppgtt(vm)); |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1216 | } |
| 1217 | |
| 1218 | ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 1219 | if (unlikely(ret)) |
| 1220 | goto unwind_pd; |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1221 | } |
Michel Thierry | 33c8819 | 2015-04-08 12:13:33 +0100 | [diff] [blame] | 1222 | |
Ben Widawsky | d7b3de9 | 2015-02-24 16:22:34 +0000 | [diff] [blame] | 1223 | return 0; |
| 1224 | |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 1225 | unwind_pd: |
| 1226 | if (!pd->used_pdes) { |
| 1227 | gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); |
| 1228 | GEM_BUG_ON(!pdp->used_pdpes); |
| 1229 | pdp->used_pdpes--; |
| 1230 | free_pd(vm, pd); |
| 1231 | } |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1232 | unwind: |
| 1233 | gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); |
| 1234 | return -ENOMEM; |
Ben Widawsky | bf2b4ed | 2014-02-19 22:05:43 -0800 | [diff] [blame] | 1235 | } |
| 1236 | |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1237 | static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm, |
| 1238 | u64 start, u64 length) |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1239 | { |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1240 | return gen8_ppgtt_alloc_pdp(vm, |
| 1241 | &i915_vm_to_ppgtt(vm)->pdp, start, length); |
| 1242 | } |
| 1243 | |
| 1244 | static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, |
| 1245 | u64 start, u64 length) |
| 1246 | { |
| 1247 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
| 1248 | struct i915_pml4 *pml4 = &ppgtt->pml4; |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1249 | struct i915_page_directory_pointer *pdp; |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1250 | u64 from = start; |
| 1251 | u32 pml4e; |
| 1252 | int ret; |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1253 | |
Dave Gordon | e8ebd8e | 2015-12-08 13:30:51 +0000 | [diff] [blame] | 1254 | gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1255 | if (pml4->pdps[pml4e] == vm->scratch_pdp) { |
| 1256 | pdp = alloc_pdp(vm); |
| 1257 | if (IS_ERR(pdp)) |
| 1258 | goto unwind; |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1259 | |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1260 | gen8_initialize_pdp(vm, pdp); |
| 1261 | gen8_ppgtt_set_pml4e(pml4, pdp, pml4e); |
| 1262 | } |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1263 | |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1264 | ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length); |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 1265 | if (unlikely(ret)) |
| 1266 | goto unwind_pdp; |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1267 | } |
| 1268 | |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1269 | return 0; |
| 1270 | |
Chris Wilson | bf75d59 | 2017-02-27 12:26:52 +0000 | [diff] [blame] | 1271 | unwind_pdp: |
| 1272 | if (!pdp->used_pdpes) { |
| 1273 | gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); |
| 1274 | free_pdp(vm, pdp); |
| 1275 | } |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1276 | unwind: |
| 1277 | gen8_ppgtt_clear_4lvl(vm, from, start - from); |
| 1278 | return -ENOMEM; |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1279 | } |
| 1280 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1281 | static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, |
| 1282 | struct i915_page_directory_pointer *pdp, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1283 | u64 start, u64 length, |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1284 | gen8_pte_t scratch_pte, |
| 1285 | struct seq_file *m) |
| 1286 | { |
Mika Kuoppala | 3e49004 | 2017-02-28 17:28:07 +0200 | [diff] [blame] | 1287 | struct i915_address_space *vm = &ppgtt->base; |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1288 | struct i915_page_directory *pd; |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1289 | u32 pdpe; |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1290 | |
Dave Gordon | e8ebd8e | 2015-12-08 13:30:51 +0000 | [diff] [blame] | 1291 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1292 | struct i915_page_table *pt; |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1293 | u64 pd_len = length; |
| 1294 | u64 pd_start = start; |
| 1295 | u32 pde; |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1296 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1297 | if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd) |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1298 | continue; |
| 1299 | |
| 1300 | seq_printf(m, "\tPDPE #%d\n", pdpe); |
Dave Gordon | e8ebd8e | 2015-12-08 13:30:51 +0000 | [diff] [blame] | 1301 | gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1302 | u32 pte; |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1303 | gen8_pte_t *pt_vaddr; |
| 1304 | |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1305 | if (pd->page_table[pde] == ppgtt->base.scratch_pt) |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1306 | continue; |
| 1307 | |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 1308 | pt_vaddr = kmap_atomic_px(pt); |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1309 | for (pte = 0; pte < GEN8_PTES; pte += 4) { |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1310 | u64 va = (pdpe << GEN8_PDPE_SHIFT | |
| 1311 | pde << GEN8_PDE_SHIFT | |
| 1312 | pte << GEN8_PTE_SHIFT); |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1313 | int i; |
| 1314 | bool found = false; |
| 1315 | |
| 1316 | for (i = 0; i < 4; i++) |
| 1317 | if (pt_vaddr[pte + i] != scratch_pte) |
| 1318 | found = true; |
| 1319 | if (!found) |
| 1320 | continue; |
| 1321 | |
| 1322 | seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte); |
| 1323 | for (i = 0; i < 4; i++) { |
| 1324 | if (pt_vaddr[pte + i] != scratch_pte) |
| 1325 | seq_printf(m, " %llx", pt_vaddr[pte + i]); |
| 1326 | else |
| 1327 | seq_puts(m, " SCRATCH "); |
| 1328 | } |
| 1329 | seq_puts(m, "\n"); |
| 1330 | } |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1331 | kunmap_atomic(pt_vaddr); |
| 1332 | } |
| 1333 | } |
| 1334 | } |
| 1335 | |
| 1336 | static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) |
| 1337 | { |
| 1338 | struct i915_address_space *vm = &ppgtt->base; |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 1339 | const gen8_pte_t scratch_pte = |
| 1340 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); |
Chris Wilson | 381b943 | 2017-02-15 08:43:54 +0000 | [diff] [blame] | 1341 | u64 start = 0, length = ppgtt->base.total; |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1342 | |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1343 | if (use_4lvl(vm)) { |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1344 | u64 pml4e; |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1345 | struct i915_pml4 *pml4 = &ppgtt->pml4; |
| 1346 | struct i915_page_directory_pointer *pdp; |
| 1347 | |
Dave Gordon | e8ebd8e | 2015-12-08 13:30:51 +0000 | [diff] [blame] | 1348 | gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1349 | if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp) |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1350 | continue; |
| 1351 | |
| 1352 | seq_printf(m, " PML4E #%llu\n", pml4e); |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1353 | gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m); |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1354 | } |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1355 | } else { |
| 1356 | gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m); |
Michel Thierry | ea91e40 | 2015-07-29 17:23:57 +0100 | [diff] [blame] | 1357 | } |
| 1358 | } |
| 1359 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1360 | static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt) |
Zhiyuan Lv | 331f38e | 2015-08-28 15:41:14 +0800 | [diff] [blame] | 1361 | { |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1362 | struct i915_address_space *vm = &ppgtt->base; |
| 1363 | struct i915_page_directory_pointer *pdp = &ppgtt->pdp; |
| 1364 | struct i915_page_directory *pd; |
| 1365 | u64 start = 0, length = ppgtt->base.total; |
| 1366 | u64 from = start; |
| 1367 | unsigned int pdpe; |
Zhiyuan Lv | 331f38e | 2015-08-28 15:41:14 +0800 | [diff] [blame] | 1368 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1369 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
| 1370 | pd = alloc_pd(vm); |
| 1371 | if (IS_ERR(pd)) |
| 1372 | goto unwind; |
Zhiyuan Lv | 331f38e | 2015-08-28 15:41:14 +0800 | [diff] [blame] | 1373 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1374 | gen8_initialize_pd(vm, pd); |
| 1375 | gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); |
| 1376 | pdp->used_pdpes++; |
| 1377 | } |
Zhiyuan Lv | 331f38e | 2015-08-28 15:41:14 +0800 | [diff] [blame] | 1378 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1379 | pdp->used_pdpes++; /* never remove */ |
| 1380 | return 0; |
Zhiyuan Lv | 331f38e | 2015-08-28 15:41:14 +0800 | [diff] [blame] | 1381 | |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1382 | unwind: |
| 1383 | start -= from; |
| 1384 | gen8_for_each_pdpe(pd, pdp, from, start, pdpe) { |
| 1385 | gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); |
| 1386 | free_pd(vm, pd); |
| 1387 | } |
| 1388 | pdp->used_pdpes = 0; |
| 1389 | return -ENOMEM; |
Zhiyuan Lv | 331f38e | 2015-08-28 15:41:14 +0800 | [diff] [blame] | 1390 | } |
| 1391 | |
Daniel Vetter | eb0b44a | 2015-03-18 14:47:59 +0100 | [diff] [blame] | 1392 | /* |
Ben Widawsky | f3a964b | 2014-02-19 22:05:42 -0800 | [diff] [blame] | 1393 | * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers |
| 1394 | * with a net effect resembling a 2-level page table in normal x86 terms. Each |
| 1395 | * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address |
| 1396 | * space. |
Ben Widawsky | 37aca44 | 2013-11-04 20:47:32 -0800 | [diff] [blame] | 1397 | * |
Ben Widawsky | f3a964b | 2014-02-19 22:05:42 -0800 | [diff] [blame] | 1398 | */ |
Daniel Vetter | 5c5f645 | 2015-04-14 17:35:14 +0200 | [diff] [blame] | 1399 | static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
Ben Widawsky | 37aca44 | 2013-11-04 20:47:32 -0800 | [diff] [blame] | 1400 | { |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1401 | struct i915_address_space *vm = &ppgtt->base; |
| 1402 | struct drm_i915_private *dev_priv = vm->i915; |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1403 | int ret; |
Michel Thierry | 69876be | 2015-04-08 12:13:27 +0100 | [diff] [blame] | 1404 | |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1405 | ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ? |
| 1406 | 1ULL << 48 : |
| 1407 | 1ULL << 32; |
| 1408 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1409 | /* There are only few exceptions for gen >=6. chv and bxt. |
| 1410 | * And we are not sure about the latter so play safe for now. |
| 1411 | */ |
| 1412 | if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) |
| 1413 | ppgtt->base.pt_kmap_wc = true; |
| 1414 | |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 1415 | ret = gen8_init_scratch(&ppgtt->base); |
| 1416 | if (ret) { |
| 1417 | ppgtt->base.total = 0; |
| 1418 | return ret; |
| 1419 | } |
| 1420 | |
Mika Kuoppala | 1e6437b | 2017-02-28 17:28:09 +0200 | [diff] [blame] | 1421 | if (use_4lvl(vm)) { |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1422 | ret = setup_px(&ppgtt->base, &ppgtt->pml4); |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1423 | if (ret) |
| 1424 | goto free_scratch; |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 1425 | |
Michel Thierry | 69ab76f | 2015-07-29 17:23:55 +0100 | [diff] [blame] | 1426 | gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4); |
| 1427 | |
Mika Kuoppala | e716776 | 2017-02-28 17:28:10 +0200 | [diff] [blame] | 1428 | ppgtt->switch_mm = gen8_mm_switch_4lvl; |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1429 | ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl; |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 1430 | ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl; |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1431 | ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl; |
Michel Thierry | 762d993 | 2015-07-30 11:05:29 +0100 | [diff] [blame] | 1432 | } else { |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1433 | ret = __pdp_init(&ppgtt->base, &ppgtt->pdp); |
Michel Thierry | 81ba8aef | 2015-08-03 09:52:01 +0100 | [diff] [blame] | 1434 | if (ret) |
| 1435 | goto free_scratch; |
| 1436 | |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 1437 | if (intel_vgpu_active(dev_priv)) { |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1438 | ret = gen8_preallocate_top_level_pdp(ppgtt); |
| 1439 | if (ret) { |
| 1440 | __pdp_fini(&ppgtt->pdp); |
Zhiyuan Lv | 331f38e | 2015-08-28 15:41:14 +0800 | [diff] [blame] | 1441 | goto free_scratch; |
Chris Wilson | e2b763c | 2017-02-15 08:43:48 +0000 | [diff] [blame] | 1442 | } |
Zhiyuan Lv | 331f38e | 2015-08-28 15:41:14 +0800 | [diff] [blame] | 1443 | } |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 1444 | |
Mika Kuoppala | e716776 | 2017-02-28 17:28:10 +0200 | [diff] [blame] | 1445 | ppgtt->switch_mm = gen8_mm_switch_3lvl; |
Chris Wilson | c5d092a | 2017-02-15 08:43:49 +0000 | [diff] [blame] | 1446 | ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl; |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 1447 | ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl; |
Chris Wilson | fe52e37 | 2017-02-15 08:43:47 +0000 | [diff] [blame] | 1448 | ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl; |
Michel Thierry | 81ba8aef | 2015-08-03 09:52:01 +0100 | [diff] [blame] | 1449 | } |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 1450 | |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 1451 | if (intel_vgpu_active(dev_priv)) |
Zhiyuan Lv | 650da34 | 2015-08-28 15:41:18 +0800 | [diff] [blame] | 1452 | gen8_ppgtt_notify_vgt(ppgtt, true); |
| 1453 | |
Mika Kuoppala | 054b9ac | 2017-02-28 17:28:11 +0200 | [diff] [blame] | 1454 | ppgtt->base.cleanup = gen8_ppgtt_cleanup; |
| 1455 | ppgtt->base.unbind_vma = ppgtt_unbind_vma; |
| 1456 | ppgtt->base.bind_vma = ppgtt_bind_vma; |
| 1457 | ppgtt->debug_dump = gen8_dump_ppgtt; |
| 1458 | |
Michel Thierry | d7b2633 | 2015-04-08 12:13:34 +0100 | [diff] [blame] | 1459 | return 0; |
Michel Thierry | 6ac1850 | 2015-07-29 17:23:46 +0100 | [diff] [blame] | 1460 | |
| 1461 | free_scratch: |
| 1462 | gen8_free_scratch(&ppgtt->base); |
| 1463 | return ret; |
Michel Thierry | d7b2633 | 2015-04-08 12:13:34 +0100 | [diff] [blame] | 1464 | } |
| 1465 | |
Ben Widawsky | 87d60b6 | 2013-12-06 14:11:29 -0800 | [diff] [blame] | 1466 | static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) |
| 1467 | { |
Ben Widawsky | 87d60b6 | 2013-12-06 14:11:29 -0800 | [diff] [blame] | 1468 | struct i915_address_space *vm = &ppgtt->base; |
Michel Thierry | 09942c6 | 2015-04-08 12:13:30 +0100 | [diff] [blame] | 1469 | struct i915_page_table *unused; |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 1470 | gen6_pte_t scratch_pte; |
Chris Wilson | 381b943 | 2017-02-15 08:43:54 +0000 | [diff] [blame] | 1471 | u32 pd_entry, pte, pde; |
| 1472 | u32 start = 0, length = ppgtt->base.total; |
Ben Widawsky | 87d60b6 | 2013-12-06 14:11:29 -0800 | [diff] [blame] | 1473 | |
Chris Wilson | 8bcdd0f7 | 2016-08-22 08:44:30 +0100 | [diff] [blame] | 1474 | scratch_pte = vm->pte_encode(vm->scratch_page.daddr, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 1475 | I915_CACHE_LLC, 0); |
Ben Widawsky | 87d60b6 | 2013-12-06 14:11:29 -0800 | [diff] [blame] | 1476 | |
Dave Gordon | 731f74c | 2016-06-24 19:37:46 +0100 | [diff] [blame] | 1477 | gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) { |
Ben Widawsky | 87d60b6 | 2013-12-06 14:11:29 -0800 | [diff] [blame] | 1478 | u32 expected; |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 1479 | gen6_pte_t *pt_vaddr; |
Mika Kuoppala | 567047b | 2015-06-25 18:35:12 +0300 | [diff] [blame] | 1480 | const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]); |
Michel Thierry | 09942c6 | 2015-04-08 12:13:30 +0100 | [diff] [blame] | 1481 | pd_entry = readl(ppgtt->pd_addr + pde); |
Ben Widawsky | 87d60b6 | 2013-12-06 14:11:29 -0800 | [diff] [blame] | 1482 | expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); |
| 1483 | |
| 1484 | if (pd_entry != expected) |
| 1485 | seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n", |
| 1486 | pde, |
| 1487 | pd_entry, |
| 1488 | expected); |
| 1489 | seq_printf(m, "\tPDE: %x\n", pd_entry); |
| 1490 | |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 1491 | pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]); |
Mika Kuoppala | d1c54ac | 2015-06-25 18:35:11 +0300 | [diff] [blame] | 1492 | |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 1493 | for (pte = 0; pte < GEN6_PTES; pte+=4) { |
Ben Widawsky | 87d60b6 | 2013-12-06 14:11:29 -0800 | [diff] [blame] | 1494 | unsigned long va = |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 1495 | (pde * PAGE_SIZE * GEN6_PTES) + |
Ben Widawsky | 87d60b6 | 2013-12-06 14:11:29 -0800 | [diff] [blame] | 1496 | (pte * PAGE_SIZE); |
| 1497 | int i; |
| 1498 | bool found = false; |
| 1499 | for (i = 0; i < 4; i++) |
| 1500 | if (pt_vaddr[pte + i] != scratch_pte) |
| 1501 | found = true; |
| 1502 | if (!found) |
| 1503 | continue; |
| 1504 | |
| 1505 | seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte); |
| 1506 | for (i = 0; i < 4; i++) { |
| 1507 | if (pt_vaddr[pte + i] != scratch_pte) |
| 1508 | seq_printf(m, " %08x", pt_vaddr[pte + i]); |
| 1509 | else |
| 1510 | seq_puts(m, " SCRATCH "); |
| 1511 | } |
| 1512 | seq_puts(m, "\n"); |
| 1513 | } |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 1514 | kunmap_atomic(pt_vaddr); |
Ben Widawsky | 87d60b6 | 2013-12-06 14:11:29 -0800 | [diff] [blame] | 1515 | } |
| 1516 | } |
| 1517 | |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1518 | /* Write pde (index) from the page directory @pd to the page table @pt */ |
Chris Wilson | 16a011c | 2017-02-15 08:43:45 +0000 | [diff] [blame] | 1519 | static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt, |
| 1520 | const unsigned int pde, |
| 1521 | const struct i915_page_table *pt) |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1522 | { |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1523 | /* Caller needs to make sure the write completes if necessary */ |
Chris Wilson | 16a011c | 2017-02-15 08:43:45 +0000 | [diff] [blame] | 1524 | writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, |
| 1525 | ppgtt->pd_addr + pde); |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1526 | } |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1527 | |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1528 | /* Write all the page tables found in the ppgtt structure to incrementing page |
| 1529 | * directories. */ |
Chris Wilson | 16a011c | 2017-02-15 08:43:45 +0000 | [diff] [blame] | 1530 | static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1531 | u32 start, u32 length) |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1532 | { |
Michel Thierry | ec565b3 | 2015-04-08 12:13:23 +0100 | [diff] [blame] | 1533 | struct i915_page_table *pt; |
Chris Wilson | 16a011c | 2017-02-15 08:43:45 +0000 | [diff] [blame] | 1534 | unsigned int pde; |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1535 | |
Chris Wilson | 16a011c | 2017-02-15 08:43:45 +0000 | [diff] [blame] | 1536 | gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) |
| 1537 | gen6_write_pde(ppgtt, pde, pt); |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1538 | |
Chris Wilson | 16a011c | 2017-02-15 08:43:45 +0000 | [diff] [blame] | 1539 | mark_tlbs_dirty(ppgtt); |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1540 | wmb(); |
Ben Widawsky | 3e30254 | 2013-04-23 23:15:32 -0700 | [diff] [blame] | 1541 | } |
| 1542 | |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1543 | static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt) |
Ben Widawsky | 3e30254 | 2013-04-23 23:15:32 -0700 | [diff] [blame] | 1544 | { |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1545 | GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f); |
| 1546 | return ppgtt->pd.base.ggtt_offset << 10; |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1547 | } |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1548 | |
Ben Widawsky | 90252e5 | 2013-12-06 14:11:12 -0800 | [diff] [blame] | 1549 | static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, |
John Harrison | e85b26d | 2015-05-29 17:43:56 +0100 | [diff] [blame] | 1550 | struct drm_i915_gem_request *req) |
Ben Widawsky | 90252e5 | 2013-12-06 14:11:12 -0800 | [diff] [blame] | 1551 | { |
Tvrtko Ursulin | 4a570db | 2016-03-16 11:00:38 +0000 | [diff] [blame] | 1552 | struct intel_engine_cs *engine = req->engine; |
Tvrtko Ursulin | 73dec95 | 2017-02-14 11:32:42 +0000 | [diff] [blame] | 1553 | u32 *cs; |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1554 | |
Ben Widawsky | 90252e5 | 2013-12-06 14:11:12 -0800 | [diff] [blame] | 1555 | /* NB: TLBs must be flushed and invalidated before a switch */ |
Tvrtko Ursulin | 73dec95 | 2017-02-14 11:32:42 +0000 | [diff] [blame] | 1556 | cs = intel_ring_begin(req, 6); |
| 1557 | if (IS_ERR(cs)) |
| 1558 | return PTR_ERR(cs); |
Ben Widawsky | 90252e5 | 2013-12-06 14:11:12 -0800 | [diff] [blame] | 1559 | |
Tvrtko Ursulin | 73dec95 | 2017-02-14 11:32:42 +0000 | [diff] [blame] | 1560 | *cs++ = MI_LOAD_REGISTER_IMM(2); |
| 1561 | *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); |
| 1562 | *cs++ = PP_DIR_DCLV_2G; |
| 1563 | *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); |
| 1564 | *cs++ = get_pd_offset(ppgtt); |
| 1565 | *cs++ = MI_NOOP; |
| 1566 | intel_ring_advance(req, cs); |
Ben Widawsky | 90252e5 | 2013-12-06 14:11:12 -0800 | [diff] [blame] | 1567 | |
| 1568 | return 0; |
| 1569 | } |
| 1570 | |
Ben Widawsky | 48a1038 | 2013-12-06 14:11:11 -0800 | [diff] [blame] | 1571 | static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, |
John Harrison | e85b26d | 2015-05-29 17:43:56 +0100 | [diff] [blame] | 1572 | struct drm_i915_gem_request *req) |
Ben Widawsky | 48a1038 | 2013-12-06 14:11:11 -0800 | [diff] [blame] | 1573 | { |
Tvrtko Ursulin | 4a570db | 2016-03-16 11:00:38 +0000 | [diff] [blame] | 1574 | struct intel_engine_cs *engine = req->engine; |
Tvrtko Ursulin | 73dec95 | 2017-02-14 11:32:42 +0000 | [diff] [blame] | 1575 | u32 *cs; |
Ben Widawsky | 48a1038 | 2013-12-06 14:11:11 -0800 | [diff] [blame] | 1576 | |
Ben Widawsky | 48a1038 | 2013-12-06 14:11:11 -0800 | [diff] [blame] | 1577 | /* NB: TLBs must be flushed and invalidated before a switch */ |
Tvrtko Ursulin | 73dec95 | 2017-02-14 11:32:42 +0000 | [diff] [blame] | 1578 | cs = intel_ring_begin(req, 6); |
| 1579 | if (IS_ERR(cs)) |
| 1580 | return PTR_ERR(cs); |
Ben Widawsky | 48a1038 | 2013-12-06 14:11:11 -0800 | [diff] [blame] | 1581 | |
Tvrtko Ursulin | 73dec95 | 2017-02-14 11:32:42 +0000 | [diff] [blame] | 1582 | *cs++ = MI_LOAD_REGISTER_IMM(2); |
| 1583 | *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); |
| 1584 | *cs++ = PP_DIR_DCLV_2G; |
| 1585 | *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); |
| 1586 | *cs++ = get_pd_offset(ppgtt); |
| 1587 | *cs++ = MI_NOOP; |
| 1588 | intel_ring_advance(req, cs); |
Ben Widawsky | 48a1038 | 2013-12-06 14:11:11 -0800 | [diff] [blame] | 1589 | |
| 1590 | return 0; |
| 1591 | } |
| 1592 | |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 1593 | static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, |
John Harrison | e85b26d | 2015-05-29 17:43:56 +0100 | [diff] [blame] | 1594 | struct drm_i915_gem_request *req) |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 1595 | { |
Tvrtko Ursulin | 4a570db | 2016-03-16 11:00:38 +0000 | [diff] [blame] | 1596 | struct intel_engine_cs *engine = req->engine; |
Chris Wilson | 8eb9520 | 2016-07-04 08:48:31 +0100 | [diff] [blame] | 1597 | struct drm_i915_private *dev_priv = req->i915; |
Ben Widawsky | 48a1038 | 2013-12-06 14:11:11 -0800 | [diff] [blame] | 1598 | |
Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 1599 | I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); |
| 1600 | I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 1601 | return 0; |
| 1602 | } |
| 1603 | |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1604 | static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv) |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 1605 | { |
Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 1606 | struct intel_engine_cs *engine; |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 1607 | enum intel_engine_id id; |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 1608 | |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 1609 | for_each_engine(engine, dev_priv, id) { |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1610 | u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ? |
| 1611 | GEN8_GFX_PPGTT_48B : 0; |
Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 1612 | I915_WRITE(RING_MODE_GEN7(engine), |
Michel Thierry | 2dba323 | 2015-07-30 11:06:23 +0100 | [diff] [blame] | 1613 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 1614 | } |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 1615 | } |
| 1616 | |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1617 | static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1618 | { |
Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 1619 | struct intel_engine_cs *engine; |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1620 | u32 ecochk, ecobits; |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 1621 | enum intel_engine_id id; |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1622 | |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1623 | ecobits = I915_READ(GAC_ECO_BITS); |
| 1624 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); |
| 1625 | |
| 1626 | ecochk = I915_READ(GAM_ECOCHK); |
Tvrtko Ursulin | 772c2a5 | 2016-10-13 11:03:01 +0100 | [diff] [blame] | 1627 | if (IS_HASWELL(dev_priv)) { |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1628 | ecochk |= ECOCHK_PPGTT_WB_HSW; |
| 1629 | } else { |
| 1630 | ecochk |= ECOCHK_PPGTT_LLC_IVB; |
| 1631 | ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; |
| 1632 | } |
| 1633 | I915_WRITE(GAM_ECOCHK, ecochk); |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1634 | |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 1635 | for_each_engine(engine, dev_priv, id) { |
Ben Widawsky | eeb9488 | 2013-12-06 14:11:10 -0800 | [diff] [blame] | 1636 | /* GFX_MODE is per-ring on gen7+ */ |
Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 1637 | I915_WRITE(RING_MODE_GEN7(engine), |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1638 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1639 | } |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1640 | } |
| 1641 | |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1642 | static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1643 | { |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1644 | u32 ecochk, gab_ctl, ecobits; |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1645 | |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1646 | ecobits = I915_READ(GAC_ECO_BITS); |
| 1647 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | |
| 1648 | ECOBITS_PPGTT_CACHE64B); |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1649 | |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1650 | gab_ctl = I915_READ(GAB_CTL); |
| 1651 | I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1652 | |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1653 | ecochk = I915_READ(GAM_ECOCHK); |
| 1654 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1655 | |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1656 | I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
Ben Widawsky | 6197349 | 2013-04-08 18:43:54 -0700 | [diff] [blame] | 1657 | } |
| 1658 | |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1659 | /* PPGTT support for Sandybdrige/Gen6 and later */ |
Ben Widawsky | 853ba5d | 2013-07-16 16:50:05 -0700 | [diff] [blame] | 1660 | static void gen6_ppgtt_clear_range(struct i915_address_space *vm, |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1661 | u64 start, u64 length) |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1662 | { |
Joonas Lahtinen | e5716f5 | 2016-04-07 11:08:03 +0300 | [diff] [blame] | 1663 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1664 | unsigned int first_entry = start >> PAGE_SHIFT; |
| 1665 | unsigned int pde = first_entry / GEN6_PTES; |
| 1666 | unsigned int pte = first_entry % GEN6_PTES; |
| 1667 | unsigned int num_entries = length >> PAGE_SHIFT; |
| 1668 | gen6_pte_t scratch_pte = |
| 1669 | vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1670 | |
Daniel Vetter | 7bddb01 | 2012-02-09 17:15:47 +0100 | [diff] [blame] | 1671 | while (num_entries) { |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1672 | struct i915_page_table *pt = ppgtt->pd.page_table[pde++]; |
| 1673 | unsigned int end = min(pte + num_entries, GEN6_PTES); |
| 1674 | gen6_pte_t *vaddr; |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1675 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1676 | num_entries -= end - pte; |
Daniel Vetter | 7bddb01 | 2012-02-09 17:15:47 +0100 | [diff] [blame] | 1677 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1678 | /* Note that the hw doesn't support removing PDE on the fly |
| 1679 | * (they are cached inside the context with no means to |
| 1680 | * invalidate the cache), so we can only reset the PTE |
| 1681 | * entries back to scratch. |
| 1682 | */ |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1683 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1684 | vaddr = kmap_atomic_px(pt); |
| 1685 | do { |
| 1686 | vaddr[pte++] = scratch_pte; |
| 1687 | } while (pte < end); |
| 1688 | kunmap_atomic(vaddr); |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1689 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1690 | pte = 0; |
Daniel Vetter | 7bddb01 | 2012-02-09 17:15:47 +0100 | [diff] [blame] | 1691 | } |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1692 | } |
| 1693 | |
Ben Widawsky | 853ba5d | 2013-07-16 16:50:05 -0700 | [diff] [blame] | 1694 | static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 1695 | struct i915_vma *vma, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1696 | enum i915_cache_level cache_level, |
| 1697 | u32 flags) |
Daniel Vetter | def886c | 2013-01-24 14:44:56 -0800 | [diff] [blame] | 1698 | { |
Joonas Lahtinen | e5716f5 | 2016-04-07 11:08:03 +0300 | [diff] [blame] | 1699 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 1700 | unsigned first_entry = vma->node.start >> PAGE_SHIFT; |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 1701 | unsigned act_pt = first_entry / GEN6_PTES; |
| 1702 | unsigned act_pte = first_entry % GEN6_PTES; |
Chris Wilson | b31144c | 2017-02-15 08:43:36 +0000 | [diff] [blame] | 1703 | const u32 pte_encode = vm->pte_encode(0, cache_level, flags); |
| 1704 | struct sgt_dma iter; |
| 1705 | gen6_pte_t *vaddr; |
Daniel Vetter | def886c | 2013-01-24 14:44:56 -0800 | [diff] [blame] | 1706 | |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 1707 | vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]); |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 1708 | iter.sg = vma->pages->sgl; |
Chris Wilson | b31144c | 2017-02-15 08:43:36 +0000 | [diff] [blame] | 1709 | iter.dma = sg_dma_address(iter.sg); |
| 1710 | iter.max = iter.dma + iter.sg->length; |
| 1711 | do { |
| 1712 | vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); |
Daniel Vetter | def886c | 2013-01-24 14:44:56 -0800 | [diff] [blame] | 1713 | |
Chris Wilson | b31144c | 2017-02-15 08:43:36 +0000 | [diff] [blame] | 1714 | iter.dma += PAGE_SIZE; |
| 1715 | if (iter.dma == iter.max) { |
| 1716 | iter.sg = __sg_next(iter.sg); |
| 1717 | if (!iter.sg) |
| 1718 | break; |
| 1719 | |
| 1720 | iter.dma = sg_dma_address(iter.sg); |
| 1721 | iter.max = iter.dma + iter.sg->length; |
| 1722 | } |
Akash Goel | 24f3a8c | 2014-06-17 10:59:42 +0530 | [diff] [blame] | 1723 | |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 1724 | if (++act_pte == GEN6_PTES) { |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 1725 | kunmap_atomic(vaddr); |
| 1726 | vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]); |
Imre Deak | 6e995e2 | 2013-02-18 19:28:04 +0200 | [diff] [blame] | 1727 | act_pte = 0; |
Daniel Vetter | def886c | 2013-01-24 14:44:56 -0800 | [diff] [blame] | 1728 | } |
Chris Wilson | b31144c | 2017-02-15 08:43:36 +0000 | [diff] [blame] | 1729 | } while (1); |
Chris Wilson | 9231da7 | 2017-02-15 08:43:41 +0000 | [diff] [blame] | 1730 | kunmap_atomic(vaddr); |
Daniel Vetter | def886c | 2013-01-24 14:44:56 -0800 | [diff] [blame] | 1731 | } |
| 1732 | |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1733 | static int gen6_alloc_va_range(struct i915_address_space *vm, |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1734 | u64 start, u64 length) |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1735 | { |
Joonas Lahtinen | e5716f5 | 2016-04-07 11:08:03 +0300 | [diff] [blame] | 1736 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
Michel Thierry | ec565b3 | 2015-04-08 12:13:23 +0100 | [diff] [blame] | 1737 | struct i915_page_table *pt; |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1738 | u64 from = start; |
| 1739 | unsigned int pde; |
| 1740 | bool flush = false; |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1741 | |
Dave Gordon | 731f74c | 2016-06-24 19:37:46 +0100 | [diff] [blame] | 1742 | gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) { |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1743 | if (pt == vm->scratch_pt) { |
| 1744 | pt = alloc_pt(vm); |
| 1745 | if (IS_ERR(pt)) |
| 1746 | goto unwind_out; |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1747 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1748 | gen6_initialize_pt(vm, pt); |
| 1749 | ppgtt->pd.page_table[pde] = pt; |
Chris Wilson | 16a011c | 2017-02-15 08:43:45 +0000 | [diff] [blame] | 1750 | gen6_write_pde(ppgtt, pde, pt); |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1751 | flush = true; |
| 1752 | } |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1753 | } |
| 1754 | |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1755 | if (flush) { |
| 1756 | mark_tlbs_dirty(ppgtt); |
| 1757 | wmb(); |
| 1758 | } |
Michel Thierry | 4933d51 | 2015-03-24 15:46:22 +0000 | [diff] [blame] | 1759 | |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1760 | return 0; |
Michel Thierry | 4933d51 | 2015-03-24 15:46:22 +0000 | [diff] [blame] | 1761 | |
| 1762 | unwind_out: |
Chris Wilson | dd19674 | 2017-02-15 08:43:46 +0000 | [diff] [blame] | 1763 | gen6_ppgtt_clear_range(vm, from, start); |
| 1764 | return -ENOMEM; |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1765 | } |
| 1766 | |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1767 | static int gen6_init_scratch(struct i915_address_space *vm) |
| 1768 | { |
Chris Wilson | 8bcdd0f7 | 2016-08-22 08:44:30 +0100 | [diff] [blame] | 1769 | int ret; |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1770 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1771 | ret = setup_scratch_page(vm, I915_GFP_DMA); |
Chris Wilson | 8bcdd0f7 | 2016-08-22 08:44:30 +0100 | [diff] [blame] | 1772 | if (ret) |
| 1773 | return ret; |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1774 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1775 | vm->scratch_pt = alloc_pt(vm); |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1776 | if (IS_ERR(vm->scratch_pt)) { |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1777 | cleanup_scratch_page(vm); |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1778 | return PTR_ERR(vm->scratch_pt); |
| 1779 | } |
| 1780 | |
| 1781 | gen6_initialize_pt(vm, vm->scratch_pt); |
| 1782 | |
| 1783 | return 0; |
| 1784 | } |
| 1785 | |
| 1786 | static void gen6_free_scratch(struct i915_address_space *vm) |
| 1787 | { |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1788 | free_pt(vm, vm->scratch_pt); |
| 1789 | cleanup_scratch_page(vm); |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1790 | } |
| 1791 | |
Daniel Vetter | 061dd49 | 2015-04-14 17:35:13 +0200 | [diff] [blame] | 1792 | static void gen6_ppgtt_cleanup(struct i915_address_space *vm) |
Ben Widawsky | a00d825 | 2014-02-19 22:05:48 -0800 | [diff] [blame] | 1793 | { |
Joonas Lahtinen | e5716f5 | 2016-04-07 11:08:03 +0300 | [diff] [blame] | 1794 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
Dave Gordon | 731f74c | 2016-06-24 19:37:46 +0100 | [diff] [blame] | 1795 | struct i915_page_directory *pd = &ppgtt->pd; |
Michel Thierry | 09942c6 | 2015-04-08 12:13:30 +0100 | [diff] [blame] | 1796 | struct i915_page_table *pt; |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1797 | u32 pde; |
Daniel Vetter | 3440d26 | 2013-01-24 13:49:56 -0800 | [diff] [blame] | 1798 | |
Daniel Vetter | 061dd49 | 2015-04-14 17:35:13 +0200 | [diff] [blame] | 1799 | drm_mm_remove_node(&ppgtt->node); |
| 1800 | |
Dave Gordon | 731f74c | 2016-06-24 19:37:46 +0100 | [diff] [blame] | 1801 | gen6_for_all_pdes(pt, pd, pde) |
Mika Kuoppala | 79ab937 | 2015-06-25 18:35:17 +0300 | [diff] [blame] | 1802 | if (pt != vm->scratch_pt) |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1803 | free_pt(vm, pt); |
Michel Thierry | 4933d51 | 2015-03-24 15:46:22 +0000 | [diff] [blame] | 1804 | |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1805 | gen6_free_scratch(vm); |
Daniel Vetter | 3440d26 | 2013-01-24 13:49:56 -0800 | [diff] [blame] | 1806 | } |
| 1807 | |
Ben Widawsky | b146520 | 2014-02-19 22:05:49 -0800 | [diff] [blame] | 1808 | static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) |
Daniel Vetter | 3440d26 | 2013-01-24 13:49:56 -0800 | [diff] [blame] | 1809 | { |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1810 | struct i915_address_space *vm = &ppgtt->base; |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 1811 | struct drm_i915_private *dev_priv = ppgtt->base.i915; |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 1812 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
Ben Widawsky | b146520 | 2014-02-19 22:05:49 -0800 | [diff] [blame] | 1813 | int ret; |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1814 | |
Ben Widawsky | c8d4c0d | 2013-12-06 14:11:07 -0800 | [diff] [blame] | 1815 | /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The |
| 1816 | * allocator works in address space sizes, so it's multiplied by page |
| 1817 | * size. We allocate at the top of the GTT to avoid fragmentation. |
| 1818 | */ |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 1819 | BUG_ON(!drm_mm_initialized(&ggtt->base.mm)); |
Michel Thierry | 4933d51 | 2015-03-24 15:46:22 +0000 | [diff] [blame] | 1820 | |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1821 | ret = gen6_init_scratch(vm); |
| 1822 | if (ret) |
| 1823 | return ret; |
Michel Thierry | 4933d51 | 2015-03-24 15:46:22 +0000 | [diff] [blame] | 1824 | |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 1825 | ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node, |
| 1826 | GEN6_PD_SIZE, GEN6_PD_ALIGN, |
| 1827 | I915_COLOR_UNEVICTABLE, |
| 1828 | 0, ggtt->base.total, |
| 1829 | PIN_HIGH); |
Ben Widawsky | c8c2662 | 2015-01-22 17:01:25 +0000 | [diff] [blame] | 1830 | if (ret) |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1831 | goto err_out; |
| 1832 | |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 1833 | if (ppgtt->node.start < ggtt->mappable_end) |
Ben Widawsky | c8d4c0d | 2013-12-06 14:11:07 -0800 | [diff] [blame] | 1834 | DRM_DEBUG("Forced to use aperture for PDEs\n"); |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1835 | |
Chris Wilson | 52c126e | 2017-02-15 08:43:43 +0000 | [diff] [blame] | 1836 | ppgtt->pd.base.ggtt_offset = |
| 1837 | ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); |
| 1838 | |
| 1839 | ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + |
| 1840 | ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t); |
| 1841 | |
Ben Widawsky | c8c2662 | 2015-01-22 17:01:25 +0000 | [diff] [blame] | 1842 | return 0; |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1843 | |
| 1844 | err_out: |
Mika Kuoppala | 8776f02 | 2015-06-30 18:16:40 +0300 | [diff] [blame] | 1845 | gen6_free_scratch(vm); |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1846 | return ret; |
Ben Widawsky | b146520 | 2014-02-19 22:05:49 -0800 | [diff] [blame] | 1847 | } |
| 1848 | |
Ben Widawsky | b146520 | 2014-02-19 22:05:49 -0800 | [diff] [blame] | 1849 | static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) |
| 1850 | { |
kbuild test robot | 2f2cf68 | 2015-03-27 19:26:35 +0800 | [diff] [blame] | 1851 | return gen6_ppgtt_allocate_page_directories(ppgtt); |
Ben Widawsky | b146520 | 2014-02-19 22:05:49 -0800 | [diff] [blame] | 1852 | } |
| 1853 | |
Michel Thierry | 4933d51 | 2015-03-24 15:46:22 +0000 | [diff] [blame] | 1854 | static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1855 | u64 start, u64 length) |
Michel Thierry | 4933d51 | 2015-03-24 15:46:22 +0000 | [diff] [blame] | 1856 | { |
Michel Thierry | ec565b3 | 2015-04-08 12:13:23 +0100 | [diff] [blame] | 1857 | struct i915_page_table *unused; |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 1858 | u32 pde; |
Michel Thierry | 4933d51 | 2015-03-24 15:46:22 +0000 | [diff] [blame] | 1859 | |
Dave Gordon | 731f74c | 2016-06-24 19:37:46 +0100 | [diff] [blame] | 1860 | gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) |
Mika Kuoppala | 79ab937 | 2015-06-25 18:35:17 +0300 | [diff] [blame] | 1861 | ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; |
Michel Thierry | 4933d51 | 2015-03-24 15:46:22 +0000 | [diff] [blame] | 1862 | } |
| 1863 | |
Daniel Vetter | 5c5f645 | 2015-04-14 17:35:14 +0200 | [diff] [blame] | 1864 | static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
Ben Widawsky | b146520 | 2014-02-19 22:05:49 -0800 | [diff] [blame] | 1865 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 1866 | struct drm_i915_private *dev_priv = ppgtt->base.i915; |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 1867 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
Ben Widawsky | b146520 | 2014-02-19 22:05:49 -0800 | [diff] [blame] | 1868 | int ret; |
| 1869 | |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 1870 | ppgtt->base.pte_encode = ggtt->base.pte_encode; |
Tvrtko Ursulin | 5db9401 | 2016-10-13 11:03:10 +0100 | [diff] [blame] | 1871 | if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv)) |
Ben Widawsky | 48a1038 | 2013-12-06 14:11:11 -0800 | [diff] [blame] | 1872 | ppgtt->switch_mm = gen6_mm_switch; |
Tvrtko Ursulin | 772c2a5 | 2016-10-13 11:03:01 +0100 | [diff] [blame] | 1873 | else if (IS_HASWELL(dev_priv)) |
Ben Widawsky | 90252e5 | 2013-12-06 14:11:12 -0800 | [diff] [blame] | 1874 | ppgtt->switch_mm = hsw_mm_switch; |
Tvrtko Ursulin | 5db9401 | 2016-10-13 11:03:10 +0100 | [diff] [blame] | 1875 | else if (IS_GEN7(dev_priv)) |
Ben Widawsky | 48a1038 | 2013-12-06 14:11:11 -0800 | [diff] [blame] | 1876 | ppgtt->switch_mm = gen7_mm_switch; |
Chris Wilson | 8eb9520 | 2016-07-04 08:48:31 +0100 | [diff] [blame] | 1877 | else |
Ben Widawsky | b4a74e3 | 2013-12-06 14:11:09 -0800 | [diff] [blame] | 1878 | BUG(); |
Ben Widawsky | b146520 | 2014-02-19 22:05:49 -0800 | [diff] [blame] | 1879 | |
| 1880 | ret = gen6_ppgtt_alloc(ppgtt); |
| 1881 | if (ret) |
| 1882 | return ret; |
| 1883 | |
Michel Thierry | 09942c6 | 2015-04-08 12:13:30 +0100 | [diff] [blame] | 1884 | ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE; |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1885 | |
Daniel Vetter | 5c5f645 | 2015-04-14 17:35:14 +0200 | [diff] [blame] | 1886 | gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); |
Chris Wilson | 16a011c | 2017-02-15 08:43:45 +0000 | [diff] [blame] | 1887 | gen6_write_page_range(ppgtt, 0, ppgtt->base.total); |
Ben Widawsky | 678d96f | 2015-03-16 16:00:56 +0000 | [diff] [blame] | 1888 | |
Chris Wilson | 52c126e | 2017-02-15 08:43:43 +0000 | [diff] [blame] | 1889 | ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total); |
| 1890 | if (ret) { |
| 1891 | gen6_ppgtt_cleanup(&ppgtt->base); |
| 1892 | return ret; |
| 1893 | } |
| 1894 | |
Mika Kuoppala | 054b9ac | 2017-02-28 17:28:11 +0200 | [diff] [blame] | 1895 | ppgtt->base.clear_range = gen6_ppgtt_clear_range; |
| 1896 | ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; |
| 1897 | ppgtt->base.unbind_vma = ppgtt_unbind_vma; |
| 1898 | ppgtt->base.bind_vma = ppgtt_bind_vma; |
| 1899 | ppgtt->base.cleanup = gen6_ppgtt_cleanup; |
| 1900 | ppgtt->debug_dump = gen6_dump_ppgtt; |
| 1901 | |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 1902 | DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", |
Ben Widawsky | c8d4c0d | 2013-12-06 14:11:07 -0800 | [diff] [blame] | 1903 | ppgtt->node.size >> 20, |
| 1904 | ppgtt->node.start / PAGE_SIZE); |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1905 | |
Chris Wilson | 52c126e | 2017-02-15 08:43:43 +0000 | [diff] [blame] | 1906 | DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n", |
| 1907 | ppgtt->pd.base.ggtt_offset << 10); |
Daniel Vetter | fa76da3 | 2014-08-06 20:19:54 +0200 | [diff] [blame] | 1908 | |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1909 | return 0; |
Daniel Vetter | 3440d26 | 2013-01-24 13:49:56 -0800 | [diff] [blame] | 1910 | } |
| 1911 | |
Chris Wilson | 2bfa996 | 2016-08-04 07:52:25 +0100 | [diff] [blame] | 1912 | static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt, |
| 1913 | struct drm_i915_private *dev_priv) |
Daniel Vetter | 3440d26 | 2013-01-24 13:49:56 -0800 | [diff] [blame] | 1914 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 1915 | ppgtt->base.i915 = dev_priv; |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1916 | ppgtt->base.dma = &dev_priv->drm.pdev->dev; |
Daniel Vetter | 3440d26 | 2013-01-24 13:49:56 -0800 | [diff] [blame] | 1917 | |
Chris Wilson | 2bfa996 | 2016-08-04 07:52:25 +0100 | [diff] [blame] | 1918 | if (INTEL_INFO(dev_priv)->gen < 8) |
Daniel Vetter | 5c5f645 | 2015-04-14 17:35:14 +0200 | [diff] [blame] | 1919 | return gen6_ppgtt_init(ppgtt); |
Ben Widawsky | 3ed124b | 2013-04-08 18:43:53 -0700 | [diff] [blame] | 1920 | else |
Michel Thierry | d7b2633 | 2015-04-08 12:13:34 +0100 | [diff] [blame] | 1921 | return gen8_ppgtt_init(ppgtt); |
Daniel Vetter | fa76da3 | 2014-08-06 20:19:54 +0200 | [diff] [blame] | 1922 | } |
Mika Kuoppala | c114f76 | 2015-06-25 18:35:13 +0300 | [diff] [blame] | 1923 | |
Michał Winiarski | a2cad9d | 2015-09-16 11:49:00 +0200 | [diff] [blame] | 1924 | static void i915_address_space_init(struct i915_address_space *vm, |
Chris Wilson | 80b204b | 2016-10-28 13:58:58 +0100 | [diff] [blame] | 1925 | struct drm_i915_private *dev_priv, |
| 1926 | const char *name) |
Michał Winiarski | a2cad9d | 2015-09-16 11:49:00 +0200 | [diff] [blame] | 1927 | { |
Chris Wilson | 80b204b | 2016-10-28 13:58:58 +0100 | [diff] [blame] | 1928 | i915_gem_timeline_init(dev_priv, &vm->timeline, name); |
Chris Wilson | 47db922 | 2017-02-06 08:45:46 +0000 | [diff] [blame] | 1929 | |
Chris Wilson | 381b943 | 2017-02-15 08:43:54 +0000 | [diff] [blame] | 1930 | drm_mm_init(&vm->mm, 0, vm->total); |
Chris Wilson | 47db922 | 2017-02-06 08:45:46 +0000 | [diff] [blame] | 1931 | vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; |
| 1932 | |
Michał Winiarski | a2cad9d | 2015-09-16 11:49:00 +0200 | [diff] [blame] | 1933 | INIT_LIST_HEAD(&vm->active_list); |
| 1934 | INIT_LIST_HEAD(&vm->inactive_list); |
Chris Wilson | 50e046b | 2016-08-04 07:52:46 +0100 | [diff] [blame] | 1935 | INIT_LIST_HEAD(&vm->unbound_list); |
Chris Wilson | 47db922 | 2017-02-06 08:45:46 +0000 | [diff] [blame] | 1936 | |
Michał Winiarski | a2cad9d | 2015-09-16 11:49:00 +0200 | [diff] [blame] | 1937 | list_add_tail(&vm->global_link, &dev_priv->vm_list); |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1938 | pagevec_init(&vm->free_pages, false); |
Michał Winiarski | a2cad9d | 2015-09-16 11:49:00 +0200 | [diff] [blame] | 1939 | } |
| 1940 | |
Matthew Auld | ed9724d | 2016-11-17 21:04:10 +0000 | [diff] [blame] | 1941 | static void i915_address_space_fini(struct i915_address_space *vm) |
| 1942 | { |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1943 | if (pagevec_count(&vm->free_pages)) |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 1944 | vm_free_pages_release(vm, true); |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 1945 | |
Matthew Auld | ed9724d | 2016-11-17 21:04:10 +0000 | [diff] [blame] | 1946 | i915_gem_timeline_fini(&vm->timeline); |
| 1947 | drm_mm_takedown(&vm->mm); |
| 1948 | list_del(&vm->global_link); |
| 1949 | } |
| 1950 | |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1951 | static void gtt_write_workarounds(struct drm_i915_private *dev_priv) |
Tim Gore | d5165eb | 2016-02-04 11:49:34 +0000 | [diff] [blame] | 1952 | { |
Tim Gore | d5165eb | 2016-02-04 11:49:34 +0000 | [diff] [blame] | 1953 | /* This function is for gtt related workarounds. This function is |
| 1954 | * called on driver load and after a GPU reset, so you can place |
| 1955 | * workarounds here even if they get overwritten by GPU reset. |
| 1956 | */ |
Rodrigo Vivi | 90007bc | 2017-08-15 16:16:48 -0700 | [diff] [blame] | 1957 | /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */ |
Tvrtko Ursulin | 8652744 | 2016-10-13 11:03:00 +0100 | [diff] [blame] | 1958 | if (IS_BROADWELL(dev_priv)) |
Tim Gore | d5165eb | 2016-02-04 11:49:34 +0000 | [diff] [blame] | 1959 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); |
Tvrtko Ursulin | 920a14b | 2016-10-14 10:13:44 +0100 | [diff] [blame] | 1960 | else if (IS_CHERRYVIEW(dev_priv)) |
Tim Gore | d5165eb | 2016-02-04 11:49:34 +0000 | [diff] [blame] | 1961 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); |
Rodrigo Vivi | 90007bc | 2017-08-15 16:16:48 -0700 | [diff] [blame] | 1962 | else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv)) |
Tim Gore | d5165eb | 2016-02-04 11:49:34 +0000 | [diff] [blame] | 1963 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); |
Ander Conselvan de Oliveira | 9fb5026 | 2017-01-26 11:16:58 +0200 | [diff] [blame] | 1964 | else if (IS_GEN9_LP(dev_priv)) |
Tim Gore | d5165eb | 2016-02-04 11:49:34 +0000 | [diff] [blame] | 1965 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); |
| 1966 | } |
| 1967 | |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1968 | int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) |
Daniel Vetter | 82460d9 | 2014-08-06 20:19:53 +0200 | [diff] [blame] | 1969 | { |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1970 | gtt_write_workarounds(dev_priv); |
Tim Gore | d5165eb | 2016-02-04 11:49:34 +0000 | [diff] [blame] | 1971 | |
Thomas Daniel | 671b5013 | 2014-08-20 16:24:50 +0100 | [diff] [blame] | 1972 | /* In the case of execlists, PPGTT is enabled by the context descriptor |
| 1973 | * and the PDPs are contained within the context itself. We don't |
| 1974 | * need to do anything here. */ |
Michal Wajdeczko | 4f044a8 | 2017-09-19 19:38:44 +0000 | [diff] [blame^] | 1975 | if (i915_modparams.enable_execlists) |
Thomas Daniel | 671b5013 | 2014-08-20 16:24:50 +0100 | [diff] [blame] | 1976 | return 0; |
| 1977 | |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1978 | if (!USES_PPGTT(dev_priv)) |
Daniel Vetter | 82460d9 | 2014-08-06 20:19:53 +0200 | [diff] [blame] | 1979 | return 0; |
| 1980 | |
Tvrtko Ursulin | 5db9401 | 2016-10-13 11:03:10 +0100 | [diff] [blame] | 1981 | if (IS_GEN6(dev_priv)) |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1982 | gen6_ppgtt_enable(dev_priv); |
Tvrtko Ursulin | 5db9401 | 2016-10-13 11:03:10 +0100 | [diff] [blame] | 1983 | else if (IS_GEN7(dev_priv)) |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1984 | gen7_ppgtt_enable(dev_priv); |
| 1985 | else if (INTEL_GEN(dev_priv) >= 8) |
| 1986 | gen8_ppgtt_enable(dev_priv); |
Daniel Vetter | 82460d9 | 2014-08-06 20:19:53 +0200 | [diff] [blame] | 1987 | else |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1988 | MISSING_CASE(INTEL_GEN(dev_priv)); |
Daniel Vetter | 82460d9 | 2014-08-06 20:19:53 +0200 | [diff] [blame] | 1989 | |
John Harrison | 4ad2fd8 | 2015-06-18 13:11:20 +0100 | [diff] [blame] | 1990 | return 0; |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 1991 | } |
John Harrison | 4ad2fd8 | 2015-06-18 13:11:20 +0100 | [diff] [blame] | 1992 | |
Daniel Vetter | 4d88470 | 2014-08-06 15:04:47 +0200 | [diff] [blame] | 1993 | struct i915_hw_ppgtt * |
Chris Wilson | 2bfa996 | 2016-08-04 07:52:25 +0100 | [diff] [blame] | 1994 | i915_ppgtt_create(struct drm_i915_private *dev_priv, |
Chris Wilson | 80b204b | 2016-10-28 13:58:58 +0100 | [diff] [blame] | 1995 | struct drm_i915_file_private *fpriv, |
| 1996 | const char *name) |
Daniel Vetter | 4d88470 | 2014-08-06 15:04:47 +0200 | [diff] [blame] | 1997 | { |
| 1998 | struct i915_hw_ppgtt *ppgtt; |
| 1999 | int ret; |
| 2000 | |
| 2001 | ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); |
| 2002 | if (!ppgtt) |
| 2003 | return ERR_PTR(-ENOMEM); |
| 2004 | |
Chris Wilson | 1188bc6 | 2017-02-15 08:43:38 +0000 | [diff] [blame] | 2005 | ret = __hw_ppgtt_init(ppgtt, dev_priv); |
Daniel Vetter | 4d88470 | 2014-08-06 15:04:47 +0200 | [diff] [blame] | 2006 | if (ret) { |
| 2007 | kfree(ppgtt); |
| 2008 | return ERR_PTR(ret); |
| 2009 | } |
| 2010 | |
Chris Wilson | 1188bc6 | 2017-02-15 08:43:38 +0000 | [diff] [blame] | 2011 | kref_init(&ppgtt->ref); |
| 2012 | i915_address_space_init(&ppgtt->base, dev_priv, name); |
| 2013 | ppgtt->base.file = fpriv; |
| 2014 | |
Daniele Ceraolo Spurio | 198c974 | 2014-11-10 13:44:31 +0000 | [diff] [blame] | 2015 | trace_i915_ppgtt_create(&ppgtt->base); |
| 2016 | |
Daniel Vetter | 4d88470 | 2014-08-06 15:04:47 +0200 | [diff] [blame] | 2017 | return ppgtt; |
| 2018 | } |
| 2019 | |
Chris Wilson | 0c7eeda | 2017-01-11 21:09:25 +0000 | [diff] [blame] | 2020 | void i915_ppgtt_close(struct i915_address_space *vm) |
| 2021 | { |
| 2022 | struct list_head *phases[] = { |
| 2023 | &vm->active_list, |
| 2024 | &vm->inactive_list, |
| 2025 | &vm->unbound_list, |
| 2026 | NULL, |
| 2027 | }, **phase; |
| 2028 | |
| 2029 | GEM_BUG_ON(vm->closed); |
| 2030 | vm->closed = true; |
| 2031 | |
| 2032 | for (phase = phases; *phase; phase++) { |
| 2033 | struct i915_vma *vma, *vn; |
| 2034 | |
| 2035 | list_for_each_entry_safe(vma, vn, *phase, vm_link) |
| 2036 | if (!i915_vma_is_closed(vma)) |
| 2037 | i915_vma_close(vma); |
| 2038 | } |
| 2039 | } |
| 2040 | |
Matthew Auld | ed9724d | 2016-11-17 21:04:10 +0000 | [diff] [blame] | 2041 | void i915_ppgtt_release(struct kref *kref) |
Daniel Vetter | ee960be | 2014-08-06 15:04:45 +0200 | [diff] [blame] | 2042 | { |
| 2043 | struct i915_hw_ppgtt *ppgtt = |
| 2044 | container_of(kref, struct i915_hw_ppgtt, ref); |
| 2045 | |
Daniele Ceraolo Spurio | 198c974 | 2014-11-10 13:44:31 +0000 | [diff] [blame] | 2046 | trace_i915_ppgtt_release(&ppgtt->base); |
| 2047 | |
Chris Wilson | 50e046b | 2016-08-04 07:52:46 +0100 | [diff] [blame] | 2048 | /* vmas should already be unbound and destroyed */ |
Daniel Vetter | ee960be | 2014-08-06 15:04:45 +0200 | [diff] [blame] | 2049 | WARN_ON(!list_empty(&ppgtt->base.active_list)); |
| 2050 | WARN_ON(!list_empty(&ppgtt->base.inactive_list)); |
Chris Wilson | 50e046b | 2016-08-04 07:52:46 +0100 | [diff] [blame] | 2051 | WARN_ON(!list_empty(&ppgtt->base.unbound_list)); |
Daniel Vetter | ee960be | 2014-08-06 15:04:45 +0200 | [diff] [blame] | 2052 | |
| 2053 | ppgtt->base.cleanup(&ppgtt->base); |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 2054 | i915_address_space_fini(&ppgtt->base); |
Daniel Vetter | ee960be | 2014-08-06 15:04:45 +0200 | [diff] [blame] | 2055 | kfree(ppgtt); |
| 2056 | } |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 2057 | |
Ben Widawsky | a81cc00 | 2013-01-18 12:30:31 -0800 | [diff] [blame] | 2058 | /* Certain Gen5 chipsets require require idling the GPU before |
| 2059 | * unmapping anything from the GTT when VT-d is enabled. |
| 2060 | */ |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 2061 | static bool needs_idle_maps(struct drm_i915_private *dev_priv) |
Ben Widawsky | a81cc00 | 2013-01-18 12:30:31 -0800 | [diff] [blame] | 2062 | { |
Ben Widawsky | a81cc00 | 2013-01-18 12:30:31 -0800 | [diff] [blame] | 2063 | /* Query intel_iommu to see if we need the workaround. Presumably that |
| 2064 | * was loaded first. |
| 2065 | */ |
Chris Wilson | 80debff | 2017-05-25 13:16:12 +0100 | [diff] [blame] | 2066 | return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active(); |
Ben Widawsky | a81cc00 | 2013-01-18 12:30:31 -0800 | [diff] [blame] | 2067 | } |
| 2068 | |
Chris Wilson | dc97997 | 2016-05-10 14:10:04 +0100 | [diff] [blame] | 2069 | void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2070 | { |
Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 2071 | struct intel_engine_cs *engine; |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2072 | enum intel_engine_id id; |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2073 | |
Chris Wilson | dc97997 | 2016-05-10 14:10:04 +0100 | [diff] [blame] | 2074 | if (INTEL_INFO(dev_priv)->gen < 6) |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2075 | return; |
| 2076 | |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2077 | for_each_engine(engine, dev_priv, id) { |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2078 | u32 fault_reg; |
Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 2079 | fault_reg = I915_READ(RING_FAULT_REG(engine)); |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2080 | if (fault_reg & RING_FAULT_VALID) { |
| 2081 | DRM_DEBUG_DRIVER("Unexpected fault\n" |
Paulo Zanoni | 59a5d29 | 2014-10-30 15:52:45 -0200 | [diff] [blame] | 2082 | "\tAddr: 0x%08lx\n" |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2083 | "\tAddress space: %s\n" |
| 2084 | "\tSource ID: %d\n" |
| 2085 | "\tType: %d\n", |
| 2086 | fault_reg & PAGE_MASK, |
| 2087 | fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", |
| 2088 | RING_FAULT_SRCID(fault_reg), |
| 2089 | RING_FAULT_FAULT_TYPE(fault_reg)); |
Tvrtko Ursulin | e2f8039 | 2016-03-16 11:00:36 +0000 | [diff] [blame] | 2090 | I915_WRITE(RING_FAULT_REG(engine), |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2091 | fault_reg & ~RING_FAULT_VALID); |
| 2092 | } |
| 2093 | } |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2094 | |
| 2095 | /* Engine specific init may not have been done till this point. */ |
| 2096 | if (dev_priv->engine[RCS]) |
| 2097 | POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2098 | } |
| 2099 | |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 2100 | void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2101 | { |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2102 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2103 | |
| 2104 | /* Don't bother messing with faults pre GEN6 as we have little |
| 2105 | * documentation supporting that it's a good idea. |
| 2106 | */ |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 2107 | if (INTEL_GEN(dev_priv) < 6) |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2108 | return; |
| 2109 | |
Chris Wilson | dc97997 | 2016-05-10 14:10:04 +0100 | [diff] [blame] | 2110 | i915_check_and_clear_faults(dev_priv); |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2111 | |
Chris Wilson | 381b943 | 2017-02-15 08:43:54 +0000 | [diff] [blame] | 2112 | ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total); |
Chris Wilson | 91e5649 | 2014-09-25 10:13:12 +0100 | [diff] [blame] | 2113 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 2114 | i915_ggtt_invalidate(dev_priv); |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2115 | } |
| 2116 | |
Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2117 | int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, |
| 2118 | struct sg_table *pages) |
Daniel Vetter | 7c2e6fd | 2010-11-06 10:10:47 +0100 | [diff] [blame] | 2119 | { |
Chris Wilson | 1a292fa | 2017-01-06 15:22:39 +0000 | [diff] [blame] | 2120 | do { |
| 2121 | if (dma_map_sg(&obj->base.dev->pdev->dev, |
| 2122 | pages->sgl, pages->nents, |
| 2123 | PCI_DMA_BIDIRECTIONAL)) |
| 2124 | return 0; |
| 2125 | |
| 2126 | /* If the DMA remap fails, one cause can be that we have |
| 2127 | * too many objects pinned in a small remapping table, |
| 2128 | * such as swiotlb. Incrementally purge all other objects and |
| 2129 | * try again - if there are no more pages to remove from |
| 2130 | * the DMA remapper, i915_gem_shrink will return 0. |
| 2131 | */ |
| 2132 | GEM_BUG_ON(obj->mm.pages == pages); |
| 2133 | } while (i915_gem_shrink(to_i915(obj->base.dev), |
| 2134 | obj->base.size >> PAGE_SHIFT, |
| 2135 | I915_SHRINK_BOUND | |
| 2136 | I915_SHRINK_UNBOUND | |
| 2137 | I915_SHRINK_ACTIVE)); |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 2138 | |
Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2139 | return -ENOSPC; |
Daniel Vetter | 7c2e6fd | 2010-11-06 10:10:47 +0100 | [diff] [blame] | 2140 | } |
| 2141 | |
Daniel Vetter | 2c642b0 | 2015-04-14 17:35:26 +0200 | [diff] [blame] | 2142 | static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2143 | { |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2144 | writeq(pte, addr); |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2145 | } |
| 2146 | |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2147 | static void gen8_ggtt_insert_page(struct i915_address_space *vm, |
| 2148 | dma_addr_t addr, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2149 | u64 offset, |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2150 | enum i915_cache_level level, |
| 2151 | u32 unused) |
| 2152 | { |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 2153 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2154 | gen8_pte_t __iomem *pte = |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 2155 | (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2156 | |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 2157 | gen8_set_pte(pte, gen8_pte_encode(addr, level)); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2158 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 2159 | ggtt->invalidate(vm->i915); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2160 | } |
| 2161 | |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2162 | static void gen8_ggtt_insert_entries(struct i915_address_space *vm, |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2163 | struct i915_vma *vma, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2164 | enum i915_cache_level level, |
| 2165 | u32 unused) |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2166 | { |
Chris Wilson | ce7fda2 | 2016-04-28 09:56:38 +0100 | [diff] [blame] | 2167 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2168 | struct sgt_iter sgt_iter; |
| 2169 | gen8_pte_t __iomem *gtt_entries; |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 2170 | const gen8_pte_t pte_encode = gen8_pte_encode(0, level); |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2171 | dma_addr_t addr; |
Imre Deak | be69459 | 2015-12-15 20:10:38 +0200 | [diff] [blame] | 2172 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 2173 | gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2174 | gtt_entries += vma->node.start >> PAGE_SHIFT; |
| 2175 | for_each_sgt_dma(addr, sgt_iter, vma->pages) |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 2176 | gen8_set_pte(gtt_entries++, pte_encode | addr); |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2177 | |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 2178 | wmb(); |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2179 | |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2180 | /* This next bit makes the above posting read even more important. We |
| 2181 | * want to flush the TLBs only after we're certain all the PTE updates |
| 2182 | * have finished. |
| 2183 | */ |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 2184 | ggtt->invalidate(vm->i915); |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2185 | } |
| 2186 | |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2187 | static void gen6_ggtt_insert_page(struct i915_address_space *vm, |
| 2188 | dma_addr_t addr, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2189 | u64 offset, |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2190 | enum i915_cache_level level, |
| 2191 | u32 flags) |
| 2192 | { |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 2193 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2194 | gen6_pte_t __iomem *pte = |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 2195 | (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2196 | |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 2197 | iowrite32(vm->pte_encode(addr, level, flags), pte); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2198 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 2199 | ggtt->invalidate(vm->i915); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2200 | } |
| 2201 | |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 2202 | /* |
| 2203 | * Binds an object into the global gtt with the specified cache level. The object |
| 2204 | * will be accessible to the GPU via commands whose operands reference offsets |
| 2205 | * within the global GTT as well as accessible by the GPU through the GMADR |
| 2206 | * mapped BAR (dev_priv->mm.gtt->gtt). |
| 2207 | */ |
Ben Widawsky | 853ba5d | 2013-07-16 16:50:05 -0700 | [diff] [blame] | 2208 | static void gen6_ggtt_insert_entries(struct i915_address_space *vm, |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2209 | struct i915_vma *vma, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2210 | enum i915_cache_level level, |
| 2211 | u32 flags) |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 2212 | { |
Chris Wilson | ce7fda2 | 2016-04-28 09:56:38 +0100 | [diff] [blame] | 2213 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
Chris Wilson | b31144c | 2017-02-15 08:43:36 +0000 | [diff] [blame] | 2214 | gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2215 | unsigned int i = vma->node.start >> PAGE_SHIFT; |
Chris Wilson | b31144c | 2017-02-15 08:43:36 +0000 | [diff] [blame] | 2216 | struct sgt_iter iter; |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2217 | dma_addr_t addr; |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2218 | for_each_sgt_dma(addr, iter, vma->pages) |
Chris Wilson | b31144c | 2017-02-15 08:43:36 +0000 | [diff] [blame] | 2219 | iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); |
| 2220 | wmb(); |
Ben Widawsky | 0f9b91c | 2012-11-04 09:21:30 -0800 | [diff] [blame] | 2221 | |
| 2222 | /* This next bit makes the above posting read even more important. We |
| 2223 | * want to flush the TLBs only after we're certain all the PTE updates |
| 2224 | * have finished. |
| 2225 | */ |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 2226 | ggtt->invalidate(vm->i915); |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 2227 | } |
| 2228 | |
Chris Wilson | f7770bf | 2016-05-14 07:26:35 +0100 | [diff] [blame] | 2229 | static void nop_clear_range(struct i915_address_space *vm, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2230 | u64 start, u64 length) |
Chris Wilson | f7770bf | 2016-05-14 07:26:35 +0100 | [diff] [blame] | 2231 | { |
| 2232 | } |
| 2233 | |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2234 | static void gen8_ggtt_clear_range(struct i915_address_space *vm, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2235 | u64 start, u64 length) |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2236 | { |
Chris Wilson | ce7fda2 | 2016-04-28 09:56:38 +0100 | [diff] [blame] | 2237 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
Ben Widawsky | 782f149 | 2014-02-20 11:50:33 -0800 | [diff] [blame] | 2238 | unsigned first_entry = start >> PAGE_SHIFT; |
| 2239 | unsigned num_entries = length >> PAGE_SHIFT; |
Chris Wilson | 894cceb | 2017-02-15 08:43:37 +0000 | [diff] [blame] | 2240 | const gen8_pte_t scratch_pte = |
| 2241 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); |
| 2242 | gen8_pte_t __iomem *gtt_base = |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2243 | (gen8_pte_t __iomem *)ggtt->gsm + first_entry; |
| 2244 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2245 | int i; |
| 2246 | |
| 2247 | if (WARN(num_entries > max_entries, |
| 2248 | "First entry = %d; Num entries = %d (max=%d)\n", |
| 2249 | first_entry, num_entries, max_entries)) |
| 2250 | num_entries = max_entries; |
| 2251 | |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2252 | for (i = 0; i < num_entries; i++) |
| 2253 | gen8_set_pte(>t_base[i], scratch_pte); |
Ben Widawsky | 94ec8f6 | 2013-11-02 21:07:18 -0700 | [diff] [blame] | 2254 | } |
| 2255 | |
Jon Bloomfield | 0ef34ad | 2017-05-24 08:54:11 -0700 | [diff] [blame] | 2256 | static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) |
| 2257 | { |
| 2258 | struct drm_i915_private *dev_priv = vm->i915; |
| 2259 | |
| 2260 | /* |
| 2261 | * Make sure the internal GAM fifo has been cleared of all GTT |
| 2262 | * writes before exiting stop_machine(). This guarantees that |
| 2263 | * any aperture accesses waiting to start in another process |
| 2264 | * cannot back up behind the GTT writes causing a hang. |
| 2265 | * The register can be any arbitrary GAM register. |
| 2266 | */ |
| 2267 | POSTING_READ(GFX_FLSH_CNTL_GEN6); |
| 2268 | } |
| 2269 | |
| 2270 | struct insert_page { |
| 2271 | struct i915_address_space *vm; |
| 2272 | dma_addr_t addr; |
| 2273 | u64 offset; |
| 2274 | enum i915_cache_level level; |
| 2275 | }; |
| 2276 | |
| 2277 | static int bxt_vtd_ggtt_insert_page__cb(void *_arg) |
| 2278 | { |
| 2279 | struct insert_page *arg = _arg; |
| 2280 | |
| 2281 | gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); |
| 2282 | bxt_vtd_ggtt_wa(arg->vm); |
| 2283 | |
| 2284 | return 0; |
| 2285 | } |
| 2286 | |
| 2287 | static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, |
| 2288 | dma_addr_t addr, |
| 2289 | u64 offset, |
| 2290 | enum i915_cache_level level, |
| 2291 | u32 unused) |
| 2292 | { |
| 2293 | struct insert_page arg = { vm, addr, offset, level }; |
| 2294 | |
| 2295 | stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); |
| 2296 | } |
| 2297 | |
| 2298 | struct insert_entries { |
| 2299 | struct i915_address_space *vm; |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2300 | struct i915_vma *vma; |
Jon Bloomfield | 0ef34ad | 2017-05-24 08:54:11 -0700 | [diff] [blame] | 2301 | enum i915_cache_level level; |
| 2302 | }; |
| 2303 | |
| 2304 | static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) |
| 2305 | { |
| 2306 | struct insert_entries *arg = _arg; |
| 2307 | |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2308 | gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0); |
Jon Bloomfield | 0ef34ad | 2017-05-24 08:54:11 -0700 | [diff] [blame] | 2309 | bxt_vtd_ggtt_wa(arg->vm); |
| 2310 | |
| 2311 | return 0; |
| 2312 | } |
| 2313 | |
| 2314 | static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2315 | struct i915_vma *vma, |
Jon Bloomfield | 0ef34ad | 2017-05-24 08:54:11 -0700 | [diff] [blame] | 2316 | enum i915_cache_level level, |
| 2317 | u32 unused) |
| 2318 | { |
Chuanxiao Dong | 17369ba | 2017-07-07 17:50:59 +0800 | [diff] [blame] | 2319 | struct insert_entries arg = { vm, vma, level }; |
Jon Bloomfield | 0ef34ad | 2017-05-24 08:54:11 -0700 | [diff] [blame] | 2320 | |
| 2321 | stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); |
| 2322 | } |
| 2323 | |
| 2324 | struct clear_range { |
| 2325 | struct i915_address_space *vm; |
| 2326 | u64 start; |
| 2327 | u64 length; |
| 2328 | }; |
| 2329 | |
| 2330 | static int bxt_vtd_ggtt_clear_range__cb(void *_arg) |
| 2331 | { |
| 2332 | struct clear_range *arg = _arg; |
| 2333 | |
| 2334 | gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); |
| 2335 | bxt_vtd_ggtt_wa(arg->vm); |
| 2336 | |
| 2337 | return 0; |
| 2338 | } |
| 2339 | |
| 2340 | static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, |
| 2341 | u64 start, |
| 2342 | u64 length) |
| 2343 | { |
| 2344 | struct clear_range arg = { vm, start, length }; |
| 2345 | |
| 2346 | stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); |
| 2347 | } |
| 2348 | |
Ben Widawsky | 853ba5d | 2013-07-16 16:50:05 -0700 | [diff] [blame] | 2349 | static void gen6_ggtt_clear_range(struct i915_address_space *vm, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2350 | u64 start, u64 length) |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 2351 | { |
Chris Wilson | ce7fda2 | 2016-04-28 09:56:38 +0100 | [diff] [blame] | 2352 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
Ben Widawsky | 782f149 | 2014-02-20 11:50:33 -0800 | [diff] [blame] | 2353 | unsigned first_entry = start >> PAGE_SHIFT; |
| 2354 | unsigned num_entries = length >> PAGE_SHIFT; |
Michel Thierry | 07749ef | 2015-03-16 16:00:54 +0000 | [diff] [blame] | 2355 | gen6_pte_t scratch_pte, __iomem *gtt_base = |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2356 | (gen6_pte_t __iomem *)ggtt->gsm + first_entry; |
| 2357 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 2358 | int i; |
| 2359 | |
| 2360 | if (WARN(num_entries > max_entries, |
| 2361 | "First entry = %d; Num entries = %d (max=%d)\n", |
| 2362 | first_entry, num_entries, max_entries)) |
| 2363 | num_entries = max_entries; |
| 2364 | |
Chris Wilson | 8bcdd0f7 | 2016-08-22 08:44:30 +0100 | [diff] [blame] | 2365 | scratch_pte = vm->pte_encode(vm->scratch_page.daddr, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 2366 | I915_CACHE_LLC, 0); |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 2367 | |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 2368 | for (i = 0; i < num_entries; i++) |
| 2369 | iowrite32(scratch_pte, >t_base[i]); |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 2370 | } |
| 2371 | |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2372 | static void i915_ggtt_insert_page(struct i915_address_space *vm, |
| 2373 | dma_addr_t addr, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2374 | u64 offset, |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2375 | enum i915_cache_level cache_level, |
| 2376 | u32 unused) |
| 2377 | { |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2378 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
| 2379 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2380 | |
| 2381 | intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 2382 | } |
| 2383 | |
Daniel Vetter | d369d2d | 2015-04-14 17:35:25 +0200 | [diff] [blame] | 2384 | static void i915_ggtt_insert_entries(struct i915_address_space *vm, |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2385 | struct i915_vma *vma, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2386 | enum i915_cache_level cache_level, |
| 2387 | u32 unused) |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 2388 | { |
| 2389 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
| 2390 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; |
| 2391 | |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2392 | intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, |
| 2393 | flags); |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 2394 | } |
| 2395 | |
Ben Widawsky | 853ba5d | 2013-07-16 16:50:05 -0700 | [diff] [blame] | 2396 | static void i915_ggtt_clear_range(struct i915_address_space *vm, |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 2397 | u64 start, u64 length) |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 2398 | { |
Chris Wilson | 2eedfc7 | 2016-10-24 13:42:17 +0100 | [diff] [blame] | 2399 | intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 2400 | } |
| 2401 | |
Daniel Vetter | 70b9f6f | 2015-04-14 17:35:27 +0200 | [diff] [blame] | 2402 | static int ggtt_bind_vma(struct i915_vma *vma, |
| 2403 | enum i915_cache_level cache_level, |
| 2404 | u32 flags) |
Daniel Vetter | 7c2e6fd | 2010-11-06 10:10:47 +0100 | [diff] [blame] | 2405 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 2406 | struct drm_i915_private *i915 = vma->vm->i915; |
Daniel Vetter | 0a87871 | 2015-10-15 14:23:01 +0200 | [diff] [blame] | 2407 | struct drm_i915_gem_object *obj = vma->obj; |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 2408 | u32 pte_flags; |
Daniel Vetter | 0a87871 | 2015-10-15 14:23:01 +0200 | [diff] [blame] | 2409 | |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 2410 | if (unlikely(!vma->pages)) { |
| 2411 | int ret = i915_get_ggtt_vma_pages(vma); |
| 2412 | if (ret) |
| 2413 | return ret; |
| 2414 | } |
Daniel Vetter | 0a87871 | 2015-10-15 14:23:01 +0200 | [diff] [blame] | 2415 | |
| 2416 | /* Currently applicable only to VLV */ |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 2417 | pte_flags = 0; |
Daniel Vetter | 0a87871 | 2015-10-15 14:23:01 +0200 | [diff] [blame] | 2418 | if (obj->gt_ro) |
| 2419 | pte_flags |= PTE_READ_ONLY; |
| 2420 | |
Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 2421 | intel_runtime_pm_get(i915); |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2422 | vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); |
Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 2423 | intel_runtime_pm_put(i915); |
Daniel Vetter | 0a87871 | 2015-10-15 14:23:01 +0200 | [diff] [blame] | 2424 | |
| 2425 | /* |
| 2426 | * Without aliasing PPGTT there's no difference between |
| 2427 | * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally |
| 2428 | * upgrade to both bound if we bind either to avoid double-binding. |
| 2429 | */ |
Chris Wilson | 3272db5 | 2016-08-04 16:32:32 +0100 | [diff] [blame] | 2430 | vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; |
Daniel Vetter | 0a87871 | 2015-10-15 14:23:01 +0200 | [diff] [blame] | 2431 | |
| 2432 | return 0; |
| 2433 | } |
| 2434 | |
Chris Wilson | cbc4e9e | 2017-02-15 08:43:39 +0000 | [diff] [blame] | 2435 | static void ggtt_unbind_vma(struct i915_vma *vma) |
| 2436 | { |
| 2437 | struct drm_i915_private *i915 = vma->vm->i915; |
| 2438 | |
| 2439 | intel_runtime_pm_get(i915); |
| 2440 | vma->vm->clear_range(vma->vm, vma->node.start, vma->size); |
| 2441 | intel_runtime_pm_put(i915); |
| 2442 | } |
| 2443 | |
Daniel Vetter | 0a87871 | 2015-10-15 14:23:01 +0200 | [diff] [blame] | 2444 | static int aliasing_gtt_bind_vma(struct i915_vma *vma, |
| 2445 | enum i915_cache_level cache_level, |
| 2446 | u32 flags) |
| 2447 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 2448 | struct drm_i915_private *i915 = vma->vm->i915; |
Chris Wilson | 321d178 | 2015-11-20 10:27:18 +0000 | [diff] [blame] | 2449 | u32 pte_flags; |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 2450 | int ret; |
Daniel Vetter | 70b9f6f | 2015-04-14 17:35:27 +0200 | [diff] [blame] | 2451 | |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 2452 | if (unlikely(!vma->pages)) { |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 2453 | ret = i915_get_ggtt_vma_pages(vma); |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 2454 | if (ret) |
| 2455 | return ret; |
| 2456 | } |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 2457 | |
Akash Goel | 24f3a8c | 2014-06-17 10:59:42 +0530 | [diff] [blame] | 2458 | /* Currently applicable only to VLV */ |
Chris Wilson | 321d178 | 2015-11-20 10:27:18 +0000 | [diff] [blame] | 2459 | pte_flags = 0; |
| 2460 | if (vma->obj->gt_ro) |
Daniel Vetter | f329f5f | 2015-04-14 17:35:15 +0200 | [diff] [blame] | 2461 | pte_flags |= PTE_READ_ONLY; |
Akash Goel | 24f3a8c | 2014-06-17 10:59:42 +0530 | [diff] [blame] | 2462 | |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 2463 | if (flags & I915_VMA_LOCAL_BIND) { |
| 2464 | struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; |
| 2465 | |
Matthew Auld | 1f23475 | 2017-05-12 10:14:23 +0100 | [diff] [blame] | 2466 | if (!(vma->flags & I915_VMA_LOCAL_BIND) && |
| 2467 | appgtt->base.allocate_va_range) { |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 2468 | ret = appgtt->base.allocate_va_range(&appgtt->base, |
| 2469 | vma->node.start, |
Matthew Auld | d567232 | 2017-05-16 09:55:14 +0100 | [diff] [blame] | 2470 | vma->size); |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 2471 | if (ret) |
Chris Wilson | 2f7399a | 2017-02-27 12:26:53 +0000 | [diff] [blame] | 2472 | goto err_pages; |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 2473 | } |
| 2474 | |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2475 | appgtt->base.insert_entries(&appgtt->base, vma, cache_level, |
| 2476 | pte_flags); |
Chris Wilson | ff68597 | 2017-02-15 08:43:42 +0000 | [diff] [blame] | 2477 | } |
| 2478 | |
Chris Wilson | 3272db5 | 2016-08-04 16:32:32 +0100 | [diff] [blame] | 2479 | if (flags & I915_VMA_GLOBAL_BIND) { |
Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 2480 | intel_runtime_pm_get(i915); |
Matthew Auld | 4a234c5 | 2017-06-22 10:58:36 +0100 | [diff] [blame] | 2481 | vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); |
Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 2482 | intel_runtime_pm_put(i915); |
Ben Widawsky | 6f65e29 | 2013-12-06 14:10:56 -0800 | [diff] [blame] | 2483 | } |
Daniel Vetter | 74898d7 | 2012-02-15 23:50:22 +0100 | [diff] [blame] | 2484 | |
Daniel Vetter | 70b9f6f | 2015-04-14 17:35:27 +0200 | [diff] [blame] | 2485 | return 0; |
Chris Wilson | 2f7399a | 2017-02-27 12:26:53 +0000 | [diff] [blame] | 2486 | |
| 2487 | err_pages: |
| 2488 | if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) { |
| 2489 | if (vma->pages != vma->obj->mm.pages) { |
| 2490 | GEM_BUG_ON(!vma->pages); |
| 2491 | sg_free_table(vma->pages); |
| 2492 | kfree(vma->pages); |
| 2493 | } |
| 2494 | vma->pages = NULL; |
| 2495 | } |
| 2496 | return ret; |
Ben Widawsky | 6f65e29 | 2013-12-06 14:10:56 -0800 | [diff] [blame] | 2497 | } |
| 2498 | |
Chris Wilson | cbc4e9e | 2017-02-15 08:43:39 +0000 | [diff] [blame] | 2499 | static void aliasing_gtt_unbind_vma(struct i915_vma *vma) |
Ben Widawsky | 6f65e29 | 2013-12-06 14:10:56 -0800 | [diff] [blame] | 2500 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 2501 | struct drm_i915_private *i915 = vma->vm->i915; |
Ben Widawsky | 6f65e29 | 2013-12-06 14:10:56 -0800 | [diff] [blame] | 2502 | |
Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 2503 | if (vma->flags & I915_VMA_GLOBAL_BIND) { |
| 2504 | intel_runtime_pm_get(i915); |
Chris Wilson | cbc4e9e | 2017-02-15 08:43:39 +0000 | [diff] [blame] | 2505 | vma->vm->clear_range(vma->vm, vma->node.start, vma->size); |
Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 2506 | intel_runtime_pm_put(i915); |
| 2507 | } |
Ben Widawsky | 6f65e29 | 2013-12-06 14:10:56 -0800 | [diff] [blame] | 2508 | |
Chris Wilson | cbc4e9e | 2017-02-15 08:43:39 +0000 | [diff] [blame] | 2509 | if (vma->flags & I915_VMA_LOCAL_BIND) { |
| 2510 | struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base; |
| 2511 | |
| 2512 | vm->clear_range(vm, vma->node.start, vma->size); |
| 2513 | } |
Daniel Vetter | 7416390 | 2012-02-15 23:50:21 +0100 | [diff] [blame] | 2514 | } |
| 2515 | |
Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2516 | void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, |
| 2517 | struct sg_table *pages) |
Daniel Vetter | 7416390 | 2012-02-15 23:50:21 +0100 | [diff] [blame] | 2518 | { |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 2519 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
| 2520 | struct device *kdev = &dev_priv->drm.pdev->dev; |
Chris Wilson | 307dc25 | 2016-08-05 10:14:12 +0100 | [diff] [blame] | 2521 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
Ben Widawsky | 5c04228 | 2011-10-17 15:51:55 -0700 | [diff] [blame] | 2522 | |
Chris Wilson | 307dc25 | 2016-08-05 10:14:12 +0100 | [diff] [blame] | 2523 | if (unlikely(ggtt->do_idle_maps)) { |
Chris Wilson | 228ec87 | 2017-03-30 09:53:41 +0100 | [diff] [blame] | 2524 | if (i915_gem_wait_for_idle(dev_priv, 0)) { |
Chris Wilson | 307dc25 | 2016-08-05 10:14:12 +0100 | [diff] [blame] | 2525 | DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); |
| 2526 | /* Wait a bit, in hopes it avoids the hang */ |
| 2527 | udelay(10); |
| 2528 | } |
| 2529 | } |
Ben Widawsky | 5c04228 | 2011-10-17 15:51:55 -0700 | [diff] [blame] | 2530 | |
Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 2531 | dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); |
Daniel Vetter | 7c2e6fd | 2010-11-06 10:10:47 +0100 | [diff] [blame] | 2532 | } |
Daniel Vetter | 644ec02 | 2012-03-26 09:45:40 +0200 | [diff] [blame] | 2533 | |
Chris Wilson | 45b186f | 2016-12-16 07:46:42 +0000 | [diff] [blame] | 2534 | static void i915_gtt_color_adjust(const struct drm_mm_node *node, |
Chris Wilson | 42d6ab4 | 2012-07-26 11:49:32 +0100 | [diff] [blame] | 2535 | unsigned long color, |
Thierry Reding | 440fd52 | 2015-01-23 09:05:06 +0100 | [diff] [blame] | 2536 | u64 *start, |
| 2537 | u64 *end) |
Chris Wilson | 42d6ab4 | 2012-07-26 11:49:32 +0100 | [diff] [blame] | 2538 | { |
Chris Wilson | a6508de | 2017-02-06 08:45:47 +0000 | [diff] [blame] | 2539 | if (node->allocated && node->color != color) |
Chris Wilson | f51455d | 2017-01-10 14:47:34 +0000 | [diff] [blame] | 2540 | *start += I915_GTT_PAGE_SIZE; |
Chris Wilson | 42d6ab4 | 2012-07-26 11:49:32 +0100 | [diff] [blame] | 2541 | |
Chris Wilson | a6508de | 2017-02-06 08:45:47 +0000 | [diff] [blame] | 2542 | /* Also leave a space between the unallocated reserved node after the |
| 2543 | * GTT and any objects within the GTT, i.e. we use the color adjustment |
| 2544 | * to insert a guard page to prevent prefetches crossing over the |
| 2545 | * GTT boundary. |
| 2546 | */ |
Chris Wilson | b44f97f | 2016-12-16 07:46:40 +0000 | [diff] [blame] | 2547 | node = list_next_entry(node, node_list); |
Chris Wilson | a6508de | 2017-02-06 08:45:47 +0000 | [diff] [blame] | 2548 | if (node->color != color) |
Chris Wilson | f51455d | 2017-01-10 14:47:34 +0000 | [diff] [blame] | 2549 | *end -= I915_GTT_PAGE_SIZE; |
Chris Wilson | 42d6ab4 | 2012-07-26 11:49:32 +0100 | [diff] [blame] | 2550 | } |
Ben Widawsky | fbe5d36 | 2013-11-04 19:56:49 -0800 | [diff] [blame] | 2551 | |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2552 | int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) |
| 2553 | { |
| 2554 | struct i915_ggtt *ggtt = &i915->ggtt; |
| 2555 | struct i915_hw_ppgtt *ppgtt; |
| 2556 | int err; |
| 2557 | |
Chris Wilson | 57202f4 | 2017-02-15 08:43:56 +0000 | [diff] [blame] | 2558 | ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]"); |
Chris Wilson | 1188bc6 | 2017-02-15 08:43:38 +0000 | [diff] [blame] | 2559 | if (IS_ERR(ppgtt)) |
| 2560 | return PTR_ERR(ppgtt); |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2561 | |
Chris Wilson | e565ceb | 2017-02-15 08:43:55 +0000 | [diff] [blame] | 2562 | if (WARN_ON(ppgtt->base.total < ggtt->base.total)) { |
| 2563 | err = -ENODEV; |
| 2564 | goto err_ppgtt; |
| 2565 | } |
| 2566 | |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2567 | if (ppgtt->base.allocate_va_range) { |
Chris Wilson | e565ceb | 2017-02-15 08:43:55 +0000 | [diff] [blame] | 2568 | /* Note we only pre-allocate as far as the end of the global |
| 2569 | * GTT. On 48b / 4-level page-tables, the difference is very, |
| 2570 | * very significant! We have to preallocate as GVT/vgpu does |
| 2571 | * not like the page directory disappearing. |
| 2572 | */ |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2573 | err = ppgtt->base.allocate_va_range(&ppgtt->base, |
Chris Wilson | e565ceb | 2017-02-15 08:43:55 +0000 | [diff] [blame] | 2574 | 0, ggtt->base.total); |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2575 | if (err) |
Chris Wilson | 1188bc6 | 2017-02-15 08:43:38 +0000 | [diff] [blame] | 2576 | goto err_ppgtt; |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2577 | } |
| 2578 | |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2579 | i915->mm.aliasing_ppgtt = ppgtt; |
Chris Wilson | cbc4e9e | 2017-02-15 08:43:39 +0000 | [diff] [blame] | 2580 | |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2581 | WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma); |
| 2582 | ggtt->base.bind_vma = aliasing_gtt_bind_vma; |
| 2583 | |
Chris Wilson | cbc4e9e | 2017-02-15 08:43:39 +0000 | [diff] [blame] | 2584 | WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma); |
| 2585 | ggtt->base.unbind_vma = aliasing_gtt_unbind_vma; |
| 2586 | |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2587 | return 0; |
| 2588 | |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2589 | err_ppgtt: |
Chris Wilson | 1188bc6 | 2017-02-15 08:43:38 +0000 | [diff] [blame] | 2590 | i915_ppgtt_put(ppgtt); |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2591 | return err; |
| 2592 | } |
| 2593 | |
| 2594 | void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915) |
| 2595 | { |
| 2596 | struct i915_ggtt *ggtt = &i915->ggtt; |
| 2597 | struct i915_hw_ppgtt *ppgtt; |
| 2598 | |
| 2599 | ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt); |
| 2600 | if (!ppgtt) |
| 2601 | return; |
| 2602 | |
Chris Wilson | 1188bc6 | 2017-02-15 08:43:38 +0000 | [diff] [blame] | 2603 | i915_ppgtt_put(ppgtt); |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2604 | |
| 2605 | ggtt->base.bind_vma = ggtt_bind_vma; |
Chris Wilson | cbc4e9e | 2017-02-15 08:43:39 +0000 | [diff] [blame] | 2606 | ggtt->base.unbind_vma = ggtt_unbind_vma; |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2607 | } |
| 2608 | |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 2609 | int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) |
Daniel Vetter | 644ec02 | 2012-03-26 09:45:40 +0200 | [diff] [blame] | 2610 | { |
Ben Widawsky | e78891c | 2013-01-25 16:41:04 -0800 | [diff] [blame] | 2611 | /* Let GEM Manage all of the aperture. |
| 2612 | * |
| 2613 | * However, leave one page at the end still bound to the scratch page. |
| 2614 | * There are a number of places where the hardware apparently prefetches |
| 2615 | * past the end of the object, and we've seen multiple hangs with the |
| 2616 | * GPU head pointer stuck in a batchbuffer bound at the last page of the |
| 2617 | * aperture. One page should be enough to keep any prefetching inside |
| 2618 | * of the aperture. |
| 2619 | */ |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2620 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
Chris Wilson | ed2f345 | 2012-11-15 11:32:19 +0000 | [diff] [blame] | 2621 | unsigned long hole_start, hole_end; |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 2622 | struct drm_mm_node *entry; |
Daniel Vetter | fa76da3 | 2014-08-06 20:19:54 +0200 | [diff] [blame] | 2623 | int ret; |
Daniel Vetter | 644ec02 | 2012-03-26 09:45:40 +0200 | [diff] [blame] | 2624 | |
Zhi Wang | b02d22a | 2016-06-16 08:06:59 -0400 | [diff] [blame] | 2625 | ret = intel_vgt_balloon(dev_priv); |
| 2626 | if (ret) |
| 2627 | return ret; |
Yu Zhang | 5dda8fa | 2015-02-10 19:05:48 +0800 | [diff] [blame] | 2628 | |
Chris Wilson | 95374d7 | 2016-10-12 10:05:20 +0100 | [diff] [blame] | 2629 | /* Reserve a mappable slot for our lockless error capture */ |
Chris Wilson | 4e64e55 | 2017-02-02 21:04:38 +0000 | [diff] [blame] | 2630 | ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture, |
| 2631 | PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, |
| 2632 | 0, ggtt->mappable_end, |
| 2633 | DRM_MM_INSERT_LOW); |
Chris Wilson | 95374d7 | 2016-10-12 10:05:20 +0100 | [diff] [blame] | 2634 | if (ret) |
| 2635 | return ret; |
| 2636 | |
Chris Wilson | ed2f345 | 2012-11-15 11:32:19 +0000 | [diff] [blame] | 2637 | /* Clear any non-preallocated blocks */ |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2638 | drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) { |
Chris Wilson | ed2f345 | 2012-11-15 11:32:19 +0000 | [diff] [blame] | 2639 | DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", |
| 2640 | hole_start, hole_end); |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2641 | ggtt->base.clear_range(&ggtt->base, hole_start, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 2642 | hole_end - hole_start); |
Chris Wilson | ed2f345 | 2012-11-15 11:32:19 +0000 | [diff] [blame] | 2643 | } |
| 2644 | |
| 2645 | /* And finally clear the reserved guard page */ |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 2646 | ggtt->base.clear_range(&ggtt->base, |
Michał Winiarski | 4fb84d9 | 2016-10-13 14:02:40 +0200 | [diff] [blame] | 2647 | ggtt->base.total - PAGE_SIZE, PAGE_SIZE); |
Daniel Vetter | 6c5566a | 2014-08-06 15:04:50 +0200 | [diff] [blame] | 2648 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 2649 | if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2650 | ret = i915_gem_init_aliasing_ppgtt(dev_priv); |
Chris Wilson | 95374d7 | 2016-10-12 10:05:20 +0100 | [diff] [blame] | 2651 | if (ret) |
Chris Wilson | 6cde9a0 | 2017-02-13 17:15:50 +0000 | [diff] [blame] | 2652 | goto err; |
Daniel Vetter | fa76da3 | 2014-08-06 20:19:54 +0200 | [diff] [blame] | 2653 | } |
| 2654 | |
Daniel Vetter | 6c5566a | 2014-08-06 15:04:50 +0200 | [diff] [blame] | 2655 | return 0; |
Chris Wilson | 95374d7 | 2016-10-12 10:05:20 +0100 | [diff] [blame] | 2656 | |
Chris Wilson | 95374d7 | 2016-10-12 10:05:20 +0100 | [diff] [blame] | 2657 | err: |
| 2658 | drm_mm_remove_node(&ggtt->error_capture); |
| 2659 | return ret; |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 2660 | } |
| 2661 | |
Joonas Lahtinen | d85489d | 2016-03-24 16:47:46 +0200 | [diff] [blame] | 2662 | /** |
Joonas Lahtinen | d85489d | 2016-03-24 16:47:46 +0200 | [diff] [blame] | 2663 | * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 2664 | * @dev_priv: i915 device |
Joonas Lahtinen | d85489d | 2016-03-24 16:47:46 +0200 | [diff] [blame] | 2665 | */ |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 2666 | void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) |
Daniel Vetter | 90d0a0e | 2014-08-06 15:04:56 +0200 | [diff] [blame] | 2667 | { |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2668 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
Chris Wilson | 94d4a2a | 2017-02-10 16:35:22 +0000 | [diff] [blame] | 2669 | struct i915_vma *vma, *vn; |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 2670 | struct pagevec *pvec; |
Chris Wilson | 94d4a2a | 2017-02-10 16:35:22 +0000 | [diff] [blame] | 2671 | |
| 2672 | ggtt->base.closed = true; |
| 2673 | |
| 2674 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 2675 | WARN_ON(!list_empty(&ggtt->base.active_list)); |
| 2676 | list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link) |
| 2677 | WARN_ON(i915_vma_unbind(vma)); |
| 2678 | mutex_unlock(&dev_priv->drm.struct_mutex); |
Daniel Vetter | 90d0a0e | 2014-08-06 15:04:56 +0200 | [diff] [blame] | 2679 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 2680 | i915_gem_cleanup_stolen(&dev_priv->drm); |
Imre Deak | a4eba47 | 2016-01-19 15:26:32 +0200 | [diff] [blame] | 2681 | |
Chris Wilson | 1188bc6 | 2017-02-15 08:43:38 +0000 | [diff] [blame] | 2682 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 2683 | i915_gem_fini_aliasing_ppgtt(dev_priv); |
| 2684 | |
Chris Wilson | 95374d7 | 2016-10-12 10:05:20 +0100 | [diff] [blame] | 2685 | if (drm_mm_node_allocated(&ggtt->error_capture)) |
| 2686 | drm_mm_remove_node(&ggtt->error_capture); |
| 2687 | |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2688 | if (drm_mm_initialized(&ggtt->base.mm)) { |
Zhi Wang | b02d22a | 2016-06-16 08:06:59 -0400 | [diff] [blame] | 2689 | intel_vgt_deballoon(dev_priv); |
Matthew Auld | ed9724d | 2016-11-17 21:04:10 +0000 | [diff] [blame] | 2690 | i915_address_space_fini(&ggtt->base); |
Daniel Vetter | 90d0a0e | 2014-08-06 15:04:56 +0200 | [diff] [blame] | 2691 | } |
| 2692 | |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2693 | ggtt->base.cleanup(&ggtt->base); |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 2694 | |
| 2695 | pvec = &dev_priv->mm.wc_stash; |
| 2696 | if (pvec->nr) { |
| 2697 | set_pages_array_wb(pvec->pages, pvec->nr); |
| 2698 | __pagevec_release(pvec); |
| 2699 | } |
| 2700 | |
Chris Wilson | 1188bc6 | 2017-02-15 08:43:38 +0000 | [diff] [blame] | 2701 | mutex_unlock(&dev_priv->drm.struct_mutex); |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 2702 | |
| 2703 | arch_phys_wc_del(ggtt->mtrr); |
Chris Wilson | f7bbe78 | 2016-08-19 16:54:27 +0100 | [diff] [blame] | 2704 | io_mapping_fini(&ggtt->mappable); |
Daniel Vetter | 90d0a0e | 2014-08-06 15:04:56 +0200 | [diff] [blame] | 2705 | } |
Daniel Vetter | 70e3254 | 2014-08-06 15:04:57 +0200 | [diff] [blame] | 2706 | |
Daniel Vetter | 2c642b0 | 2015-04-14 17:35:26 +0200 | [diff] [blame] | 2707 | static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 2708 | { |
| 2709 | snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; |
| 2710 | snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; |
| 2711 | return snb_gmch_ctl << 20; |
| 2712 | } |
| 2713 | |
Daniel Vetter | 2c642b0 | 2015-04-14 17:35:26 +0200 | [diff] [blame] | 2714 | static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) |
Ben Widawsky | 9459d25 | 2013-11-03 16:53:55 -0800 | [diff] [blame] | 2715 | { |
| 2716 | bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; |
| 2717 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; |
| 2718 | if (bdw_gmch_ctl) |
| 2719 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; |
Ben Widawsky | 562d55d | 2014-05-27 16:53:08 -0700 | [diff] [blame] | 2720 | |
| 2721 | #ifdef CONFIG_X86_32 |
| 2722 | /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */ |
| 2723 | if (bdw_gmch_ctl > 4) |
| 2724 | bdw_gmch_ctl = 4; |
| 2725 | #endif |
| 2726 | |
Ben Widawsky | 9459d25 | 2013-11-03 16:53:55 -0800 | [diff] [blame] | 2727 | return bdw_gmch_ctl << 20; |
| 2728 | } |
| 2729 | |
Daniel Vetter | 2c642b0 | 2015-04-14 17:35:26 +0200 | [diff] [blame] | 2730 | static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) |
Damien Lespiau | d7f25f2 | 2014-05-08 22:19:40 +0300 | [diff] [blame] | 2731 | { |
| 2732 | gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; |
| 2733 | gmch_ctrl &= SNB_GMCH_GGMS_MASK; |
| 2734 | |
| 2735 | if (gmch_ctrl) |
| 2736 | return 1 << (20 + gmch_ctrl); |
| 2737 | |
| 2738 | return 0; |
| 2739 | } |
| 2740 | |
Daniel Vetter | 2c642b0 | 2015-04-14 17:35:26 +0200 | [diff] [blame] | 2741 | static size_t gen6_get_stolen_size(u16 snb_gmch_ctl) |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 2742 | { |
| 2743 | snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; |
| 2744 | snb_gmch_ctl &= SNB_GMCH_GMS_MASK; |
Imre Deak | a92d1a9 | 2017-05-10 12:21:52 +0300 | [diff] [blame] | 2745 | return (size_t)snb_gmch_ctl << 25; /* 32 MB units */ |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 2746 | } |
| 2747 | |
Daniel Vetter | 2c642b0 | 2015-04-14 17:35:26 +0200 | [diff] [blame] | 2748 | static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) |
Ben Widawsky | 9459d25 | 2013-11-03 16:53:55 -0800 | [diff] [blame] | 2749 | { |
| 2750 | bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; |
| 2751 | bdw_gmch_ctl &= BDW_GMCH_GMS_MASK; |
Imre Deak | a92d1a9 | 2017-05-10 12:21:52 +0300 | [diff] [blame] | 2752 | return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */ |
Ben Widawsky | 9459d25 | 2013-11-03 16:53:55 -0800 | [diff] [blame] | 2753 | } |
| 2754 | |
Damien Lespiau | d7f25f2 | 2014-05-08 22:19:40 +0300 | [diff] [blame] | 2755 | static size_t chv_get_stolen_size(u16 gmch_ctrl) |
| 2756 | { |
| 2757 | gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; |
| 2758 | gmch_ctrl &= SNB_GMCH_GMS_MASK; |
| 2759 | |
| 2760 | /* |
| 2761 | * 0x0 to 0x10: 32MB increments starting at 0MB |
| 2762 | * 0x11 to 0x16: 4MB increments starting at 8MB |
| 2763 | * 0x17 to 0x1d: 4MB increments start at 36MB |
| 2764 | */ |
| 2765 | if (gmch_ctrl < 0x11) |
Imre Deak | a92d1a9 | 2017-05-10 12:21:52 +0300 | [diff] [blame] | 2766 | return (size_t)gmch_ctrl << 25; |
Damien Lespiau | d7f25f2 | 2014-05-08 22:19:40 +0300 | [diff] [blame] | 2767 | else if (gmch_ctrl < 0x17) |
Imre Deak | a92d1a9 | 2017-05-10 12:21:52 +0300 | [diff] [blame] | 2768 | return (size_t)(gmch_ctrl - 0x11 + 2) << 22; |
Damien Lespiau | d7f25f2 | 2014-05-08 22:19:40 +0300 | [diff] [blame] | 2769 | else |
Imre Deak | a92d1a9 | 2017-05-10 12:21:52 +0300 | [diff] [blame] | 2770 | return (size_t)(gmch_ctrl - 0x17 + 9) << 22; |
Damien Lespiau | d7f25f2 | 2014-05-08 22:19:40 +0300 | [diff] [blame] | 2771 | } |
| 2772 | |
Damien Lespiau | 6637501 | 2014-01-09 18:02:46 +0000 | [diff] [blame] | 2773 | static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) |
| 2774 | { |
| 2775 | gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; |
| 2776 | gen9_gmch_ctl &= BDW_GMCH_GMS_MASK; |
| 2777 | |
| 2778 | if (gen9_gmch_ctl < 0xf0) |
Imre Deak | a92d1a9 | 2017-05-10 12:21:52 +0300 | [diff] [blame] | 2779 | return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */ |
Damien Lespiau | 6637501 | 2014-01-09 18:02:46 +0000 | [diff] [blame] | 2780 | else |
| 2781 | /* 4MB increments starting at 0xf0 for 4MB */ |
Imre Deak | a92d1a9 | 2017-05-10 12:21:52 +0300 | [diff] [blame] | 2782 | return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22; |
Damien Lespiau | 6637501 | 2014-01-09 18:02:46 +0000 | [diff] [blame] | 2783 | } |
| 2784 | |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 2785 | static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 2786 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 2787 | struct drm_i915_private *dev_priv = ggtt->base.i915; |
| 2788 | struct pci_dev *pdev = dev_priv->drm.pdev; |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 2789 | phys_addr_t phys_addr; |
Chris Wilson | 8bcdd0f7 | 2016-08-22 08:44:30 +0100 | [diff] [blame] | 2790 | int ret; |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 2791 | |
| 2792 | /* For Modern GENs the PTEs and register space are split in the BAR */ |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 2793 | phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 2794 | |
Imre Deak | 2a073f89 | 2015-03-27 13:07:33 +0200 | [diff] [blame] | 2795 | /* |
Rodrigo Vivi | 385db98 | 2017-08-29 16:09:07 -0700 | [diff] [blame] | 2796 | * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range |
| 2797 | * will be dropped. For WC mappings in general we have 64 byte burst |
| 2798 | * writes when the WC buffer is flushed, so we can't use it, but have to |
Imre Deak | 2a073f89 | 2015-03-27 13:07:33 +0200 | [diff] [blame] | 2799 | * resort to an uncached mapping. The WC issue is easily caught by the |
| 2800 | * readback check when writing GTT PTE entries. |
| 2801 | */ |
Rodrigo Vivi | 385db98 | 2017-08-29 16:09:07 -0700 | [diff] [blame] | 2802 | if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 2803 | ggtt->gsm = ioremap_nocache(phys_addr, size); |
Imre Deak | 2a073f89 | 2015-03-27 13:07:33 +0200 | [diff] [blame] | 2804 | else |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 2805 | ggtt->gsm = ioremap_wc(phys_addr, size); |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2806 | if (!ggtt->gsm) { |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 2807 | DRM_ERROR("Failed to map the ggtt page table\n"); |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 2808 | return -ENOMEM; |
| 2809 | } |
| 2810 | |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 2811 | ret = setup_scratch_page(&ggtt->base, GFP_DMA32); |
Chris Wilson | 8bcdd0f7 | 2016-08-22 08:44:30 +0100 | [diff] [blame] | 2812 | if (ret) { |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 2813 | DRM_ERROR("Scratch setup failed\n"); |
| 2814 | /* iounmap will also get called at remove, but meh */ |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 2815 | iounmap(ggtt->gsm); |
Chris Wilson | 8bcdd0f7 | 2016-08-22 08:44:30 +0100 | [diff] [blame] | 2816 | return ret; |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 2817 | } |
| 2818 | |
Mika Kuoppala | 4ad2af1 | 2015-06-30 18:16:39 +0300 | [diff] [blame] | 2819 | return 0; |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 2820 | } |
| 2821 | |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 2822 | static struct intel_ppat_entry * |
| 2823 | __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value) |
Rodrigo Vivi | 4e34935 | 2017-08-15 16:25:39 -0700 | [diff] [blame] | 2824 | { |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 2825 | struct intel_ppat_entry *entry = &ppat->entries[index]; |
| 2826 | |
| 2827 | GEM_BUG_ON(index >= ppat->max_entries); |
| 2828 | GEM_BUG_ON(test_bit(index, ppat->used)); |
| 2829 | |
| 2830 | entry->ppat = ppat; |
| 2831 | entry->value = value; |
| 2832 | kref_init(&entry->ref); |
| 2833 | set_bit(index, ppat->used); |
| 2834 | set_bit(index, ppat->dirty); |
| 2835 | |
| 2836 | return entry; |
| 2837 | } |
| 2838 | |
| 2839 | static void __free_ppat_entry(struct intel_ppat_entry *entry) |
| 2840 | { |
| 2841 | struct intel_ppat *ppat = entry->ppat; |
| 2842 | unsigned int index = entry - ppat->entries; |
| 2843 | |
| 2844 | GEM_BUG_ON(index >= ppat->max_entries); |
| 2845 | GEM_BUG_ON(!test_bit(index, ppat->used)); |
| 2846 | |
| 2847 | entry->value = ppat->clear_value; |
| 2848 | clear_bit(index, ppat->used); |
| 2849 | set_bit(index, ppat->dirty); |
| 2850 | } |
| 2851 | |
| 2852 | /** |
| 2853 | * intel_ppat_get - get a usable PPAT entry |
| 2854 | * @i915: i915 device instance |
| 2855 | * @value: the PPAT value required by the caller |
| 2856 | * |
| 2857 | * The function tries to search if there is an existing PPAT entry which |
| 2858 | * matches with the required value. If perfectly matched, the existing PPAT |
| 2859 | * entry will be used. If only partially matched, it will try to check if |
| 2860 | * there is any available PPAT index. If yes, it will allocate a new PPAT |
| 2861 | * index for the required entry and update the HW. If not, the partially |
| 2862 | * matched entry will be used. |
| 2863 | */ |
| 2864 | const struct intel_ppat_entry * |
| 2865 | intel_ppat_get(struct drm_i915_private *i915, u8 value) |
| 2866 | { |
| 2867 | struct intel_ppat *ppat = &i915->ppat; |
| 2868 | struct intel_ppat_entry *entry; |
| 2869 | unsigned int scanned, best_score; |
| 2870 | int i; |
| 2871 | |
| 2872 | GEM_BUG_ON(!ppat->max_entries); |
| 2873 | |
| 2874 | scanned = best_score = 0; |
| 2875 | for_each_set_bit(i, ppat->used, ppat->max_entries) { |
| 2876 | unsigned int score; |
| 2877 | |
| 2878 | score = ppat->match(ppat->entries[i].value, value); |
| 2879 | if (score > best_score) { |
| 2880 | entry = &ppat->entries[i]; |
| 2881 | if (score == INTEL_PPAT_PERFECT_MATCH) { |
| 2882 | kref_get(&entry->ref); |
| 2883 | return entry; |
| 2884 | } |
| 2885 | best_score = score; |
| 2886 | } |
| 2887 | scanned++; |
| 2888 | } |
| 2889 | |
| 2890 | if (scanned == ppat->max_entries) { |
| 2891 | if (!best_score) |
| 2892 | return ERR_PTR(-ENOSPC); |
| 2893 | |
| 2894 | kref_get(&entry->ref); |
| 2895 | return entry; |
| 2896 | } |
| 2897 | |
| 2898 | i = find_first_zero_bit(ppat->used, ppat->max_entries); |
| 2899 | entry = __alloc_ppat_entry(ppat, i, value); |
| 2900 | ppat->update_hw(i915); |
| 2901 | return entry; |
| 2902 | } |
| 2903 | |
| 2904 | static void release_ppat(struct kref *kref) |
| 2905 | { |
| 2906 | struct intel_ppat_entry *entry = |
| 2907 | container_of(kref, struct intel_ppat_entry, ref); |
| 2908 | struct drm_i915_private *i915 = entry->ppat->i915; |
| 2909 | |
| 2910 | __free_ppat_entry(entry); |
| 2911 | entry->ppat->update_hw(i915); |
| 2912 | } |
| 2913 | |
| 2914 | /** |
| 2915 | * intel_ppat_put - put back the PPAT entry got from intel_ppat_get() |
| 2916 | * @entry: an intel PPAT entry |
| 2917 | * |
| 2918 | * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the |
| 2919 | * entry is dynamically allocated, its reference count will be decreased. Once |
| 2920 | * the reference count becomes into zero, the PPAT index becomes free again. |
| 2921 | */ |
| 2922 | void intel_ppat_put(const struct intel_ppat_entry *entry) |
| 2923 | { |
| 2924 | struct intel_ppat *ppat = entry->ppat; |
| 2925 | unsigned int index = entry - ppat->entries; |
| 2926 | |
| 2927 | GEM_BUG_ON(!ppat->max_entries); |
| 2928 | |
| 2929 | kref_put(&ppat->entries[index].ref, release_ppat); |
| 2930 | } |
| 2931 | |
| 2932 | static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv) |
| 2933 | { |
| 2934 | struct intel_ppat *ppat = &dev_priv->ppat; |
| 2935 | int i; |
| 2936 | |
| 2937 | for_each_set_bit(i, ppat->dirty, ppat->max_entries) { |
| 2938 | I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value); |
| 2939 | clear_bit(i, ppat->dirty); |
| 2940 | } |
| 2941 | } |
| 2942 | |
| 2943 | static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv) |
| 2944 | { |
| 2945 | struct intel_ppat *ppat = &dev_priv->ppat; |
| 2946 | u64 pat = 0; |
| 2947 | int i; |
| 2948 | |
| 2949 | for (i = 0; i < ppat->max_entries; i++) |
| 2950 | pat |= GEN8_PPAT(i, ppat->entries[i].value); |
| 2951 | |
| 2952 | bitmap_clear(ppat->dirty, 0, ppat->max_entries); |
| 2953 | |
| 2954 | I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); |
| 2955 | I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); |
| 2956 | } |
| 2957 | |
| 2958 | static unsigned int bdw_private_pat_match(u8 src, u8 dst) |
| 2959 | { |
| 2960 | unsigned int score = 0; |
| 2961 | enum { |
| 2962 | AGE_MATCH = BIT(0), |
| 2963 | TC_MATCH = BIT(1), |
| 2964 | CA_MATCH = BIT(2), |
| 2965 | }; |
| 2966 | |
| 2967 | /* Cache attribute has to be matched. */ |
Zhi Wang | 1298d51 | 2017-09-18 21:36:34 +0800 | [diff] [blame] | 2968 | if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst)) |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 2969 | return 0; |
| 2970 | |
| 2971 | score |= CA_MATCH; |
| 2972 | |
| 2973 | if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst)) |
| 2974 | score |= TC_MATCH; |
| 2975 | |
| 2976 | if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst)) |
| 2977 | score |= AGE_MATCH; |
| 2978 | |
| 2979 | if (score == (AGE_MATCH | TC_MATCH | CA_MATCH)) |
| 2980 | return INTEL_PPAT_PERFECT_MATCH; |
| 2981 | |
| 2982 | return score; |
| 2983 | } |
| 2984 | |
| 2985 | static unsigned int chv_private_pat_match(u8 src, u8 dst) |
| 2986 | { |
| 2987 | return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ? |
| 2988 | INTEL_PPAT_PERFECT_MATCH : 0; |
| 2989 | } |
| 2990 | |
| 2991 | static void cnl_setup_private_ppat(struct intel_ppat *ppat) |
| 2992 | { |
| 2993 | ppat->max_entries = 8; |
| 2994 | ppat->update_hw = cnl_private_pat_update_hw; |
| 2995 | ppat->match = bdw_private_pat_match; |
| 2996 | ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); |
| 2997 | |
Rodrigo Vivi | 4e34935 | 2017-08-15 16:25:39 -0700 | [diff] [blame] | 2998 | /* XXX: spec is unclear if this is still needed for CNL+ */ |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 2999 | if (!USES_PPGTT(ppat->i915)) { |
| 3000 | __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC); |
Rodrigo Vivi | 4e34935 | 2017-08-15 16:25:39 -0700 | [diff] [blame] | 3001 | return; |
| 3002 | } |
| 3003 | |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3004 | __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); |
| 3005 | __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); |
| 3006 | __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); |
| 3007 | __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); |
| 3008 | __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); |
| 3009 | __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); |
| 3010 | __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); |
| 3011 | __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); |
Rodrigo Vivi | 4e34935 | 2017-08-15 16:25:39 -0700 | [diff] [blame] | 3012 | } |
| 3013 | |
Ben Widawsky | fbe5d36 | 2013-11-04 19:56:49 -0800 | [diff] [blame] | 3014 | /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability |
| 3015 | * bits. When using advanced contexts each context stores its own PAT, but |
| 3016 | * writing this data shouldn't be harmful even in those cases. */ |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3017 | static void bdw_setup_private_ppat(struct intel_ppat *ppat) |
Ben Widawsky | fbe5d36 | 2013-11-04 19:56:49 -0800 | [diff] [blame] | 3018 | { |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3019 | ppat->max_entries = 8; |
| 3020 | ppat->update_hw = bdw_private_pat_update_hw; |
| 3021 | ppat->match = bdw_private_pat_match; |
| 3022 | ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); |
Ben Widawsky | fbe5d36 | 2013-11-04 19:56:49 -0800 | [diff] [blame] | 3023 | |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3024 | if (!USES_PPGTT(ppat->i915)) { |
Rodrigo Vivi | d6a8b72 | 2014-11-05 16:56:36 -0800 | [diff] [blame] | 3025 | /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, |
| 3026 | * so RTL will always use the value corresponding to |
| 3027 | * pat_sel = 000". |
| 3028 | * So let's disable cache for GGTT to avoid screen corruptions. |
| 3029 | * MOCS still can be used though. |
| 3030 | * - System agent ggtt writes (i.e. cpu gtt mmaps) already work |
| 3031 | * before this patch, i.e. the same uncached + snooping access |
| 3032 | * like on gen6/7 seems to be in effect. |
| 3033 | * - So this just fixes blitter/render access. Again it looks |
| 3034 | * like it's not just uncached access, but uncached + snooping. |
| 3035 | * So we can still hold onto all our assumptions wrt cpu |
| 3036 | * clflushing on LLC machines. |
| 3037 | */ |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3038 | __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC); |
| 3039 | return; |
| 3040 | } |
Rodrigo Vivi | d6a8b72 | 2014-11-05 16:56:36 -0800 | [diff] [blame] | 3041 | |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3042 | __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */ |
| 3043 | __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */ |
| 3044 | __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */ |
| 3045 | __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */ |
| 3046 | __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); |
| 3047 | __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); |
| 3048 | __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); |
| 3049 | __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); |
Ben Widawsky | fbe5d36 | 2013-11-04 19:56:49 -0800 | [diff] [blame] | 3050 | } |
| 3051 | |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3052 | static void chv_setup_private_ppat(struct intel_ppat *ppat) |
Ville Syrjälä | ee0ce47 | 2014-04-09 13:28:01 +0300 | [diff] [blame] | 3053 | { |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3054 | ppat->max_entries = 8; |
| 3055 | ppat->update_hw = bdw_private_pat_update_hw; |
| 3056 | ppat->match = chv_private_pat_match; |
| 3057 | ppat->clear_value = CHV_PPAT_SNOOP; |
Ville Syrjälä | ee0ce47 | 2014-04-09 13:28:01 +0300 | [diff] [blame] | 3058 | |
| 3059 | /* |
| 3060 | * Map WB on BDW to snooped on CHV. |
| 3061 | * |
| 3062 | * Only the snoop bit has meaning for CHV, the rest is |
| 3063 | * ignored. |
| 3064 | * |
Ville Syrjälä | cf3d262 | 2014-11-14 21:02:44 +0200 | [diff] [blame] | 3065 | * The hardware will never snoop for certain types of accesses: |
| 3066 | * - CPU GTT (GMADR->GGTT->no snoop->memory) |
| 3067 | * - PPGTT page tables |
| 3068 | * - some other special cycles |
| 3069 | * |
| 3070 | * As with BDW, we also need to consider the following for GT accesses: |
| 3071 | * "For GGTT, there is NO pat_sel[2:0] from the entry, |
| 3072 | * so RTL will always use the value corresponding to |
| 3073 | * pat_sel = 000". |
| 3074 | * Which means we must set the snoop bit in PAT entry 0 |
| 3075 | * in order to keep the global status page working. |
Ville Syrjälä | ee0ce47 | 2014-04-09 13:28:01 +0300 | [diff] [blame] | 3076 | */ |
Ville Syrjälä | ee0ce47 | 2014-04-09 13:28:01 +0300 | [diff] [blame] | 3077 | |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3078 | __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP); |
| 3079 | __alloc_ppat_entry(ppat, 1, 0); |
| 3080 | __alloc_ppat_entry(ppat, 2, 0); |
| 3081 | __alloc_ppat_entry(ppat, 3, 0); |
| 3082 | __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP); |
| 3083 | __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP); |
| 3084 | __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP); |
| 3085 | __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP); |
Ville Syrjälä | ee0ce47 | 2014-04-09 13:28:01 +0300 | [diff] [blame] | 3086 | } |
| 3087 | |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3088 | static void gen6_gmch_remove(struct i915_address_space *vm) |
| 3089 | { |
| 3090 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
| 3091 | |
| 3092 | iounmap(ggtt->gsm); |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 3093 | cleanup_scratch_page(vm); |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3094 | } |
| 3095 | |
Zhi Wang | 36e16c4 | 2017-09-12 15:42:24 +0800 | [diff] [blame] | 3096 | static void setup_private_pat(struct drm_i915_private *dev_priv) |
| 3097 | { |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3098 | struct intel_ppat *ppat = &dev_priv->ppat; |
| 3099 | int i; |
| 3100 | |
| 3101 | ppat->i915 = dev_priv; |
| 3102 | |
Zhi Wang | 36e16c4 | 2017-09-12 15:42:24 +0800 | [diff] [blame] | 3103 | if (INTEL_GEN(dev_priv) >= 10) |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3104 | cnl_setup_private_ppat(ppat); |
Zhi Wang | 36e16c4 | 2017-09-12 15:42:24 +0800 | [diff] [blame] | 3105 | else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3106 | chv_setup_private_ppat(ppat); |
Zhi Wang | 36e16c4 | 2017-09-12 15:42:24 +0800 | [diff] [blame] | 3107 | else |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3108 | bdw_setup_private_ppat(ppat); |
| 3109 | |
| 3110 | GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES); |
| 3111 | |
| 3112 | for_each_clear_bit(i, ppat->used, ppat->max_entries) { |
| 3113 | ppat->entries[i].value = ppat->clear_value; |
| 3114 | ppat->entries[i].ppat = ppat; |
| 3115 | set_bit(i, ppat->dirty); |
| 3116 | } |
| 3117 | |
| 3118 | ppat->update_hw(dev_priv); |
Zhi Wang | 36e16c4 | 2017-09-12 15:42:24 +0800 | [diff] [blame] | 3119 | } |
| 3120 | |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3121 | static int gen8_gmch_probe(struct i915_ggtt *ggtt) |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 3122 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 3123 | struct drm_i915_private *dev_priv = ggtt->base.i915; |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3124 | struct pci_dev *pdev = dev_priv->drm.pdev; |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3125 | unsigned int size; |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 3126 | u16 snb_gmch_ctl; |
Imre Deak | 4519290 | 2017-05-10 12:21:50 +0300 | [diff] [blame] | 3127 | int err; |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 3128 | |
| 3129 | /* TODO: We're not aware of mappable constraints on gen8 yet */ |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3130 | ggtt->mappable_base = pci_resource_start(pdev, 2); |
| 3131 | ggtt->mappable_end = pci_resource_len(pdev, 2); |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 3132 | |
Imre Deak | 4519290 | 2017-05-10 12:21:50 +0300 | [diff] [blame] | 3133 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); |
| 3134 | if (!err) |
| 3135 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); |
| 3136 | if (err) |
| 3137 | DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 3138 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3139 | pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 3140 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3141 | if (INTEL_GEN(dev_priv) >= 9) { |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3142 | ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl); |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3143 | size = gen8_get_total_gtt_size(snb_gmch_ctl); |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3144 | } else if (IS_CHERRYVIEW(dev_priv)) { |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3145 | ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl); |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3146 | size = chv_get_total_gtt_size(snb_gmch_ctl); |
Damien Lespiau | d7f25f2 | 2014-05-08 22:19:40 +0300 | [diff] [blame] | 3147 | } else { |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3148 | ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl); |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3149 | size = gen8_get_total_gtt_size(snb_gmch_ctl); |
Damien Lespiau | d7f25f2 | 2014-05-08 22:19:40 +0300 | [diff] [blame] | 3150 | } |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 3151 | |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3152 | ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3153 | ggtt->base.cleanup = gen6_gmch_remove; |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3154 | ggtt->base.bind_vma = ggtt_bind_vma; |
| 3155 | ggtt->base.unbind_vma = ggtt_unbind_vma; |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 3156 | ggtt->base.insert_page = gen8_ggtt_insert_page; |
Chris Wilson | f7770bf | 2016-05-14 07:26:35 +0100 | [diff] [blame] | 3157 | ggtt->base.clear_range = nop_clear_range; |
Chris Wilson | 48f112f | 2016-06-24 14:07:14 +0100 | [diff] [blame] | 3158 | if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) |
Chris Wilson | f7770bf | 2016-05-14 07:26:35 +0100 | [diff] [blame] | 3159 | ggtt->base.clear_range = gen8_ggtt_clear_range; |
| 3160 | |
| 3161 | ggtt->base.insert_entries = gen8_ggtt_insert_entries; |
Chris Wilson | f7770bf | 2016-05-14 07:26:35 +0100 | [diff] [blame] | 3162 | |
Jon Bloomfield | 0ef34ad | 2017-05-24 08:54:11 -0700 | [diff] [blame] | 3163 | /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ |
| 3164 | if (intel_ggtt_update_needs_vtd_wa(dev_priv)) { |
| 3165 | ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; |
| 3166 | ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL; |
| 3167 | if (ggtt->base.clear_range != nop_clear_range) |
| 3168 | ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL; |
| 3169 | } |
| 3170 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 3171 | ggtt->invalidate = gen6_ggtt_invalidate; |
| 3172 | |
Zhi Wang | 36e16c4 | 2017-09-12 15:42:24 +0800 | [diff] [blame] | 3173 | setup_private_pat(dev_priv); |
| 3174 | |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3175 | return ggtt_probe_common(ggtt, size); |
Ben Widawsky | 6334013 | 2013-11-04 19:32:22 -0800 | [diff] [blame] | 3176 | } |
| 3177 | |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3178 | static int gen6_gmch_probe(struct i915_ggtt *ggtt) |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 3179 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 3180 | struct drm_i915_private *dev_priv = ggtt->base.i915; |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3181 | struct pci_dev *pdev = dev_priv->drm.pdev; |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3182 | unsigned int size; |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 3183 | u16 snb_gmch_ctl; |
Imre Deak | 4519290 | 2017-05-10 12:21:50 +0300 | [diff] [blame] | 3184 | int err; |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 3185 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3186 | ggtt->mappable_base = pci_resource_start(pdev, 2); |
| 3187 | ggtt->mappable_end = pci_resource_len(pdev, 2); |
Ben Widawsky | 41907dd | 2013-02-08 11:32:47 -0800 | [diff] [blame] | 3188 | |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3189 | /* 64/512MB is the current min/max we actually know of, but this is just |
| 3190 | * a coarse sanity check. |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 3191 | */ |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3192 | if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3193 | DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end); |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3194 | return -ENXIO; |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 3195 | } |
| 3196 | |
Imre Deak | 4519290 | 2017-05-10 12:21:50 +0300 | [diff] [blame] | 3197 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); |
| 3198 | if (!err) |
| 3199 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); |
| 3200 | if (err) |
| 3201 | DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3202 | pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3203 | |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3204 | ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3205 | |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3206 | size = gen6_get_total_gtt_size(snb_gmch_ctl); |
| 3207 | ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT; |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3208 | |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3209 | ggtt->base.clear_range = gen6_ggtt_clear_range; |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 3210 | ggtt->base.insert_page = gen6_ggtt_insert_page; |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3211 | ggtt->base.insert_entries = gen6_ggtt_insert_entries; |
| 3212 | ggtt->base.bind_vma = ggtt_bind_vma; |
| 3213 | ggtt->base.unbind_vma = ggtt_unbind_vma; |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3214 | ggtt->base.cleanup = gen6_gmch_remove; |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3215 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 3216 | ggtt->invalidate = gen6_ggtt_invalidate; |
| 3217 | |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3218 | if (HAS_EDRAM(dev_priv)) |
| 3219 | ggtt->base.pte_encode = iris_pte_encode; |
| 3220 | else if (IS_HASWELL(dev_priv)) |
| 3221 | ggtt->base.pte_encode = hsw_pte_encode; |
| 3222 | else if (IS_VALLEYVIEW(dev_priv)) |
| 3223 | ggtt->base.pte_encode = byt_pte_encode; |
| 3224 | else if (INTEL_GEN(dev_priv) >= 7) |
| 3225 | ggtt->base.pte_encode = ivb_pte_encode; |
| 3226 | else |
| 3227 | ggtt->base.pte_encode = snb_pte_encode; |
| 3228 | |
| 3229 | return ggtt_probe_common(ggtt, size); |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3230 | } |
| 3231 | |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3232 | static void i915_gmch_remove(struct i915_address_space *vm) |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3233 | { |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3234 | intel_gmch_remove(); |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3235 | } |
| 3236 | |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3237 | static int i915_gmch_probe(struct i915_ggtt *ggtt) |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3238 | { |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 3239 | struct drm_i915_private *dev_priv = ggtt->base.i915; |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3240 | int ret; |
| 3241 | |
Chris Wilson | 91c8a32 | 2016-07-05 10:40:23 +0100 | [diff] [blame] | 3242 | ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3243 | if (!ret) { |
| 3244 | DRM_ERROR("failed to set up gmch\n"); |
| 3245 | return -EIO; |
| 3246 | } |
| 3247 | |
Chris Wilson | edd1f2f | 2017-01-06 15:20:11 +0000 | [diff] [blame] | 3248 | intel_gtt_get(&ggtt->base.total, |
| 3249 | &ggtt->stolen_size, |
| 3250 | &ggtt->mappable_base, |
| 3251 | &ggtt->mappable_end); |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3252 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3253 | ggtt->do_idle_maps = needs_idle_maps(dev_priv); |
Chris Wilson | d6473f5 | 2016-06-10 14:22:59 +0530 | [diff] [blame] | 3254 | ggtt->base.insert_page = i915_ggtt_insert_page; |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3255 | ggtt->base.insert_entries = i915_ggtt_insert_entries; |
| 3256 | ggtt->base.clear_range = i915_ggtt_clear_range; |
| 3257 | ggtt->base.bind_vma = ggtt_bind_vma; |
| 3258 | ggtt->base.unbind_vma = ggtt_unbind_vma; |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3259 | ggtt->base.cleanup = i915_gmch_remove; |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3260 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 3261 | ggtt->invalidate = gmch_ggtt_invalidate; |
| 3262 | |
Joonas Lahtinen | d507d73 | 2016-03-18 10:42:58 +0200 | [diff] [blame] | 3263 | if (unlikely(ggtt->do_idle_maps)) |
Chris Wilson | c0a7f81 | 2013-12-30 12:16:15 +0000 | [diff] [blame] | 3264 | DRM_INFO("applying Ironlake quirks for intel_iommu\n"); |
| 3265 | |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3266 | return 0; |
| 3267 | } |
| 3268 | |
Joonas Lahtinen | d85489d | 2016-03-24 16:47:46 +0200 | [diff] [blame] | 3269 | /** |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 3270 | * i915_ggtt_probe_hw - Probe GGTT hardware location |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3271 | * @dev_priv: i915 device |
Joonas Lahtinen | d85489d | 2016-03-24 16:47:46 +0200 | [diff] [blame] | 3272 | */ |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3273 | int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3274 | { |
Joonas Lahtinen | 62106b4 | 2016-03-18 10:42:57 +0200 | [diff] [blame] | 3275 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3276 | int ret; |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 3277 | |
Chris Wilson | 49d7391 | 2016-11-29 09:50:08 +0000 | [diff] [blame] | 3278 | ggtt->base.i915 = dev_priv; |
Chris Wilson | 8448661 | 2017-02-15 08:43:40 +0000 | [diff] [blame] | 3279 | ggtt->base.dma = &dev_priv->drm.pdev->dev; |
Mika Kuoppala | c114f76 | 2015-06-25 18:35:13 +0300 | [diff] [blame] | 3280 | |
Chris Wilson | 34c998b | 2016-08-04 07:52:24 +0100 | [diff] [blame] | 3281 | if (INTEL_GEN(dev_priv) <= 5) |
| 3282 | ret = i915_gmch_probe(ggtt); |
| 3283 | else if (INTEL_GEN(dev_priv) < 8) |
| 3284 | ret = gen6_gmch_probe(ggtt); |
| 3285 | else |
| 3286 | ret = gen8_gmch_probe(ggtt); |
Ben Widawsky | a54c0c2 | 2013-01-24 14:45:00 -0800 | [diff] [blame] | 3287 | if (ret) |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3288 | return ret; |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 3289 | |
Chris Wilson | db9309a | 2017-01-05 15:30:23 +0000 | [diff] [blame] | 3290 | /* Trim the GGTT to fit the GuC mappable upper range (when enabled). |
| 3291 | * This is easier than doing range restriction on the fly, as we |
| 3292 | * currently don't have any bits spare to pass in this upper |
| 3293 | * restriction! |
| 3294 | */ |
Michal Wajdeczko | 4f044a8 | 2017-09-19 19:38:44 +0000 | [diff] [blame^] | 3295 | if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) { |
Chris Wilson | db9309a | 2017-01-05 15:30:23 +0000 | [diff] [blame] | 3296 | ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP); |
| 3297 | ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); |
| 3298 | } |
| 3299 | |
Chris Wilson | c890e2d | 2016-03-18 10:42:59 +0200 | [diff] [blame] | 3300 | if ((ggtt->base.total - 1) >> 32) { |
| 3301 | DRM_ERROR("We never expected a Global GTT with more than 32bits" |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 3302 | " of address space! Found %lldM!\n", |
Chris Wilson | c890e2d | 2016-03-18 10:42:59 +0200 | [diff] [blame] | 3303 | ggtt->base.total >> 20); |
| 3304 | ggtt->base.total = 1ULL << 32; |
| 3305 | ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); |
| 3306 | } |
| 3307 | |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 3308 | if (ggtt->mappable_end > ggtt->base.total) { |
| 3309 | DRM_ERROR("mappable aperture extends past end of GGTT," |
| 3310 | " aperture=%llx, total=%llx\n", |
| 3311 | ggtt->mappable_end, ggtt->base.total); |
| 3312 | ggtt->mappable_end = ggtt->base.total; |
| 3313 | } |
| 3314 | |
Ben Widawsky | baa09f5 | 2013-01-24 13:49:57 -0800 | [diff] [blame] | 3315 | /* GMADR is the PCI mmio aperture into the global GTT. */ |
Mika Kuoppala | c44ef60 | 2015-06-25 18:35:05 +0300 | [diff] [blame] | 3316 | DRM_INFO("Memory usable by graphics device = %lluM\n", |
Joonas Lahtinen | 62106b4 | 2016-03-18 10:42:57 +0200 | [diff] [blame] | 3317 | ggtt->base.total >> 20); |
| 3318 | DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20); |
Chris Wilson | edd1f2f | 2017-01-06 15:20:11 +0000 | [diff] [blame] | 3319 | DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20); |
Chris Wilson | 80debff | 2017-05-25 13:16:12 +0100 | [diff] [blame] | 3320 | if (intel_vtd_active()) |
Daniel Vetter | 5db6c73 | 2014-03-31 16:23:04 +0200 | [diff] [blame] | 3321 | DRM_INFO("VT-d active for gfx access\n"); |
Daniel Vetter | 7faf1ab | 2013-01-24 14:44:55 -0800 | [diff] [blame] | 3322 | |
Ben Widawsky | e76e9ae | 2012-11-04 09:21:27 -0800 | [diff] [blame] | 3323 | return 0; |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 3324 | } |
| 3325 | |
| 3326 | /** |
| 3327 | * i915_ggtt_init_hw - Initialize GGTT hardware |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3328 | * @dev_priv: i915 device |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 3329 | */ |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3330 | int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 3331 | { |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 3332 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
| 3333 | int ret; |
| 3334 | |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 3335 | INIT_LIST_HEAD(&dev_priv->vm_list); |
| 3336 | |
Chris Wilson | a6508de | 2017-02-06 08:45:47 +0000 | [diff] [blame] | 3337 | /* Note that we use page colouring to enforce a guard page at the |
| 3338 | * end of the address space. This is required as the CS may prefetch |
| 3339 | * beyond the end of the batch buffer, across the page boundary, |
| 3340 | * and beyond the end of the GTT if we do not provide a guard. |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 3341 | */ |
Chris Wilson | 80b204b | 2016-10-28 13:58:58 +0100 | [diff] [blame] | 3342 | mutex_lock(&dev_priv->drm.struct_mutex); |
Chris Wilson | 80b204b | 2016-10-28 13:58:58 +0100 | [diff] [blame] | 3343 | i915_address_space_init(&ggtt->base, dev_priv, "[global]"); |
Chris Wilson | a6508de | 2017-02-06 08:45:47 +0000 | [diff] [blame] | 3344 | if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 3345 | ggtt->base.mm.color_adjust = i915_gtt_color_adjust; |
Chris Wilson | 80b204b | 2016-10-28 13:58:58 +0100 | [diff] [blame] | 3346 | mutex_unlock(&dev_priv->drm.struct_mutex); |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 3347 | |
Chris Wilson | f7bbe78 | 2016-08-19 16:54:27 +0100 | [diff] [blame] | 3348 | if (!io_mapping_init_wc(&dev_priv->ggtt.mappable, |
| 3349 | dev_priv->ggtt.mappable_base, |
| 3350 | dev_priv->ggtt.mappable_end)) { |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 3351 | ret = -EIO; |
| 3352 | goto out_gtt_cleanup; |
| 3353 | } |
| 3354 | |
| 3355 | ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end); |
| 3356 | |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 3357 | /* |
| 3358 | * Initialise stolen early so that we may reserve preallocated |
| 3359 | * objects for the BIOS to KMS transition. |
| 3360 | */ |
Tvrtko Ursulin | 7ace3d3 | 2016-11-16 08:55:35 +0000 | [diff] [blame] | 3361 | ret = i915_gem_init_stolen(dev_priv); |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 3362 | if (ret) |
| 3363 | goto out_gtt_cleanup; |
| 3364 | |
| 3365 | return 0; |
Imre Deak | a4eba47 | 2016-01-19 15:26:32 +0200 | [diff] [blame] | 3366 | |
| 3367 | out_gtt_cleanup: |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 3368 | ggtt->base.cleanup(&ggtt->base); |
Imre Deak | a4eba47 | 2016-01-19 15:26:32 +0200 | [diff] [blame] | 3369 | return ret; |
Daniel Vetter | 644ec02 | 2012-03-26 09:45:40 +0200 | [diff] [blame] | 3370 | } |
Ben Widawsky | 6f65e29 | 2013-12-06 14:10:56 -0800 | [diff] [blame] | 3371 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3372 | int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) |
Ville Syrjälä | ac840ae | 2016-05-06 21:35:55 +0300 | [diff] [blame] | 3373 | { |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 3374 | if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt()) |
Ville Syrjälä | ac840ae | 2016-05-06 21:35:55 +0300 | [diff] [blame] | 3375 | return -EIO; |
| 3376 | |
| 3377 | return 0; |
| 3378 | } |
| 3379 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 3380 | void i915_ggtt_enable_guc(struct drm_i915_private *i915) |
| 3381 | { |
Chris Wilson | 04f7b24e | 2017-06-01 10:04:46 +0100 | [diff] [blame] | 3382 | GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate); |
| 3383 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 3384 | i915->ggtt.invalidate = guc_ggtt_invalidate; |
| 3385 | } |
| 3386 | |
| 3387 | void i915_ggtt_disable_guc(struct drm_i915_private *i915) |
| 3388 | { |
Chris Wilson | 04f7b24e | 2017-06-01 10:04:46 +0100 | [diff] [blame] | 3389 | /* We should only be called after i915_ggtt_enable_guc() */ |
| 3390 | GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate); |
| 3391 | |
| 3392 | i915->ggtt.invalidate = gen6_ggtt_invalidate; |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 3393 | } |
| 3394 | |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 3395 | void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3396 | { |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 3397 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
Chris Wilson | fbb30a5c | 2016-09-09 21:19:57 +0100 | [diff] [blame] | 3398 | struct drm_i915_gem_object *obj, *on; |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3399 | |
Chris Wilson | dc97997 | 2016-05-10 14:10:04 +0100 | [diff] [blame] | 3400 | i915_check_and_clear_faults(dev_priv); |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3401 | |
| 3402 | /* First fill our portion of the GTT with scratch pages */ |
Chris Wilson | 381b943 | 2017-02-15 08:43:54 +0000 | [diff] [blame] | 3403 | ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total); |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3404 | |
Chris Wilson | fbb30a5c | 2016-09-09 21:19:57 +0100 | [diff] [blame] | 3405 | ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */ |
| 3406 | |
| 3407 | /* clflush objects bound into the GGTT and rebind them. */ |
| 3408 | list_for_each_entry_safe(obj, on, |
Joonas Lahtinen | 56cea32 | 2016-11-02 12:16:04 +0200 | [diff] [blame] | 3409 | &dev_priv->mm.bound_list, global_link) { |
Chris Wilson | fbb30a5c | 2016-09-09 21:19:57 +0100 | [diff] [blame] | 3410 | bool ggtt_bound = false; |
| 3411 | struct i915_vma *vma; |
| 3412 | |
Chris Wilson | 1c7f4bc | 2016-02-26 11:03:19 +0000 | [diff] [blame] | 3413 | list_for_each_entry(vma, &obj->vma_list, obj_link) { |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 3414 | if (vma->vm != &ggtt->base) |
Tvrtko Ursulin | 2c3d998 | 2015-07-06 15:15:01 +0100 | [diff] [blame] | 3415 | continue; |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3416 | |
Chris Wilson | fbb30a5c | 2016-09-09 21:19:57 +0100 | [diff] [blame] | 3417 | if (!i915_vma_unbind(vma)) |
| 3418 | continue; |
| 3419 | |
Tvrtko Ursulin | 2c3d998 | 2015-07-06 15:15:01 +0100 | [diff] [blame] | 3420 | WARN_ON(i915_vma_bind(vma, obj->cache_level, |
| 3421 | PIN_UPDATE)); |
Chris Wilson | fbb30a5c | 2016-09-09 21:19:57 +0100 | [diff] [blame] | 3422 | ggtt_bound = true; |
Tvrtko Ursulin | 2c3d998 | 2015-07-06 15:15:01 +0100 | [diff] [blame] | 3423 | } |
| 3424 | |
Chris Wilson | fbb30a5c | 2016-09-09 21:19:57 +0100 | [diff] [blame] | 3425 | if (ggtt_bound) |
Chris Wilson | 975f7ff | 2016-05-14 07:26:34 +0100 | [diff] [blame] | 3426 | WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3427 | } |
| 3428 | |
Chris Wilson | fbb30a5c | 2016-09-09 21:19:57 +0100 | [diff] [blame] | 3429 | ggtt->base.closed = false; |
| 3430 | |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 3431 | if (INTEL_GEN(dev_priv) >= 8) { |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3432 | struct intel_ppat *ppat = &dev_priv->ppat; |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3433 | |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 3434 | bitmap_set(ppat->dirty, 0, ppat->max_entries); |
| 3435 | dev_priv->ppat.update_hw(dev_priv); |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3436 | return; |
| 3437 | } |
| 3438 | |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 3439 | if (USES_PPGTT(dev_priv)) { |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 3440 | struct i915_address_space *vm; |
| 3441 | |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3442 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) { |
Joonas Lahtinen | e5716f5 | 2016-04-07 11:08:03 +0300 | [diff] [blame] | 3443 | struct i915_hw_ppgtt *ppgtt; |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3444 | |
Chris Wilson | 2bfa996 | 2016-08-04 07:52:25 +0100 | [diff] [blame] | 3445 | if (i915_is_ggtt(vm)) |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3446 | ppgtt = dev_priv->mm.aliasing_ppgtt; |
Joonas Lahtinen | e5716f5 | 2016-04-07 11:08:03 +0300 | [diff] [blame] | 3447 | else |
| 3448 | ppgtt = i915_vm_to_ppgtt(vm); |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3449 | |
Chris Wilson | 16a011c | 2017-02-15 08:43:45 +0000 | [diff] [blame] | 3450 | gen6_write_page_range(ppgtt, 0, ppgtt->base.total); |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3451 | } |
| 3452 | } |
| 3453 | |
Chris Wilson | 7c3f86b | 2017-01-12 11:00:49 +0000 | [diff] [blame] | 3454 | i915_ggtt_invalidate(dev_priv); |
Daniel Vetter | fa42331 | 2015-04-14 17:35:23 +0200 | [diff] [blame] | 3455 | } |
| 3456 | |
Tvrtko Ursulin | 804beb4 | 2015-09-21 10:45:33 +0100 | [diff] [blame] | 3457 | static struct scatterlist * |
Ville Syrjälä | 2d7f3bd | 2016-01-14 15:22:11 +0200 | [diff] [blame] | 3458 | rotate_pages(const dma_addr_t *in, unsigned int offset, |
Tvrtko Ursulin | 804beb4 | 2015-09-21 10:45:33 +0100 | [diff] [blame] | 3459 | unsigned int width, unsigned int height, |
Ville Syrjälä | 8713025 | 2016-01-20 21:05:23 +0200 | [diff] [blame] | 3460 | unsigned int stride, |
Tvrtko Ursulin | 804beb4 | 2015-09-21 10:45:33 +0100 | [diff] [blame] | 3461 | struct sg_table *st, struct scatterlist *sg) |
Tvrtko Ursulin | fe14d5f | 2014-12-10 17:27:58 +0000 | [diff] [blame] | 3462 | { |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3463 | unsigned int column, row; |
| 3464 | unsigned int src_idx; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3465 | |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3466 | for (column = 0; column < width; column++) { |
Ville Syrjälä | 8713025 | 2016-01-20 21:05:23 +0200 | [diff] [blame] | 3467 | src_idx = stride * (height - 1) + column; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3468 | for (row = 0; row < height; row++) { |
| 3469 | st->nents++; |
| 3470 | /* We don't need the pages, but need to initialize |
| 3471 | * the entries so the sg list can be happily traversed. |
| 3472 | * The only thing we need are DMA addresses. |
| 3473 | */ |
| 3474 | sg_set_page(sg, NULL, PAGE_SIZE, 0); |
Tvrtko Ursulin | 804beb4 | 2015-09-21 10:45:33 +0100 | [diff] [blame] | 3475 | sg_dma_address(sg) = in[offset + src_idx]; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3476 | sg_dma_len(sg) = PAGE_SIZE; |
| 3477 | sg = sg_next(sg); |
Ville Syrjälä | 8713025 | 2016-01-20 21:05:23 +0200 | [diff] [blame] | 3478 | src_idx -= stride; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3479 | } |
| 3480 | } |
Tvrtko Ursulin | 804beb4 | 2015-09-21 10:45:33 +0100 | [diff] [blame] | 3481 | |
| 3482 | return sg; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3483 | } |
| 3484 | |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 3485 | static noinline struct sg_table * |
| 3486 | intel_rotate_pages(struct intel_rotation_info *rot_info, |
| 3487 | struct drm_i915_gem_object *obj) |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3488 | { |
Chris Wilson | 75c7b0b | 2017-02-15 08:43:57 +0000 | [diff] [blame] | 3489 | const unsigned long n_pages = obj->base.size / PAGE_SIZE; |
Ville Syrjälä | 6687c90 | 2015-09-15 13:16:41 +0300 | [diff] [blame] | 3490 | unsigned int size = intel_rotation_info_size(rot_info); |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 3491 | struct sgt_iter sgt_iter; |
| 3492 | dma_addr_t dma_addr; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3493 | unsigned long i; |
| 3494 | dma_addr_t *page_addr_list; |
| 3495 | struct sg_table *st; |
Tvrtko Ursulin | 89e3e14 | 2015-09-21 10:45:34 +0100 | [diff] [blame] | 3496 | struct scatterlist *sg; |
Tvrtko Ursulin | 1d00dad | 2015-03-25 10:15:26 +0000 | [diff] [blame] | 3497 | int ret = -ENOMEM; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3498 | |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3499 | /* Allocate a temporary list of source pages for random access. */ |
Michal Hocko | 2098105 | 2017-05-17 14:23:12 +0200 | [diff] [blame] | 3500 | page_addr_list = kvmalloc_array(n_pages, |
Chris Wilson | f2a85e1 | 2016-04-08 12:11:13 +0100 | [diff] [blame] | 3501 | sizeof(dma_addr_t), |
| 3502 | GFP_TEMPORARY); |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3503 | if (!page_addr_list) |
| 3504 | return ERR_PTR(ret); |
| 3505 | |
| 3506 | /* Allocate target SG list. */ |
| 3507 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
| 3508 | if (!st) |
| 3509 | goto err_st_alloc; |
| 3510 | |
Ville Syrjälä | 6687c90 | 2015-09-15 13:16:41 +0300 | [diff] [blame] | 3511 | ret = sg_alloc_table(st, size, GFP_KERNEL); |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3512 | if (ret) |
| 3513 | goto err_sg_alloc; |
| 3514 | |
| 3515 | /* Populate source page list from the object. */ |
| 3516 | i = 0; |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3517 | for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages) |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 3518 | page_addr_list[i++] = dma_addr; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3519 | |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 3520 | GEM_BUG_ON(i != n_pages); |
Ville Syrjälä | 11f2032 | 2016-02-15 22:54:46 +0200 | [diff] [blame] | 3521 | st->nents = 0; |
| 3522 | sg = st->sgl; |
| 3523 | |
Ville Syrjälä | 6687c90 | 2015-09-15 13:16:41 +0300 | [diff] [blame] | 3524 | for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { |
| 3525 | sg = rotate_pages(page_addr_list, rot_info->plane[i].offset, |
| 3526 | rot_info->plane[i].width, rot_info->plane[i].height, |
| 3527 | rot_info->plane[i].stride, st, sg); |
Tvrtko Ursulin | 89e3e14 | 2015-09-21 10:45:34 +0100 | [diff] [blame] | 3528 | } |
| 3529 | |
Ville Syrjälä | 6687c90 | 2015-09-15 13:16:41 +0300 | [diff] [blame] | 3530 | DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n", |
| 3531 | obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3532 | |
Michal Hocko | 2098105 | 2017-05-17 14:23:12 +0200 | [diff] [blame] | 3533 | kvfree(page_addr_list); |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3534 | |
| 3535 | return st; |
| 3536 | |
| 3537 | err_sg_alloc: |
| 3538 | kfree(st); |
| 3539 | err_st_alloc: |
Michal Hocko | 2098105 | 2017-05-17 14:23:12 +0200 | [diff] [blame] | 3540 | kvfree(page_addr_list); |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3541 | |
Ville Syrjälä | 6687c90 | 2015-09-15 13:16:41 +0300 | [diff] [blame] | 3542 | DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", |
| 3543 | obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); |
| 3544 | |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3545 | return ERR_PTR(ret); |
| 3546 | } |
| 3547 | |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 3548 | static noinline struct sg_table * |
Joonas Lahtinen | 8bd7ef1 | 2015-05-06 14:35:38 +0300 | [diff] [blame] | 3549 | intel_partial_pages(const struct i915_ggtt_view *view, |
| 3550 | struct drm_i915_gem_object *obj) |
| 3551 | { |
| 3552 | struct sg_table *st; |
Chris Wilson | d2a84a7 | 2016-10-28 13:58:34 +0100 | [diff] [blame] | 3553 | struct scatterlist *sg, *iter; |
Chris Wilson | 8bab1193 | 2017-01-14 00:28:25 +0000 | [diff] [blame] | 3554 | unsigned int count = view->partial.size; |
Chris Wilson | d2a84a7 | 2016-10-28 13:58:34 +0100 | [diff] [blame] | 3555 | unsigned int offset; |
Joonas Lahtinen | 8bd7ef1 | 2015-05-06 14:35:38 +0300 | [diff] [blame] | 3556 | int ret = -ENOMEM; |
| 3557 | |
| 3558 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
| 3559 | if (!st) |
| 3560 | goto err_st_alloc; |
| 3561 | |
Chris Wilson | d2a84a7 | 2016-10-28 13:58:34 +0100 | [diff] [blame] | 3562 | ret = sg_alloc_table(st, count, GFP_KERNEL); |
Joonas Lahtinen | 8bd7ef1 | 2015-05-06 14:35:38 +0300 | [diff] [blame] | 3563 | if (ret) |
| 3564 | goto err_sg_alloc; |
| 3565 | |
Chris Wilson | 8bab1193 | 2017-01-14 00:28:25 +0000 | [diff] [blame] | 3566 | iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); |
Chris Wilson | d2a84a7 | 2016-10-28 13:58:34 +0100 | [diff] [blame] | 3567 | GEM_BUG_ON(!iter); |
| 3568 | |
Joonas Lahtinen | 8bd7ef1 | 2015-05-06 14:35:38 +0300 | [diff] [blame] | 3569 | sg = st->sgl; |
| 3570 | st->nents = 0; |
Chris Wilson | d2a84a7 | 2016-10-28 13:58:34 +0100 | [diff] [blame] | 3571 | do { |
| 3572 | unsigned int len; |
Joonas Lahtinen | 8bd7ef1 | 2015-05-06 14:35:38 +0300 | [diff] [blame] | 3573 | |
Chris Wilson | d2a84a7 | 2016-10-28 13:58:34 +0100 | [diff] [blame] | 3574 | len = min(iter->length - (offset << PAGE_SHIFT), |
| 3575 | count << PAGE_SHIFT); |
| 3576 | sg_set_page(sg, NULL, len, 0); |
| 3577 | sg_dma_address(sg) = |
| 3578 | sg_dma_address(iter) + (offset << PAGE_SHIFT); |
| 3579 | sg_dma_len(sg) = len; |
Joonas Lahtinen | 8bd7ef1 | 2015-05-06 14:35:38 +0300 | [diff] [blame] | 3580 | |
Joonas Lahtinen | 8bd7ef1 | 2015-05-06 14:35:38 +0300 | [diff] [blame] | 3581 | st->nents++; |
Chris Wilson | d2a84a7 | 2016-10-28 13:58:34 +0100 | [diff] [blame] | 3582 | count -= len >> PAGE_SHIFT; |
| 3583 | if (count == 0) { |
| 3584 | sg_mark_end(sg); |
| 3585 | return st; |
| 3586 | } |
Joonas Lahtinen | 8bd7ef1 | 2015-05-06 14:35:38 +0300 | [diff] [blame] | 3587 | |
Chris Wilson | d2a84a7 | 2016-10-28 13:58:34 +0100 | [diff] [blame] | 3588 | sg = __sg_next(sg); |
| 3589 | iter = __sg_next(iter); |
| 3590 | offset = 0; |
| 3591 | } while (1); |
Joonas Lahtinen | 8bd7ef1 | 2015-05-06 14:35:38 +0300 | [diff] [blame] | 3592 | |
| 3593 | err_sg_alloc: |
| 3594 | kfree(st); |
| 3595 | err_st_alloc: |
| 3596 | return ERR_PTR(ret); |
| 3597 | } |
| 3598 | |
Daniel Vetter | 70b9f6f | 2015-04-14 17:35:27 +0200 | [diff] [blame] | 3599 | static int |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3600 | i915_get_ggtt_vma_pages(struct i915_vma *vma) |
| 3601 | { |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 3602 | int ret; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3603 | |
Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 3604 | /* The vma->pages are only valid within the lifespan of the borrowed |
| 3605 | * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so |
| 3606 | * must be the vma->pages. A simple rule is that vma->pages must only |
| 3607 | * be accessed when the obj->mm.pages are pinned. |
| 3608 | */ |
| 3609 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); |
| 3610 | |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 3611 | switch (vma->ggtt_view.type) { |
| 3612 | case I915_GGTT_VIEW_NORMAL: |
| 3613 | vma->pages = vma->obj->mm.pages; |
Tvrtko Ursulin | fe14d5f | 2014-12-10 17:27:58 +0000 | [diff] [blame] | 3614 | return 0; |
| 3615 | |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 3616 | case I915_GGTT_VIEW_ROTATED: |
Chris Wilson | 247177d | 2016-08-15 10:48:47 +0100 | [diff] [blame] | 3617 | vma->pages = |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 3618 | intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); |
| 3619 | break; |
| 3620 | |
| 3621 | case I915_GGTT_VIEW_PARTIAL: |
Chris Wilson | 247177d | 2016-08-15 10:48:47 +0100 | [diff] [blame] | 3622 | vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 3623 | break; |
| 3624 | |
| 3625 | default: |
Tvrtko Ursulin | fe14d5f | 2014-12-10 17:27:58 +0000 | [diff] [blame] | 3626 | WARN_ONCE(1, "GGTT view %u not implemented!\n", |
| 3627 | vma->ggtt_view.type); |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 3628 | return -EINVAL; |
| 3629 | } |
Tvrtko Ursulin | fe14d5f | 2014-12-10 17:27:58 +0000 | [diff] [blame] | 3630 | |
Chris Wilson | ba7a574 | 2017-02-15 08:43:35 +0000 | [diff] [blame] | 3631 | ret = 0; |
| 3632 | if (unlikely(IS_ERR(vma->pages))) { |
Chris Wilson | 247177d | 2016-08-15 10:48:47 +0100 | [diff] [blame] | 3633 | ret = PTR_ERR(vma->pages); |
| 3634 | vma->pages = NULL; |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3635 | DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", |
| 3636 | vma->ggtt_view.type, ret); |
Tvrtko Ursulin | fe14d5f | 2014-12-10 17:27:58 +0000 | [diff] [blame] | 3637 | } |
Tvrtko Ursulin | 50470bb | 2015-03-23 11:10:36 +0000 | [diff] [blame] | 3638 | return ret; |
Tvrtko Ursulin | fe14d5f | 2014-12-10 17:27:58 +0000 | [diff] [blame] | 3639 | } |
| 3640 | |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3641 | /** |
Chris Wilson | 625d988 | 2017-01-11 11:23:11 +0000 | [diff] [blame] | 3642 | * i915_gem_gtt_reserve - reserve a node in an address_space (GTT) |
Chris Wilson | a4dbf7c | 2017-01-12 16:45:59 +0000 | [diff] [blame] | 3643 | * @vm: the &struct i915_address_space |
| 3644 | * @node: the &struct drm_mm_node (typically i915_vma.mode) |
| 3645 | * @size: how much space to allocate inside the GTT, |
| 3646 | * must be #I915_GTT_PAGE_SIZE aligned |
| 3647 | * @offset: where to insert inside the GTT, |
| 3648 | * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node |
| 3649 | * (@offset + @size) must fit within the address space |
| 3650 | * @color: color to apply to node, if this node is not from a VMA, |
| 3651 | * color must be #I915_COLOR_UNEVICTABLE |
| 3652 | * @flags: control search and eviction behaviour |
Chris Wilson | 625d988 | 2017-01-11 11:23:11 +0000 | [diff] [blame] | 3653 | * |
| 3654 | * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside |
| 3655 | * the address space (using @size and @color). If the @node does not fit, it |
| 3656 | * tries to evict any overlapping nodes from the GTT, including any |
| 3657 | * neighbouring nodes if the colors do not match (to ensure guard pages between |
| 3658 | * differing domains). See i915_gem_evict_for_node() for the gory details |
| 3659 | * on the eviction algorithm. #PIN_NONBLOCK may used to prevent waiting on |
| 3660 | * evicting active overlapping objects, and any overlapping node that is pinned |
| 3661 | * or marked as unevictable will also result in failure. |
| 3662 | * |
| 3663 | * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if |
| 3664 | * asked to wait for eviction and interrupted. |
| 3665 | */ |
| 3666 | int i915_gem_gtt_reserve(struct i915_address_space *vm, |
| 3667 | struct drm_mm_node *node, |
| 3668 | u64 size, u64 offset, unsigned long color, |
| 3669 | unsigned int flags) |
| 3670 | { |
| 3671 | int err; |
| 3672 | |
| 3673 | GEM_BUG_ON(!size); |
| 3674 | GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); |
| 3675 | GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT)); |
| 3676 | GEM_BUG_ON(range_overflows(offset, size, vm->total)); |
Chris Wilson | 3fec7ec | 2017-01-15 13:47:46 +0000 | [diff] [blame] | 3677 | GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base); |
Chris Wilson | 9734ad1 | 2017-01-15 17:27:40 +0000 | [diff] [blame] | 3678 | GEM_BUG_ON(drm_mm_node_allocated(node)); |
Chris Wilson | 625d988 | 2017-01-11 11:23:11 +0000 | [diff] [blame] | 3679 | |
| 3680 | node->size = size; |
| 3681 | node->start = offset; |
| 3682 | node->color = color; |
| 3683 | |
| 3684 | err = drm_mm_reserve_node(&vm->mm, node); |
| 3685 | if (err != -ENOSPC) |
| 3686 | return err; |
| 3687 | |
Chris Wilson | 616d9ce | 2017-06-16 15:05:21 +0100 | [diff] [blame] | 3688 | if (flags & PIN_NOEVICT) |
| 3689 | return -ENOSPC; |
| 3690 | |
Chris Wilson | 625d988 | 2017-01-11 11:23:11 +0000 | [diff] [blame] | 3691 | err = i915_gem_evict_for_node(vm, node, flags); |
| 3692 | if (err == 0) |
| 3693 | err = drm_mm_reserve_node(&vm->mm, node); |
| 3694 | |
| 3695 | return err; |
| 3696 | } |
| 3697 | |
Chris Wilson | 606fec9 | 2017-01-11 11:23:12 +0000 | [diff] [blame] | 3698 | static u64 random_offset(u64 start, u64 end, u64 len, u64 align) |
| 3699 | { |
| 3700 | u64 range, addr; |
| 3701 | |
| 3702 | GEM_BUG_ON(range_overflows(start, len, end)); |
| 3703 | GEM_BUG_ON(round_up(start, align) > round_down(end - len, align)); |
| 3704 | |
| 3705 | range = round_down(end - len, align) - round_up(start, align); |
| 3706 | if (range) { |
| 3707 | if (sizeof(unsigned long) == sizeof(u64)) { |
| 3708 | addr = get_random_long(); |
| 3709 | } else { |
| 3710 | addr = get_random_int(); |
| 3711 | if (range > U32_MAX) { |
| 3712 | addr <<= 32; |
| 3713 | addr |= get_random_int(); |
| 3714 | } |
| 3715 | } |
| 3716 | div64_u64_rem(addr, range, &addr); |
| 3717 | start += addr; |
| 3718 | } |
| 3719 | |
| 3720 | return round_up(start, align); |
| 3721 | } |
| 3722 | |
Chris Wilson | 625d988 | 2017-01-11 11:23:11 +0000 | [diff] [blame] | 3723 | /** |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3724 | * i915_gem_gtt_insert - insert a node into an address_space (GTT) |
Chris Wilson | a4dbf7c | 2017-01-12 16:45:59 +0000 | [diff] [blame] | 3725 | * @vm: the &struct i915_address_space |
| 3726 | * @node: the &struct drm_mm_node (typically i915_vma.node) |
| 3727 | * @size: how much space to allocate inside the GTT, |
| 3728 | * must be #I915_GTT_PAGE_SIZE aligned |
| 3729 | * @alignment: required alignment of starting offset, may be 0 but |
| 3730 | * if specified, this must be a power-of-two and at least |
| 3731 | * #I915_GTT_MIN_ALIGNMENT |
| 3732 | * @color: color to apply to node |
| 3733 | * @start: start of any range restriction inside GTT (0 for all), |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3734 | * must be #I915_GTT_PAGE_SIZE aligned |
Chris Wilson | a4dbf7c | 2017-01-12 16:45:59 +0000 | [diff] [blame] | 3735 | * @end: end of any range restriction inside GTT (U64_MAX for all), |
| 3736 | * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX |
| 3737 | * @flags: control search and eviction behaviour |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3738 | * |
| 3739 | * i915_gem_gtt_insert() first searches for an available hole into which |
| 3740 | * is can insert the node. The hole address is aligned to @alignment and |
| 3741 | * its @size must then fit entirely within the [@start, @end] bounds. The |
| 3742 | * nodes on either side of the hole must match @color, or else a guard page |
| 3743 | * will be inserted between the two nodes (or the node evicted). If no |
Chris Wilson | 606fec9 | 2017-01-11 11:23:12 +0000 | [diff] [blame] | 3744 | * suitable hole is found, first a victim is randomly selected and tested |
| 3745 | * for eviction, otherwise then the LRU list of objects within the GTT |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3746 | * is scanned to find the first set of replacement nodes to create the hole. |
| 3747 | * Those old overlapping nodes are evicted from the GTT (and so must be |
| 3748 | * rebound before any future use). Any node that is currently pinned cannot |
| 3749 | * be evicted (see i915_vma_pin()). Similar if the node's VMA is currently |
| 3750 | * active and #PIN_NONBLOCK is specified, that node is also skipped when |
| 3751 | * searching for an eviction candidate. See i915_gem_evict_something() for |
| 3752 | * the gory details on the eviction algorithm. |
| 3753 | * |
| 3754 | * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if |
| 3755 | * asked to wait for eviction and interrupted. |
| 3756 | */ |
| 3757 | int i915_gem_gtt_insert(struct i915_address_space *vm, |
| 3758 | struct drm_mm_node *node, |
| 3759 | u64 size, u64 alignment, unsigned long color, |
| 3760 | u64 start, u64 end, unsigned int flags) |
| 3761 | { |
Chris Wilson | 4e64e55 | 2017-02-02 21:04:38 +0000 | [diff] [blame] | 3762 | enum drm_mm_insert_mode mode; |
Chris Wilson | 606fec9 | 2017-01-11 11:23:12 +0000 | [diff] [blame] | 3763 | u64 offset; |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3764 | int err; |
| 3765 | |
| 3766 | lockdep_assert_held(&vm->i915->drm.struct_mutex); |
| 3767 | GEM_BUG_ON(!size); |
| 3768 | GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); |
| 3769 | GEM_BUG_ON(alignment && !is_power_of_2(alignment)); |
| 3770 | GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT)); |
| 3771 | GEM_BUG_ON(start >= end); |
| 3772 | GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); |
| 3773 | GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); |
Chris Wilson | 3fec7ec | 2017-01-15 13:47:46 +0000 | [diff] [blame] | 3774 | GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base); |
Chris Wilson | 9734ad1 | 2017-01-15 17:27:40 +0000 | [diff] [blame] | 3775 | GEM_BUG_ON(drm_mm_node_allocated(node)); |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3776 | |
| 3777 | if (unlikely(range_overflows(start, size, end))) |
| 3778 | return -ENOSPC; |
| 3779 | |
| 3780 | if (unlikely(round_up(start, alignment) > round_down(end - size, alignment))) |
| 3781 | return -ENOSPC; |
| 3782 | |
Chris Wilson | 4e64e55 | 2017-02-02 21:04:38 +0000 | [diff] [blame] | 3783 | mode = DRM_MM_INSERT_BEST; |
| 3784 | if (flags & PIN_HIGH) |
| 3785 | mode = DRM_MM_INSERT_HIGH; |
| 3786 | if (flags & PIN_MAPPABLE) |
| 3787 | mode = DRM_MM_INSERT_LOW; |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3788 | |
| 3789 | /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks, |
| 3790 | * so we know that we always have a minimum alignment of 4096. |
| 3791 | * The drm_mm range manager is optimised to return results |
| 3792 | * with zero alignment, so where possible use the optimal |
| 3793 | * path. |
| 3794 | */ |
| 3795 | BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE); |
| 3796 | if (alignment <= I915_GTT_MIN_ALIGNMENT) |
| 3797 | alignment = 0; |
| 3798 | |
Chris Wilson | 4e64e55 | 2017-02-02 21:04:38 +0000 | [diff] [blame] | 3799 | err = drm_mm_insert_node_in_range(&vm->mm, node, |
| 3800 | size, alignment, color, |
| 3801 | start, end, mode); |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3802 | if (err != -ENOSPC) |
| 3803 | return err; |
| 3804 | |
Chris Wilson | 616d9ce | 2017-06-16 15:05:21 +0100 | [diff] [blame] | 3805 | if (flags & PIN_NOEVICT) |
| 3806 | return -ENOSPC; |
| 3807 | |
Chris Wilson | 606fec9 | 2017-01-11 11:23:12 +0000 | [diff] [blame] | 3808 | /* No free space, pick a slot at random. |
| 3809 | * |
| 3810 | * There is a pathological case here using a GTT shared between |
| 3811 | * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt): |
| 3812 | * |
| 3813 | * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->| |
| 3814 | * (64k objects) (448k objects) |
| 3815 | * |
| 3816 | * Now imagine that the eviction LRU is ordered top-down (just because |
| 3817 | * pathology meets real life), and that we need to evict an object to |
| 3818 | * make room inside the aperture. The eviction scan then has to walk |
| 3819 | * the 448k list before it finds one within range. And now imagine that |
| 3820 | * it has to search for a new hole between every byte inside the memcpy, |
| 3821 | * for several simultaneous clients. |
| 3822 | * |
| 3823 | * On a full-ppgtt system, if we have run out of available space, there |
| 3824 | * will be lots and lots of objects in the eviction list! Again, |
| 3825 | * searching that LRU list may be slow if we are also applying any |
| 3826 | * range restrictions (e.g. restriction to low 4GiB) and so, for |
| 3827 | * simplicity and similarilty between different GTT, try the single |
| 3828 | * random replacement first. |
| 3829 | */ |
| 3830 | offset = random_offset(start, end, |
| 3831 | size, alignment ?: I915_GTT_MIN_ALIGNMENT); |
| 3832 | err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags); |
| 3833 | if (err != -ENOSPC) |
| 3834 | return err; |
| 3835 | |
| 3836 | /* Randomly selected placement is pinned, do a search */ |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3837 | err = i915_gem_evict_something(vm, size, alignment, color, |
| 3838 | start, end, flags); |
| 3839 | if (err) |
| 3840 | return err; |
| 3841 | |
Chris Wilson | 4e64e55 | 2017-02-02 21:04:38 +0000 | [diff] [blame] | 3842 | return drm_mm_insert_node_in_range(&vm->mm, node, |
| 3843 | size, alignment, color, |
| 3844 | start, end, DRM_MM_INSERT_EVICT); |
Chris Wilson | e007b19 | 2017-01-11 11:23:10 +0000 | [diff] [blame] | 3845 | } |
Chris Wilson | 3b5bb0a | 2017-02-13 17:15:18 +0000 | [diff] [blame] | 3846 | |
| 3847 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
| 3848 | #include "selftests/mock_gtt.c" |
Chris Wilson | 1c42819 | 2017-02-13 17:15:38 +0000 | [diff] [blame] | 3849 | #include "selftests/i915_gem_gtt.c" |
Chris Wilson | 3b5bb0a | 2017-02-13 17:15:18 +0000 | [diff] [blame] | 3850 | #endif |