/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing
 * pages in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * The above would represent a normal GGTT view as normally mapped for GPU or
 * CPU rendering. In contrast, fed to the display engine would be an
 * alternative view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and the layout of pages in the alternative
 * view differ from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that a passed
 * in struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

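/*
 * Illustrative sketch only, not driver code: a caller wanting the rotated
 * view of an object can build the view descriptor on the stack and rely on
 * the copy semantics described above. The pin helper and flag names are
 * assumed from i915_drv.h of this era; rot_info stands in for a struct
 * intel_rotation_info the caller already owns.
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_ROTATED,
 *		.rotated = *rot_info,
 *	};
 *	struct i915_vma *vma;
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * The local view may go out of scope immediately afterwards; the VMA keeps
 * its own copy of the metadata.
 */
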
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

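/*
 * Reconcile the i915.enable_ppgtt module parameter with what the hardware
 * and the virtualisation layer can actually support. The return value
 * selects the PPGTT mode to use: 0 for none, 1 for aliasing PPGTT, 2 for
 * full PPGTT and 3 for full 48bit PPGTT.
 */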
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

	/* Disable PPGTT on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}

	/* Early VLV (pre-B3 stepping) doesn't have usable PPGTT */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
						 vma->size);
		if (ret)
			return ret;
	}

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

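/*
 * Worked example for the encoding above, assuming the usual x86 _PAGE_*
 * bit positions (bit 0 present, bit 1 RW, bits 3/4 PWT/PCD, bit 7 PAT):
 * gen8_pte_encode(0x12345000, I915_CACHE_LLC) hits the default case and
 * yields 0x12345000 | _PAGE_PAT | _PAGE_RW | _PAGE_PRESENT = 0x12345083,
 * whereas I915_CACHE_NONE sets PPAT_UNCACHED_INDEX (PWT | PCD) instead,
 * giving 0x1234501b.
 */
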
static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	if (vm->free_pages.nr)
		return vm->free_pages.pages[--vm->free_pages.nr];

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	if (vm->pt_kmap_wc)
		set_pages_array_wc(&page, 1);

	return page;
}

static void vm_free_pages_release(struct i915_address_space *vm)
{
	GEM_BUG_ON(!pagevec_count(&vm->free_pages));

	if (vm->pt_kmap_wc)
		set_pages_array_wb(vm->free_pages.pages,
				   pagevec_count(&vm->free_pages));

	__pagevec_release(&vm->free_pages);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	if (!pagevec_add(&vm->free_pages, page))
		vm_free_pages_release(vm);
}

static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))

static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);
	int i;

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
	cleanup_page_dma(vm, &vm->scratch_page);
}

static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill32_px(vm, pt,
		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	unsigned int i;

	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	for (i = 0; i < I915_PDES; i++)
		pd->page_table[i] = vm->scratch_pt;
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	unsigned int i;

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	for (i = 0; i < pdpes; i++)
		pdp->page_directory[i] = vm->scratch_pd;

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	unsigned int i;

	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
		pml4->pdps[i] = vm->scratch_pdp;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that TLBs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				u64 start, u64 length)
{
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t *vaddr;

	GEM_BUG_ON(num_entries > pt->used_ptes);

	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;

	vaddr = kmap_atomic_px(pt);
	while (pte < pte_end)
		vaddr[pte++] = scratch_pte;
	kunmap_atomic(vaddr);

	return false;
}

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				u64 start, u64 length)
{
	struct i915_page_table *pt;
	u32 pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		GEM_BUG_ON(pt == vm->scratch_pt);

		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;

		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
		GEM_BUG_ON(!pd->used_pdes);
		pd->used_pdes--;

		free_pt(vm, pt);
	}

	return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
	if (!use_4lvl(vm))
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 u64 start, u64 length)
{
	struct i915_page_directory *pd;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		GEM_BUG_ON(pd == vm->scratch_pd);

		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;

		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;

		free_pd(vm, pd);
	}

	return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	unsigned int pml4e;

	GEM_BUG_ON(!use_4lvl(vm));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		GEM_BUG_ON(pdp == vm->scratch_pdp);

		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;

		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
	}
}

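/*
 * A cursor for walking an object's scatterlist while writing PTEs: sg is
 * the chunk currently being consumed, dma the next page address to encode
 * and max the exclusive end of the current chunk.
 */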
struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
};

struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		 gen8_pml4e_index(start),
		 gen8_pdpe_index(start),
		 gen8_pde_index(start),
		 gen8_pte_index(start),
	};
}

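/*
 * Worked example for the decomposition above, assuming the gen8 4-level
 * layout of 9 bits per level above a 4KiB page (shifts of 39/30/21/12):
 * start = 0x18100a06000 = 3 << 39 | 4 << 30 | 5 << 21 | 6 << 12, so the
 * cursor starts at pml4e = 3, pdpe = 4, pde = 5, pte = 6.
 */

/*
 * Writes PTEs for the pages of the sgt_dma cursor, returning true when it
 * has exhausted the current pdp while sg entries remain (the 4lvl caller
 * then advances to the next pml4e), and false once the whole scatterlist
 * has been consumed.
 */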
static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
			      enum i915_cache_level cache_level)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
	gen8_pte_t *vaddr;
	bool ret;

	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
	do {
		vaddr[idx->pte] = pte_encode | iter->dma;

		iter->dma += PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

				/* Limited by sg length for 3lvl */
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
					ret = true;
					break;
				}

				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
				pd = pdp->page_directory[idx->pdpe];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
		}
	} while (1);
	kunmap_atomic(vaddr);

	return ret;
}

static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
				   struct i915_vma *vma,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = vma->pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
				      cache_level);
}

static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
				   struct i915_vma *vma,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = vma->pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
					     &idx, cache_level))
		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
}

static void gen8_free_page_tables(struct i915_address_space *vm,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for (i = 0; i < I915_PDES; i++) {
		if (pd->page_table[i] != vm->scratch_pt)
			free_pt(vm, pd->page_table[i]);
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(vm);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (use_4lvl(vm)) {
		vm->scratch_pdp = alloc_pdp(vm);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (use_4lvl(vm))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(vm, vm->scratch_pd);
free_pt:
	free_pt(vm, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(vm);

	return ret;
}

static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	enum vgt_g2v_type msg;
	int i;

	if (use_4lvl(vm)) {
		const u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	if (use_4lvl(vm))
		free_pdp(vm, vm->scratch_pdp);
	free_pd(vm, vm->scratch_pd);
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	int i;

	for (i = 0; i < pdpes; i++) {
		if (pdp->page_directory[i] == vm->scratch_pd)
			continue;

		gen8_free_page_tables(vm, pdp->page_directory[i]);
		free_pd(vm, pdp->page_directory[i]);
	}

	free_pdp(vm, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
			continue;

		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(&ppgtt->base, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (use_4lvl(vm))
		gen8_ppgtt_cleanup_4lvl(ppgtt);
	else
		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);

	gen8_free_scratch(vm);
}

static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       u64 start, u64 length)
{
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind;

			gen8_initialize_pt(vm, pt);

			gen8_ppgtt_set_pde(vm, pd, pt, pde);
			pd->used_pdes++;
			GEM_BUG_ON(pd->used_pdes > I915_PDES);
		}

		pt->used_ptes += gen8_pte_count(start, length);
	}
	return 0;

unwind:
	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
{
	struct i915_page_directory *pd;
	u64 from = start;
	unsigned int pdpe;
	int ret;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (pd == vm->scratch_pd) {
			pd = alloc_pd(vm);
			if (IS_ERR(pd))
				goto unwind;

			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			pdp->used_pdpes++;
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
		if (unlikely(ret))
			goto unwind_pd;
	}

	return 0;

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;

			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}

		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
		if (unlikely(ret))
			goto unwind_pdp;
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
}

1208 struct i915_page_directory_pointer *pdp,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001209 u64 start, u64 length,
Michel Thierryea91e402015-07-29 17:23:57 +01001210 gen8_pte_t scratch_pte,
1211 struct seq_file *m)
1212{
Mika Kuoppala3e490042017-02-28 17:28:07 +02001213 struct i915_address_space *vm = &ppgtt->base;
Michel Thierryea91e402015-07-29 17:23:57 +01001214 struct i915_page_directory *pd;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001215 u32 pdpe;
Michel Thierryea91e402015-07-29 17:23:57 +01001216
Dave Gordone8ebd8e2015-12-08 13:30:51 +00001217 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
Michel Thierryea91e402015-07-29 17:23:57 +01001218 struct i915_page_table *pt;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001219 u64 pd_len = length;
1220 u64 pd_start = start;
1221 u32 pde;
Michel Thierryea91e402015-07-29 17:23:57 +01001222
Chris Wilsone2b763c2017-02-15 08:43:48 +00001223 if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
Michel Thierryea91e402015-07-29 17:23:57 +01001224 continue;
1225
1226 seq_printf(m, "\tPDPE #%d\n", pdpe);
Dave Gordone8ebd8e2015-12-08 13:30:51 +00001227 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001228 u32 pte;
Michel Thierryea91e402015-07-29 17:23:57 +01001229 gen8_pte_t *pt_vaddr;
1230
Chris Wilsonfe52e372017-02-15 08:43:47 +00001231 if (pd->page_table[pde] == ppgtt->base.scratch_pt)
Michel Thierryea91e402015-07-29 17:23:57 +01001232 continue;
1233
Chris Wilson9231da72017-02-15 08:43:41 +00001234 pt_vaddr = kmap_atomic_px(pt);
Michel Thierryea91e402015-07-29 17:23:57 +01001235 for (pte = 0; pte < GEN8_PTES; pte += 4) {
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001236 u64 va = (pdpe << GEN8_PDPE_SHIFT |
1237 pde << GEN8_PDE_SHIFT |
1238 pte << GEN8_PTE_SHIFT);
Michel Thierryea91e402015-07-29 17:23:57 +01001239 int i;
1240 bool found = false;
1241
1242 for (i = 0; i < 4; i++)
1243 if (pt_vaddr[pte + i] != scratch_pte)
1244 found = true;
1245 if (!found)
1246 continue;
1247
1248 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1249 for (i = 0; i < 4; i++) {
1250 if (pt_vaddr[pte + i] != scratch_pte)
1251 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1252 else
1253 seq_puts(m, " SCRATCH ");
1254 }
1255 seq_puts(m, "\n");
1256 }
Michel Thierryea91e402015-07-29 17:23:57 +01001257 kunmap_atomic(pt_vaddr);
1258 }
1259 }
1260}
1261
1262static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1263{
1264 struct i915_address_space *vm = &ppgtt->base;
Chris Wilson894cceb2017-02-15 08:43:37 +00001265 const gen8_pte_t scratch_pte =
1266 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
Chris Wilson381b9432017-02-15 08:43:54 +00001267 u64 start = 0, length = ppgtt->base.total;
Michel Thierryea91e402015-07-29 17:23:57 +01001268
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001269 if (use_4lvl(vm)) {
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001270 u64 pml4e;
Michel Thierryea91e402015-07-29 17:23:57 +01001271 struct i915_pml4 *pml4 = &ppgtt->pml4;
1272 struct i915_page_directory_pointer *pdp;
1273
Dave Gordone8ebd8e2015-12-08 13:30:51 +00001274 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
Chris Wilsonc5d092a2017-02-15 08:43:49 +00001275 if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
Michel Thierryea91e402015-07-29 17:23:57 +01001276 continue;
1277
1278 seq_printf(m, " PML4E #%llu\n", pml4e);
Chris Wilson84486612017-02-15 08:43:40 +00001279 gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
Michel Thierryea91e402015-07-29 17:23:57 +01001280 }
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001281 } else {
1282 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
Michel Thierryea91e402015-07-29 17:23:57 +01001283 }
1284}
1285
Chris Wilsone2b763c2017-02-15 08:43:48 +00001286static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001287{
Chris Wilsone2b763c2017-02-15 08:43:48 +00001288 struct i915_address_space *vm = &ppgtt->base;
1289 struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1290 struct i915_page_directory *pd;
1291 u64 start = 0, length = ppgtt->base.total;
1292 u64 from = start;
1293 unsigned int pdpe;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001294
Chris Wilsone2b763c2017-02-15 08:43:48 +00001295 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1296 pd = alloc_pd(vm);
1297 if (IS_ERR(pd))
1298 goto unwind;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001299
Chris Wilsone2b763c2017-02-15 08:43:48 +00001300 gen8_initialize_pd(vm, pd);
1301 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1302 pdp->used_pdpes++;
1303 }
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001304
Chris Wilsone2b763c2017-02-15 08:43:48 +00001305 pdp->used_pdpes++; /* never remove */
1306 return 0;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001307
Chris Wilsone2b763c2017-02-15 08:43:48 +00001308unwind:
1309 start -= from;
1310 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1311 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1312 free_pd(vm, pd);
1313 }
1314 pdp->used_pdpes = 0;
1315 return -ENOMEM;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001316}
1317
Daniel Vettereb0b44a2015-03-18 14:47:59 +01001318/*
Ben Widawskyf3a964b2014-02-19 22:05:42 -08001319 * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP registers,
1320 * with a net effect resembling a 2-level page table in normal x86 terms. Each
1321 * PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of legacy 32b address
1322 * space (see the arithmetic sketch after gen8_ppgtt_init() below).
Ben Widawsky37aca442013-11-04 20:47:32 -08001323 *
Ben Widawskyf3a964b2014-02-19 22:05:42 -08001324 */
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001325static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
Ben Widawsky37aca442013-11-04 20:47:32 -08001326{
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001327 struct i915_address_space *vm = &ppgtt->base;
1328 struct drm_i915_private *dev_priv = vm->i915;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001329 int ret;
Michel Thierry69876be2015-04-08 12:13:27 +01001330
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001331 ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1332 1ULL << 48 :
1333 1ULL << 32;
1334
Mika Kuoppala8776f022015-06-30 18:16:40 +03001335 ret = gen8_init_scratch(&ppgtt->base);
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001336 if (ret) {
1337 ppgtt->base.total = 0;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001338 return ret;
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001339 }
Michel Thierry69876be2015-04-08 12:13:27 +01001340
Chris Wilson84486612017-02-15 08:43:40 +00001341 /* There are only a few exceptions for gen >= 6: chv and bxt.
1342 * And since we are not sure about the latter, play it safe for now.
1343 */
1344 if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
1345 ppgtt->base.pt_kmap_wc = true;
1346
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001347 if (use_4lvl(vm)) {
Chris Wilson84486612017-02-15 08:43:40 +00001348 ret = setup_px(&ppgtt->base, &ppgtt->pml4);
Michel Thierry762d9932015-07-30 11:05:29 +01001349 if (ret)
1350 goto free_scratch;
Michel Thierry6ac18502015-07-29 17:23:46 +01001351
Michel Thierry69ab76f2015-07-29 17:23:55 +01001352 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1353
Mika Kuoppalae7167762017-02-28 17:28:10 +02001354 ppgtt->switch_mm = gen8_mm_switch_4lvl;
Chris Wilsonc5d092a2017-02-15 08:43:49 +00001355 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
Chris Wilson894cceb2017-02-15 08:43:37 +00001356 ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
Chris Wilsonfe52e372017-02-15 08:43:47 +00001357 ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
Michel Thierry762d9932015-07-30 11:05:29 +01001358 } else {
Chris Wilsonfe52e372017-02-15 08:43:47 +00001359 ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
Michel Thierry81ba8aef2015-08-03 09:52:01 +01001360 if (ret)
1361 goto free_scratch;
1362
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001363 if (intel_vgpu_active(dev_priv)) {
Chris Wilsone2b763c2017-02-15 08:43:48 +00001364 ret = gen8_preallocate_top_level_pdp(ppgtt);
1365 if (ret) {
1366 __pdp_fini(&ppgtt->pdp);
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001367 goto free_scratch;
Chris Wilsone2b763c2017-02-15 08:43:48 +00001368 }
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001369 }
Chris Wilson894cceb2017-02-15 08:43:37 +00001370
Mika Kuoppalae7167762017-02-28 17:28:10 +02001371 ppgtt->switch_mm = gen8_mm_switch_3lvl;
Chris Wilsonc5d092a2017-02-15 08:43:49 +00001372 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
Chris Wilson894cceb2017-02-15 08:43:37 +00001373 ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
Chris Wilsonfe52e372017-02-15 08:43:47 +00001374 ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
Michel Thierry81ba8aef2015-08-03 09:52:01 +01001375 }
Michel Thierry6ac18502015-07-29 17:23:46 +01001376
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001377 if (intel_vgpu_active(dev_priv))
Zhiyuan Lv650da342015-08-28 15:41:18 +08001378 gen8_ppgtt_notify_vgt(ppgtt, true);
1379
Mika Kuoppala054b9ac2017-02-28 17:28:11 +02001380 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1381 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1382 ppgtt->base.bind_vma = ppgtt_bind_vma;
1383 ppgtt->debug_dump = gen8_dump_ppgtt;
1384
Michel Thierryd7b26332015-04-08 12:13:34 +01001385 return 0;
Michel Thierry6ac18502015-07-29 17:23:46 +01001386
1387free_scratch:
1388 gen8_free_scratch(&ppgtt->base);
1389 return ret;
Michel Thierryd7b26332015-04-08 12:13:34 +01001390}
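/*
 * Editor's aside — a minimal arithmetic sketch, not driver code: the two
 * ppgtt->base.total values chosen in gen8_ppgtt_init() above fall out of
 * the page-table fan-out (512 entries per 4 KiB level, at most 4 PDPs in
 * the 3-level legacy case).
 */
#if 0
static void gen8_va_span_sketch(void)
{
	/* 3lvl legacy 32b: 4 PDPEs x 512 PDEs x 512 PTEs x 4 KiB = 4 GiB */
	BUILD_BUG_ON(4ULL * 512 * 512 * 4096 != 1ULL << 32);

	/* 4lvl 48b: 512 PML4Es x 512 PDPEs x 512 PDEs x 512 PTEs x 4 KiB */
	BUILD_BUG_ON(512ULL * 512 * 512 * 512 * 4096 != 1ULL << 48);
}
#endif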
1391
Ben Widawsky87d60b62013-12-06 14:11:29 -08001392static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1393{
Ben Widawsky87d60b62013-12-06 14:11:29 -08001394 struct i915_address_space *vm = &ppgtt->base;
Michel Thierry09942c62015-04-08 12:13:30 +01001395 struct i915_page_table *unused;
Michel Thierry07749ef2015-03-16 16:00:54 +00001396 gen6_pte_t scratch_pte;
Chris Wilson381b9432017-02-15 08:43:54 +00001397 u32 pd_entry, pte, pde;
1398 u32 start = 0, length = ppgtt->base.total;
Ben Widawsky87d60b62013-12-06 14:11:29 -08001399
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001400 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02001401 I915_CACHE_LLC, 0);
Ben Widawsky87d60b62013-12-06 14:11:29 -08001402
Dave Gordon731f74c2016-06-24 19:37:46 +01001403 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
Ben Widawsky87d60b62013-12-06 14:11:29 -08001404 u32 expected;
Michel Thierry07749ef2015-03-16 16:00:54 +00001405 gen6_pte_t *pt_vaddr;
Mika Kuoppala567047b2015-06-25 18:35:12 +03001406 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
Michel Thierry09942c62015-04-08 12:13:30 +01001407 pd_entry = readl(ppgtt->pd_addr + pde);
Ben Widawsky87d60b62013-12-06 14:11:29 -08001408 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1409
1410 if (pd_entry != expected)
1411 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1412 pde,
1413 pd_entry,
1414 expected);
1415 seq_printf(m, "\tPDE: %x\n", pd_entry);
1416
Chris Wilson9231da72017-02-15 08:43:41 +00001417 pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
Mika Kuoppalad1c54ac2015-06-25 18:35:11 +03001418
Michel Thierry07749ef2015-03-16 16:00:54 +00001419 for (pte = 0; pte < GEN6_PTES; pte += 4) {
Ben Widawsky87d60b62013-12-06 14:11:29 -08001420 unsigned long va =
Michel Thierry07749ef2015-03-16 16:00:54 +00001421 (pde * PAGE_SIZE * GEN6_PTES) +
Ben Widawsky87d60b62013-12-06 14:11:29 -08001422 (pte * PAGE_SIZE);
1423 int i;
1424 bool found = false;
1425 for (i = 0; i < 4; i++)
1426 if (pt_vaddr[pte + i] != scratch_pte)
1427 found = true;
1428 if (!found)
1429 continue;
1430
1431 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1432 for (i = 0; i < 4; i++) {
1433 if (pt_vaddr[pte + i] != scratch_pte)
1434 seq_printf(m, " %08x", pt_vaddr[pte + i]);
1435 else
1436 seq_puts(m, " SCRATCH ");
1437 }
1438 seq_puts(m, "\n");
1439 }
Chris Wilson9231da72017-02-15 08:43:41 +00001440 kunmap_atomic(pt_vaddr);
Ben Widawsky87d60b62013-12-06 14:11:29 -08001441 }
1442}
1443
Ben Widawsky678d96f2015-03-16 16:00:56 +00001444/* Write the PDE at index @pde in the ppgtt's page directory, pointing it at @pt */
Chris Wilson16a011c2017-02-15 08:43:45 +00001445static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
1446 const unsigned int pde,
1447 const struct i915_page_table *pt)
Ben Widawsky61973492013-04-08 18:43:54 -07001448{
Ben Widawsky678d96f2015-03-16 16:00:56 +00001449 /* Caller needs to make sure the write completes if necessary */
Chris Wilson16a011c2017-02-15 08:43:45 +00001450 writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1451 ppgtt->pd_addr + pde);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001452}
Ben Widawsky61973492013-04-08 18:43:54 -07001453
Ben Widawsky678d96f2015-03-16 16:00:56 +00001454/* Write out all the page tables found in the ppgtt structure to successive
1455 * page directory entries. */
Chris Wilson16a011c2017-02-15 08:43:45 +00001456static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001457 u32 start, u32 length)
Ben Widawsky678d96f2015-03-16 16:00:56 +00001458{
Michel Thierryec565b32015-04-08 12:13:23 +01001459 struct i915_page_table *pt;
Chris Wilson16a011c2017-02-15 08:43:45 +00001460 unsigned int pde;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001461
Chris Wilson16a011c2017-02-15 08:43:45 +00001462 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
1463 gen6_write_pde(ppgtt, pde, pt);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001464
Chris Wilson16a011c2017-02-15 08:43:45 +00001465 mark_tlbs_dirty(ppgtt);
Chris Wilsondd196742017-02-15 08:43:46 +00001466 wmb();
Ben Widawsky3e302542013-04-23 23:15:32 -07001467}
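/*
 * Editor's illustration (hypothetical call site): after (re)populating
 * ppgtt->pd.page_table[], a caller pushes the PDEs out in one go and
 * relies on the single wmb() above — this mirrors what gen6_ppgtt_init()
 * does further down.
 */
#if 0
	gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
	/* gen6_write_page_range() already did mark_tlbs_dirty() + wmb() */
#endif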
1468
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001469static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
Ben Widawsky3e302542013-04-23 23:15:32 -07001470{
Chris Wilsondd196742017-02-15 08:43:46 +00001471 GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1472 return ppgtt->pd.base.ggtt_offset << 10;
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001473}
Ben Widawsky61973492013-04-08 18:43:54 -07001474
Ben Widawsky90252e52013-12-06 14:11:12 -08001475static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
John Harrisone85b26d2015-05-29 17:43:56 +01001476 struct drm_i915_gem_request *req)
Ben Widawsky90252e52013-12-06 14:11:12 -08001477{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001478 struct intel_engine_cs *engine = req->engine;
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001479 u32 *cs;
Ben Widawsky61973492013-04-08 18:43:54 -07001480
Ben Widawsky90252e52013-12-06 14:11:12 -08001481 /* NB: TLBs must be flushed and invalidated before a switch */
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001482 cs = intel_ring_begin(req, 6);
1483 if (IS_ERR(cs))
1484 return PTR_ERR(cs);
Ben Widawsky90252e52013-12-06 14:11:12 -08001485
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001486 *cs++ = MI_LOAD_REGISTER_IMM(2);
1487 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1488 *cs++ = PP_DIR_DCLV_2G;
1489 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1490 *cs++ = get_pd_offset(ppgtt);
1491 *cs++ = MI_NOOP;
1492 intel_ring_advance(req, cs);
Ben Widawsky90252e52013-12-06 14:11:12 -08001493
1494 return 0;
1495}
1496
Ben Widawsky48a10382013-12-06 14:11:11 -08001497static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
John Harrisone85b26d2015-05-29 17:43:56 +01001498 struct drm_i915_gem_request *req)
Ben Widawsky48a10382013-12-06 14:11:11 -08001499{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001500 struct intel_engine_cs *engine = req->engine;
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001501 u32 *cs;
Ben Widawsky48a10382013-12-06 14:11:11 -08001502
Ben Widawsky48a10382013-12-06 14:11:11 -08001503 /* NB: TLBs must be flushed and invalidated before a switch */
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001504 cs = intel_ring_begin(req, 6);
1505 if (IS_ERR(cs))
1506 return PTR_ERR(cs);
Ben Widawsky48a10382013-12-06 14:11:11 -08001507
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001508 *cs++ = MI_LOAD_REGISTER_IMM(2);
1509 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1510 *cs++ = PP_DIR_DCLV_2G;
1511 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1512 *cs++ = get_pd_offset(ppgtt);
1513 *cs++ = MI_NOOP;
1514 intel_ring_advance(req, cs);
Ben Widawsky48a10382013-12-06 14:11:11 -08001515
1516 return 0;
1517}
1518
Ben Widawskyeeb94882013-12-06 14:11:10 -08001519static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
John Harrisone85b26d2015-05-29 17:43:56 +01001520 struct drm_i915_gem_request *req)
Ben Widawskyeeb94882013-12-06 14:11:10 -08001521{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001522 struct intel_engine_cs *engine = req->engine;
Chris Wilson8eb95202016-07-04 08:48:31 +01001523 struct drm_i915_private *dev_priv = req->i915;
Ben Widawsky48a10382013-12-06 14:11:11 -08001524
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001525 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1526 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
Ben Widawskyeeb94882013-12-06 14:11:10 -08001527 return 0;
1528}
1529
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001530static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
Ben Widawskyeeb94882013-12-06 14:11:10 -08001531{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001532 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05301533 enum intel_engine_id id;
Ben Widawskyeeb94882013-12-06 14:11:10 -08001534
Akash Goel3b3f1652016-10-13 22:44:48 +05301535 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001536 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1537 GEN8_GFX_PPGTT_48B : 0;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001538 I915_WRITE(RING_MODE_GEN7(engine),
Michel Thierry2dba3232015-07-30 11:06:23 +01001539 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
Ben Widawskyeeb94882013-12-06 14:11:10 -08001540 }
Ben Widawskyeeb94882013-12-06 14:11:10 -08001541}
1542
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001543static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001544{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001545 struct intel_engine_cs *engine;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001546 u32 ecochk, ecobits;
Akash Goel3b3f1652016-10-13 22:44:48 +05301547 enum intel_engine_id id;
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001548
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001549 ecobits = I915_READ(GAC_ECO_BITS);
1550 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1551
1552 ecochk = I915_READ(GAM_ECOCHK);
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01001553 if (IS_HASWELL(dev_priv)) {
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001554 ecochk |= ECOCHK_PPGTT_WB_HSW;
1555 } else {
1556 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1557 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1558 }
1559 I915_WRITE(GAM_ECOCHK, ecochk);
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001560
Akash Goel3b3f1652016-10-13 22:44:48 +05301561 for_each_engine(engine, dev_priv, id) {
Ben Widawskyeeb94882013-12-06 14:11:10 -08001562 /* GFX_MODE is per-ring on gen7+ */
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001563 I915_WRITE(RING_MODE_GEN7(engine),
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001564 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Ben Widawsky61973492013-04-08 18:43:54 -07001565 }
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001566}
1567
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001568static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
Ben Widawsky61973492013-04-08 18:43:54 -07001569{
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001570 u32 ecochk, gab_ctl, ecobits;
Ben Widawsky61973492013-04-08 18:43:54 -07001571
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001572 ecobits = I915_READ(GAC_ECO_BITS);
1573 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1574 ECOBITS_PPGTT_CACHE64B);
Ben Widawsky61973492013-04-08 18:43:54 -07001575
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001576 gab_ctl = I915_READ(GAB_CTL);
1577 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
Ben Widawsky61973492013-04-08 18:43:54 -07001578
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001579 ecochk = I915_READ(GAM_ECOCHK);
1580 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
Ben Widawsky61973492013-04-08 18:43:54 -07001581
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001582 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Ben Widawsky61973492013-04-08 18:43:54 -07001583}
1584
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001585/* PPGTT support for Sandybridge/Gen6 and later */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001586static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
Chris Wilsondd196742017-02-15 08:43:46 +00001587 u64 start, u64 length)
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001588{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001589 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Chris Wilsondd196742017-02-15 08:43:46 +00001590 unsigned int first_entry = start >> PAGE_SHIFT;
1591 unsigned int pde = first_entry / GEN6_PTES;
1592 unsigned int pte = first_entry % GEN6_PTES;
1593 unsigned int num_entries = length >> PAGE_SHIFT;
1594 gen6_pte_t scratch_pte =
1595 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001596
Daniel Vetter7bddb012012-02-09 17:15:47 +01001597 while (num_entries) {
Chris Wilsondd196742017-02-15 08:43:46 +00001598 struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1599 unsigned int end = min(pte + num_entries, GEN6_PTES);
1600 gen6_pte_t *vaddr;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001601
Chris Wilsondd196742017-02-15 08:43:46 +00001602 num_entries -= end - pte;
Daniel Vetter7bddb012012-02-09 17:15:47 +01001603
Chris Wilsondd196742017-02-15 08:43:46 +00001604 /* Note that the hw doesn't support removing PDEs on the fly
1605 * (they are cached inside the context with no means to
1606 * invalidate the cache), so we can only reset the PTE
1607 * entries back to scratch.
1608 */
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001609
Chris Wilsondd196742017-02-15 08:43:46 +00001610 vaddr = kmap_atomic_px(pt);
1611 do {
1612 vaddr[pte++] = scratch_pte;
1613 } while (pte < end);
1614 kunmap_atomic(vaddr);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001615
Chris Wilsondd196742017-02-15 08:43:46 +00001616 pte = 0;
Daniel Vetter7bddb012012-02-09 17:15:47 +01001617 }
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001618}
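/*
 * Editor's worked example (illustrative only): how the linear offsets
 * above decompose into (pde, pte), assuming 4 KiB pages and
 * GEN6_PTES == 1024 (one 4 KiB page of 4-byte gen6 PTEs).
 */
#if 0
	u64 start = 6ull << 20;				/* 6 MiB into the ppgtt */
	unsigned int first_entry = start >> PAGE_SHIFT;	/* page 1536 */
	unsigned int pde = first_entry / GEN6_PTES;	/* directory slot 1 */
	unsigned int pte = first_entry % GEN6_PTES;	/* PTE 512 in that PT */
#endif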
1619
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001620static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
Matthew Auld4a234c52017-06-22 10:58:36 +01001621 struct i915_vma *vma,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001622 enum i915_cache_level cache_level,
1623 u32 flags)
Daniel Vetterdef886c2013-01-24 14:44:56 -08001624{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001625 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Matthew Auld4a234c52017-06-22 10:58:36 +01001626 unsigned first_entry = vma->node.start >> PAGE_SHIFT;
Michel Thierry07749ef2015-03-16 16:00:54 +00001627 unsigned act_pt = first_entry / GEN6_PTES;
1628 unsigned act_pte = first_entry % GEN6_PTES;
Chris Wilsonb31144c2017-02-15 08:43:36 +00001629 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1630 struct sgt_dma iter;
1631 gen6_pte_t *vaddr;
Daniel Vetterdef886c2013-01-24 14:44:56 -08001632
Chris Wilson9231da72017-02-15 08:43:41 +00001633 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
Matthew Auld4a234c52017-06-22 10:58:36 +01001634 iter.sg = vma->pages->sgl;
Chris Wilsonb31144c2017-02-15 08:43:36 +00001635 iter.dma = sg_dma_address(iter.sg);
1636 iter.max = iter.dma + iter.sg->length;
1637 do {
1638 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
Daniel Vetterdef886c2013-01-24 14:44:56 -08001639
Chris Wilsonb31144c2017-02-15 08:43:36 +00001640 iter.dma += PAGE_SIZE;
1641 if (iter.dma == iter.max) {
1642 iter.sg = __sg_next(iter.sg);
1643 if (!iter.sg)
1644 break;
1645
1646 iter.dma = sg_dma_address(iter.sg);
1647 iter.max = iter.dma + iter.sg->length;
1648 }
Akash Goel24f3a8c2014-06-17 10:59:42 +05301649
Michel Thierry07749ef2015-03-16 16:00:54 +00001650 if (++act_pte == GEN6_PTES) {
Chris Wilson9231da72017-02-15 08:43:41 +00001651 kunmap_atomic(vaddr);
1652 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
Imre Deak6e995e22013-02-18 19:28:04 +02001653 act_pte = 0;
Daniel Vetterdef886c2013-01-24 14:44:56 -08001654 }
Chris Wilsonb31144c2017-02-15 08:43:36 +00001655 } while (1);
Chris Wilson9231da72017-02-15 08:43:41 +00001656 kunmap_atomic(vaddr);
Daniel Vetterdef886c2013-01-24 14:44:56 -08001657}
1658
Ben Widawsky678d96f2015-03-16 16:00:56 +00001659static int gen6_alloc_va_range(struct i915_address_space *vm,
Chris Wilsondd196742017-02-15 08:43:46 +00001660 u64 start, u64 length)
Ben Widawsky678d96f2015-03-16 16:00:56 +00001661{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001662 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Michel Thierryec565b32015-04-08 12:13:23 +01001663 struct i915_page_table *pt;
Chris Wilsondd196742017-02-15 08:43:46 +00001664 u64 from = start;
1665 unsigned int pde;
1666 bool flush = false;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001667
Dave Gordon731f74c2016-06-24 19:37:46 +01001668 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
Chris Wilsondd196742017-02-15 08:43:46 +00001669 if (pt == vm->scratch_pt) {
1670 pt = alloc_pt(vm);
1671 if (IS_ERR(pt))
1672 goto unwind_out;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001673
Chris Wilsondd196742017-02-15 08:43:46 +00001674 gen6_initialize_pt(vm, pt);
1675 ppgtt->pd.page_table[pde] = pt;
Chris Wilson16a011c2017-02-15 08:43:45 +00001676 gen6_write_pde(ppgtt, pde, pt);
Chris Wilsondd196742017-02-15 08:43:46 +00001677 flush = true;
1678 }
Ben Widawsky678d96f2015-03-16 16:00:56 +00001679 }
1680
Chris Wilsondd196742017-02-15 08:43:46 +00001681 if (flush) {
1682 mark_tlbs_dirty(ppgtt);
1683 wmb();
1684 }
Michel Thierry4933d512015-03-24 15:46:22 +00001685
Ben Widawsky678d96f2015-03-16 16:00:56 +00001686 return 0;
Michel Thierry4933d512015-03-24 15:46:22 +00001687
1688unwind_out:
Chris Wilsondd196742017-02-15 08:43:46 +00001689 gen6_ppgtt_clear_range(vm, from, start);
1690 return -ENOMEM;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001691}
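/*
 * Editor's sketch of a caller (error label is hypothetical): page tables
 * are allocated for a range before any PTE in it is written, exactly as
 * gen6_ppgtt_init() does below for the whole address space.
 */
#if 0
	ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
	if (ret)
		goto err_cleanup;	/* hypothetical error label */
#endif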
1692
Mika Kuoppala8776f022015-06-30 18:16:40 +03001693static int gen6_init_scratch(struct i915_address_space *vm)
1694{
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001695 int ret;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001696
Chris Wilson84486612017-02-15 08:43:40 +00001697 ret = setup_scratch_page(vm, I915_GFP_DMA);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001698 if (ret)
1699 return ret;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001700
Chris Wilson84486612017-02-15 08:43:40 +00001701 vm->scratch_pt = alloc_pt(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001702 if (IS_ERR(vm->scratch_pt)) {
Chris Wilson84486612017-02-15 08:43:40 +00001703 cleanup_scratch_page(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001704 return PTR_ERR(vm->scratch_pt);
1705 }
1706
1707 gen6_initialize_pt(vm, vm->scratch_pt);
1708
1709 return 0;
1710}
1711
1712static void gen6_free_scratch(struct i915_address_space *vm)
1713{
Chris Wilson84486612017-02-15 08:43:40 +00001714 free_pt(vm, vm->scratch_pt);
1715 cleanup_scratch_page(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001716}
1717
Daniel Vetter061dd492015-04-14 17:35:13 +02001718static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
Ben Widawskya00d8252014-02-19 22:05:48 -08001719{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001720 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Dave Gordon731f74c2016-06-24 19:37:46 +01001721 struct i915_page_directory *pd = &ppgtt->pd;
Michel Thierry09942c62015-04-08 12:13:30 +01001722 struct i915_page_table *pt;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001723 u32 pde;
Daniel Vetter3440d262013-01-24 13:49:56 -08001724
Daniel Vetter061dd492015-04-14 17:35:13 +02001725 drm_mm_remove_node(&ppgtt->node);
1726
Dave Gordon731f74c2016-06-24 19:37:46 +01001727 gen6_for_all_pdes(pt, pd, pde)
Mika Kuoppala79ab9372015-06-25 18:35:17 +03001728 if (pt != vm->scratch_pt)
Chris Wilson84486612017-02-15 08:43:40 +00001729 free_pt(vm, pt);
Michel Thierry4933d512015-03-24 15:46:22 +00001730
Mika Kuoppala8776f022015-06-30 18:16:40 +03001731 gen6_free_scratch(vm);
Daniel Vetter3440d262013-01-24 13:49:56 -08001732}
1733
Ben Widawskyb1465202014-02-19 22:05:49 -08001734static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
Daniel Vetter3440d262013-01-24 13:49:56 -08001735{
Mika Kuoppala8776f022015-06-30 18:16:40 +03001736 struct i915_address_space *vm = &ppgtt->base;
Chris Wilson49d73912016-11-29 09:50:08 +00001737 struct drm_i915_private *dev_priv = ppgtt->base.i915;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001738 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawskyb1465202014-02-19 22:05:49 -08001739 int ret;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001740
Ben Widawskyc8d4c0d2013-12-06 14:11:07 -08001741 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1742 * allocator works in address space sizes, so it's multiplied by page
1743 * size. We allocate at the top of the GTT to avoid fragmentation.
1744 */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001745 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
Michel Thierry4933d512015-03-24 15:46:22 +00001746
Mika Kuoppala8776f022015-06-30 18:16:40 +03001747 ret = gen6_init_scratch(vm);
1748 if (ret)
1749 return ret;
Michel Thierry4933d512015-03-24 15:46:22 +00001750
Chris Wilsone007b192017-01-11 11:23:10 +00001751 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1752 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1753 I915_COLOR_UNEVICTABLE,
1754 0, ggtt->base.total,
1755 PIN_HIGH);
Ben Widawskyc8c26622015-01-22 17:01:25 +00001756 if (ret)
Ben Widawsky678d96f2015-03-16 16:00:56 +00001757 goto err_out;
1758
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001759 if (ppgtt->node.start < ggtt->mappable_end)
Ben Widawskyc8d4c0d2013-12-06 14:11:07 -08001760 DRM_DEBUG("Forced to use aperture for PDEs\n");
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001761
Chris Wilson52c126e2017-02-15 08:43:43 +00001762 ppgtt->pd.base.ggtt_offset =
1763 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1764
1765 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
1766 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
1767
Ben Widawskyc8c26622015-01-22 17:01:25 +00001768 return 0;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001769
1770err_out:
Mika Kuoppala8776f022015-06-30 18:16:40 +03001771 gen6_free_scratch(vm);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001772 return ret;
Ben Widawskyb1465202014-02-19 22:05:49 -08001773}
1774
Ben Widawskyb1465202014-02-19 22:05:49 -08001775static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1776{
kbuild test robot2f2cf682015-03-27 19:26:35 +08001777 return gen6_ppgtt_allocate_page_directories(ppgtt);
Ben Widawskyb1465202014-02-19 22:05:49 -08001778}
1779
Michel Thierry4933d512015-03-24 15:46:22 +00001780static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001781 u64 start, u64 length)
Michel Thierry4933d512015-03-24 15:46:22 +00001782{
Michel Thierryec565b32015-04-08 12:13:23 +01001783 struct i915_page_table *unused;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001784 u32 pde;
Michel Thierry4933d512015-03-24 15:46:22 +00001785
Dave Gordon731f74c2016-06-24 19:37:46 +01001786 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
Mika Kuoppala79ab9372015-06-25 18:35:17 +03001787 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
Michel Thierry4933d512015-03-24 15:46:22 +00001788}
1789
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001790static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
Ben Widawskyb1465202014-02-19 22:05:49 -08001791{
Chris Wilson49d73912016-11-29 09:50:08 +00001792 struct drm_i915_private *dev_priv = ppgtt->base.i915;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001793 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawskyb1465202014-02-19 22:05:49 -08001794 int ret;
1795
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001796 ppgtt->base.pte_encode = ggtt->base.pte_encode;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001797 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
Ben Widawsky48a10382013-12-06 14:11:11 -08001798 ppgtt->switch_mm = gen6_mm_switch;
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01001799 else if (IS_HASWELL(dev_priv))
Ben Widawsky90252e52013-12-06 14:11:12 -08001800 ppgtt->switch_mm = hsw_mm_switch;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001801 else if (IS_GEN7(dev_priv))
Ben Widawsky48a10382013-12-06 14:11:11 -08001802 ppgtt->switch_mm = gen7_mm_switch;
Chris Wilson8eb95202016-07-04 08:48:31 +01001803 else
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001804 BUG();
Ben Widawskyb1465202014-02-19 22:05:49 -08001805
1806 ret = gen6_ppgtt_alloc(ppgtt);
1807 if (ret)
1808 return ret;
1809
Michel Thierry09942c62015-04-08 12:13:30 +01001810 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001811
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001812 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
Chris Wilson16a011c2017-02-15 08:43:45 +00001813 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001814
Chris Wilson52c126e2017-02-15 08:43:43 +00001815 ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
1816 if (ret) {
1817 gen6_ppgtt_cleanup(&ppgtt->base);
1818 return ret;
1819 }
1820
Mika Kuoppala054b9ac2017-02-28 17:28:11 +02001821 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1822 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1823 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1824 ppgtt->base.bind_vma = ppgtt_bind_vma;
1825 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1826 ppgtt->debug_dump = gen6_dump_ppgtt;
1827
Thierry Reding440fd522015-01-23 09:05:06 +01001828 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
Ben Widawskyc8d4c0d2013-12-06 14:11:07 -08001829 ppgtt->node.size >> 20,
1830 ppgtt->node.start / PAGE_SIZE);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001831
Chris Wilson52c126e2017-02-15 08:43:43 +00001832 DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
1833 ppgtt->pd.base.ggtt_offset << 10);
Daniel Vetterfa76da32014-08-06 20:19:54 +02001834
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001835 return 0;
Daniel Vetter3440d262013-01-24 13:49:56 -08001836}
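/*
 * Editor's aside — arithmetic sketch only: the total set above works out
 * to 2 GiB, assuming I915_PDES == 512 and GEN6_PTES == 1024.
 */
#if 0
	BUILD_BUG_ON(512ULL * 1024 * 4096 != 2ULL << 30);	/* 2 GiB */
#endif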
1837
Chris Wilson2bfa9962016-08-04 07:52:25 +01001838static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
1839 struct drm_i915_private *dev_priv)
Daniel Vetter3440d262013-01-24 13:49:56 -08001840{
Chris Wilson49d73912016-11-29 09:50:08 +00001841 ppgtt->base.i915 = dev_priv;
Chris Wilson84486612017-02-15 08:43:40 +00001842 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
Daniel Vetter3440d262013-01-24 13:49:56 -08001843
Chris Wilson2bfa9962016-08-04 07:52:25 +01001844 if (INTEL_INFO(dev_priv)->gen < 8)
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001845 return gen6_ppgtt_init(ppgtt);
Ben Widawsky3ed124b2013-04-08 18:43:53 -07001846 else
Michel Thierryd7b26332015-04-08 12:13:34 +01001847 return gen8_ppgtt_init(ppgtt);
Daniel Vetterfa76da32014-08-06 20:19:54 +02001848}
Mika Kuoppalac114f762015-06-25 18:35:13 +03001849
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001850static void i915_address_space_init(struct i915_address_space *vm,
Chris Wilson80b204b2016-10-28 13:58:58 +01001851 struct drm_i915_private *dev_priv,
1852 const char *name)
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001853{
Chris Wilson80b204b2016-10-28 13:58:58 +01001854 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
Chris Wilson47db9222017-02-06 08:45:46 +00001855
Chris Wilson381b9432017-02-15 08:43:54 +00001856 drm_mm_init(&vm->mm, 0, vm->total);
Chris Wilson47db9222017-02-06 08:45:46 +00001857 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
1858
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001859 INIT_LIST_HEAD(&vm->active_list);
1860 INIT_LIST_HEAD(&vm->inactive_list);
Chris Wilson50e046b2016-08-04 07:52:46 +01001861 INIT_LIST_HEAD(&vm->unbound_list);
Chris Wilson47db9222017-02-06 08:45:46 +00001862
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001863 list_add_tail(&vm->global_link, &dev_priv->vm_list);
Chris Wilson84486612017-02-15 08:43:40 +00001864 pagevec_init(&vm->free_pages, false);
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001865}
1866
Matthew Aulded9724d2016-11-17 21:04:10 +00001867static void i915_address_space_fini(struct i915_address_space *vm)
1868{
Chris Wilson84486612017-02-15 08:43:40 +00001869 if (pagevec_count(&vm->free_pages))
1870 vm_free_pages_release(vm);
1871
Matthew Aulded9724d2016-11-17 21:04:10 +00001872 i915_gem_timeline_fini(&vm->timeline);
1873 drm_mm_takedown(&vm->mm);
1874 list_del(&vm->global_link);
1875}
1876
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001877static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
Tim Gored5165eb2016-02-04 11:49:34 +00001878{
Tim Gored5165eb2016-02-04 11:49:34 +00001879 /* This function is for gtt related workarounds. It is called on
1880 * driver load and after a GPU reset, so you can place workarounds
1881 * here even if they get overwritten by a GPU reset.
1882 */
Rodrigo Vivi46c26662017-06-16 15:49:58 -07001883 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
Tvrtko Ursulin86527442016-10-13 11:03:00 +01001884 if (IS_BROADWELL(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001885 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01001886 else if (IS_CHERRYVIEW(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001887 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
Rodrigo Vivib976dc52017-01-23 10:32:37 -08001888 else if (IS_GEN9_BC(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001889 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
Ander Conselvan de Oliveira9fb50262017-01-26 11:16:58 +02001890 else if (IS_GEN9_LP(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001891 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
1892}
1893
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001894int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
Daniel Vetter82460d92014-08-06 20:19:53 +02001895{
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001896 gtt_write_workarounds(dev_priv);
Tim Gored5165eb2016-02-04 11:49:34 +00001897
Thomas Daniel671b50132014-08-20 16:24:50 +01001898 /* In the case of execlists, PPGTT is enabled by the context descriptor
1899 * and the PDPs are contained within the context itself. We don't
1900 * need to do anything here. */
1901 if (i915.enable_execlists)
1902 return 0;
1903
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001904 if (!USES_PPGTT(dev_priv))
Daniel Vetter82460d92014-08-06 20:19:53 +02001905 return 0;
1906
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001907 if (IS_GEN6(dev_priv))
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001908 gen6_ppgtt_enable(dev_priv);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001909 else if (IS_GEN7(dev_priv))
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001910 gen7_ppgtt_enable(dev_priv);
1911 else if (INTEL_GEN(dev_priv) >= 8)
1912 gen8_ppgtt_enable(dev_priv);
Daniel Vetter82460d92014-08-06 20:19:53 +02001913 else
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001914 MISSING_CASE(INTEL_GEN(dev_priv));
Daniel Vetter82460d92014-08-06 20:19:53 +02001915
John Harrison4ad2fd82015-06-18 13:11:20 +01001916 return 0;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001917}
John Harrison4ad2fd82015-06-18 13:11:20 +01001918
Daniel Vetter4d884702014-08-06 15:04:47 +02001919struct i915_hw_ppgtt *
Chris Wilson2bfa9962016-08-04 07:52:25 +01001920i915_ppgtt_create(struct drm_i915_private *dev_priv,
Chris Wilson80b204b2016-10-28 13:58:58 +01001921 struct drm_i915_file_private *fpriv,
1922 const char *name)
Daniel Vetter4d884702014-08-06 15:04:47 +02001923{
1924 struct i915_hw_ppgtt *ppgtt;
1925 int ret;
1926
1927 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1928 if (!ppgtt)
1929 return ERR_PTR(-ENOMEM);
1930
Chris Wilson1188bc62017-02-15 08:43:38 +00001931 ret = __hw_ppgtt_init(ppgtt, dev_priv);
Daniel Vetter4d884702014-08-06 15:04:47 +02001932 if (ret) {
1933 kfree(ppgtt);
1934 return ERR_PTR(ret);
1935 }
1936
Chris Wilson1188bc62017-02-15 08:43:38 +00001937 kref_init(&ppgtt->ref);
1938 i915_address_space_init(&ppgtt->base, dev_priv, name);
1939 ppgtt->base.file = fpriv;
1940
Daniele Ceraolo Spurio198c9742014-11-10 13:44:31 +00001941 trace_i915_ppgtt_create(&ppgtt->base);
1942
Daniel Vetter4d884702014-08-06 15:04:47 +02001943 return ppgtt;
1944}
1945
Chris Wilson0c7eeda2017-01-11 21:09:25 +00001946void i915_ppgtt_close(struct i915_address_space *vm)
1947{
1948 struct list_head *phases[] = {
1949 &vm->active_list,
1950 &vm->inactive_list,
1951 &vm->unbound_list,
1952 NULL,
1953 }, **phase;
1954
1955 GEM_BUG_ON(vm->closed);
1956 vm->closed = true;
1957
1958 for (phase = phases; *phase; phase++) {
1959 struct i915_vma *vma, *vn;
1960
1961 list_for_each_entry_safe(vma, vn, *phase, vm_link)
1962 if (!i915_vma_is_closed(vma))
1963 i915_vma_close(vma);
1964 }
1965}
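/*
 * Editor's sketch of the ppgtt lifecycle around these helpers
 * (hypothetical call site): creation takes the initial kref,
 * i915_ppgtt_close() closes out the vmas, and the last reference ends
 * up in i915_ppgtt_release() below.
 */
#if 0
	ppgtt = i915_ppgtt_create(dev_priv, file_priv, "sketch");
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	/* use ppgtt->base for allocations and bindings */

	i915_ppgtt_close(&ppgtt->base);
	kref_put(&ppgtt->ref, i915_ppgtt_release);
#endif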
1966
Matthew Aulded9724d2016-11-17 21:04:10 +00001967void i915_ppgtt_release(struct kref *kref)
Daniel Vetteree960be2014-08-06 15:04:45 +02001968{
1969 struct i915_hw_ppgtt *ppgtt =
1970 container_of(kref, struct i915_hw_ppgtt, ref);
1971
Daniele Ceraolo Spurio198c9742014-11-10 13:44:31 +00001972 trace_i915_ppgtt_release(&ppgtt->base);
1973
Chris Wilson50e046b2016-08-04 07:52:46 +01001974 /* vmas should already be unbound and destroyed */
Daniel Vetteree960be2014-08-06 15:04:45 +02001975 WARN_ON(!list_empty(&ppgtt->base.active_list));
1976 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
Chris Wilson50e046b2016-08-04 07:52:46 +01001977 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
Daniel Vetteree960be2014-08-06 15:04:45 +02001978
1979 ppgtt->base.cleanup(&ppgtt->base);
Chris Wilson84486612017-02-15 08:43:40 +00001980 i915_address_space_fini(&ppgtt->base);
Daniel Vetteree960be2014-08-06 15:04:45 +02001981 kfree(ppgtt);
1982}
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001983
Ben Widawskya81cc002013-01-18 12:30:31 -08001984/* Certain Gen5 chipsets require idling the GPU before
1985 * unmapping anything from the GTT when VT-d is enabled.
1986 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001987static bool needs_idle_maps(struct drm_i915_private *dev_priv)
Ben Widawskya81cc002013-01-18 12:30:31 -08001988{
Ben Widawskya81cc002013-01-18 12:30:31 -08001989 /* Query intel_iommu to see if we need the workaround. Presumably that
1990 * was loaded first.
1991 */
Chris Wilson80debff2017-05-25 13:16:12 +01001992 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
Ben Widawskya81cc002013-01-18 12:30:31 -08001993}
1994
Chris Wilsondc979972016-05-10 14:10:04 +01001995void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
Ben Widawsky828c7902013-10-16 09:21:30 -07001996{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001997 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05301998 enum intel_engine_id id;
Ben Widawsky828c7902013-10-16 09:21:30 -07001999
Chris Wilsondc979972016-05-10 14:10:04 +01002000 if (INTEL_INFO(dev_priv)->gen < 6)
Ben Widawsky828c7902013-10-16 09:21:30 -07002001 return;
2002
Akash Goel3b3f1652016-10-13 22:44:48 +05302003 for_each_engine(engine, dev_priv, id) {
Ben Widawsky828c7902013-10-16 09:21:30 -07002004 u32 fault_reg;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002005 fault_reg = I915_READ(RING_FAULT_REG(engine));
Ben Widawsky828c7902013-10-16 09:21:30 -07002006 if (fault_reg & RING_FAULT_VALID) {
2007 DRM_DEBUG_DRIVER("Unexpected fault\n"
Paulo Zanoni59a5d292014-10-30 15:52:45 -02002008 "\tAddr: 0x%08lx\n"
Ben Widawsky828c7902013-10-16 09:21:30 -07002009 "\tAddress space: %s\n"
2010 "\tSource ID: %d\n"
2011 "\tType: %d\n",
2012 fault_reg & PAGE_MASK,
2013 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2014 RING_FAULT_SRCID(fault_reg),
2015 RING_FAULT_FAULT_TYPE(fault_reg));
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002016 I915_WRITE(RING_FAULT_REG(engine),
Ben Widawsky828c7902013-10-16 09:21:30 -07002017 fault_reg & ~RING_FAULT_VALID);
2018 }
2019 }
Akash Goel3b3f1652016-10-13 22:44:48 +05302020
2021 /* Engine specific init may not have been done till this point. */
2022 if (dev_priv->engine[RCS])
2023 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
Ben Widawsky828c7902013-10-16 09:21:30 -07002024}
2025
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00002026void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
Ben Widawsky828c7902013-10-16 09:21:30 -07002027{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002028 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawsky828c7902013-10-16 09:21:30 -07002029
2030 /* Don't bother messing with faults pre GEN6 as we have little
2031 * documentation supporting that it's a good idea.
2032 */
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00002033 if (INTEL_GEN(dev_priv) < 6)
Ben Widawsky828c7902013-10-16 09:21:30 -07002034 return;
2035
Chris Wilsondc979972016-05-10 14:10:04 +01002036 i915_check_and_clear_faults(dev_priv);
Ben Widawsky828c7902013-10-16 09:21:30 -07002037
Chris Wilson381b9432017-02-15 08:43:54 +00002038 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
Chris Wilson91e56492014-09-25 10:13:12 +01002039
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002040 i915_ggtt_invalidate(dev_priv);
Ben Widawsky828c7902013-10-16 09:21:30 -07002041}
2042
Chris Wilson03ac84f2016-10-28 13:58:36 +01002043int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2044 struct sg_table *pages)
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002045{
Chris Wilson1a292fa2017-01-06 15:22:39 +00002046 do {
2047 if (dma_map_sg(&obj->base.dev->pdev->dev,
2048 pages->sgl, pages->nents,
2049 PCI_DMA_BIDIRECTIONAL))
2050 return 0;
2051
2052 /* If the DMA remap fails, one cause can be that we have
2053 * too many objects pinned in a small remapping table,
2054 * such as swiotlb. Incrementally purge all other objects and
2055 * try again - if there are no more pages to remove from
2056 * the DMA remapper, i915_gem_shrink will return 0.
2057 */
2058 GEM_BUG_ON(obj->mm.pages == pages);
2059 } while (i915_gem_shrink(to_i915(obj->base.dev),
2060 obj->base.size >> PAGE_SHIFT,
2061 I915_SHRINK_BOUND |
2062 I915_SHRINK_UNBOUND |
2063 I915_SHRINK_ACTIVE));
Chris Wilson9da3da62012-06-01 15:20:22 +01002064
Chris Wilson03ac84f2016-10-28 13:58:36 +01002065 return -ENOSPC;
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002066}
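/*
 * Editor's distilled sketch of the retry protocol above (both helpers
 * are hypothetical, illustration only): keep purging bound and unbound
 * objects until the remap succeeds or the shrinker makes no progress.
 */
#if 0
	do {
		if (try_dma_map_sg(obj, pages))			/* hypothetical */
			return 0;
	} while (shrink_everything(to_i915(obj->base.dev)));	/* hypothetical */
	return -ENOSPC;
#endif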
2067
Daniel Vetter2c642b02015-04-14 17:35:26 +02002068static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002069{
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002070 writeq(pte, addr);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002071}
2072
Chris Wilsond6473f52016-06-10 14:22:59 +05302073static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2074 dma_addr_t addr,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002075 u64 offset,
Chris Wilsond6473f52016-06-10 14:22:59 +05302076 enum i915_cache_level level,
2077 u32 unused)
2078{
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002079 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Chris Wilsond6473f52016-06-10 14:22:59 +05302080 gen8_pte_t __iomem *pte =
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002081 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
Chris Wilsond6473f52016-06-10 14:22:59 +05302082
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002083 gen8_set_pte(pte, gen8_pte_encode(addr, level));
Chris Wilsond6473f52016-06-10 14:22:59 +05302084
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002085 ggtt->invalidate(vm->i915);
Chris Wilsond6473f52016-06-10 14:22:59 +05302086}
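/*
 * Editor's illustration of the PTE slot arithmetic above: with 4 KiB
 * pages and 8-byte gen8 PTEs, a GGTT offset maps to a slot index in the
 * GSM. The numbers below are a hypothetical worked example.
 */
#if 0
	u64 offset = 64 << 10;				/* 64 KiB into the GGTT */
	gen8_pte_t __iomem *slot =
		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
	/* slot index 16, i.e. byte 16 * sizeof(gen8_pte_t) == 128 of the GSM */
#endif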
2087
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002088static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
Matthew Auld4a234c52017-06-22 10:58:36 +01002089 struct i915_vma *vma,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002090 enum i915_cache_level level,
2091 u32 unused)
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002092{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002093 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Dave Gordon85d12252016-05-20 11:54:06 +01002094 struct sgt_iter sgt_iter;
2095 gen8_pte_t __iomem *gtt_entries;
Chris Wilson894cceb2017-02-15 08:43:37 +00002096 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
Dave Gordon85d12252016-05-20 11:54:06 +01002097 dma_addr_t addr;
Imre Deakbe694592015-12-15 20:10:38 +02002098
Chris Wilson894cceb2017-02-15 08:43:37 +00002099 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
Matthew Auld4a234c52017-06-22 10:58:36 +01002100 gtt_entries += vma->node.start >> PAGE_SHIFT;
2101 for_each_sgt_dma(addr, sgt_iter, vma->pages)
Chris Wilson894cceb2017-02-15 08:43:37 +00002102 gen8_set_pte(gtt_entries++, pte_encode | addr);
Dave Gordon85d12252016-05-20 11:54:06 +01002103
Chris Wilson894cceb2017-02-15 08:43:37 +00002104 wmb();
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002105
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002106 /* This next bit makes the above write barrier even more important. We
2107 * want to flush the TLBs only after we're certain all the PTE updates
2108 * have finished.
2109 */
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002110 ggtt->invalidate(vm->i915);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002111}
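/*
 * Editor's note: for_each_sgt_dma() above visits every 4 KiB DMA page of
 * the vma. A minimal standalone use of the same iterator (illustrative;
 * the debug-print body is hypothetical):
 */
#if 0
	struct sgt_iter iter;
	dma_addr_t addr;

	for_each_sgt_dma(addr, iter, vma->pages)
		DRM_DEBUG_DRIVER("dma page at %pad\n", &addr);
#endif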
2112
Chris Wilsond6473f52016-06-10 14:22:59 +05302113static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2114 dma_addr_t addr,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002115 u64 offset,
Chris Wilsond6473f52016-06-10 14:22:59 +05302116 enum i915_cache_level level,
2117 u32 flags)
2118{
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002119 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Chris Wilsond6473f52016-06-10 14:22:59 +05302120 gen6_pte_t __iomem *pte =
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002121 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
Chris Wilsond6473f52016-06-10 14:22:59 +05302122
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002123 iowrite32(vm->pte_encode(addr, level, flags), pte);
Chris Wilsond6473f52016-06-10 14:22:59 +05302124
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002125 ggtt->invalidate(vm->i915);
Chris Wilsond6473f52016-06-10 14:22:59 +05302126}
2127
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002128/*
2129 * Binds an object into the global gtt with the specified cache level. The object
2130 * will be accessible to the GPU via commands whose operands reference offsets
2131 * within the global GTT as well as accessible by the CPU through the GMADR
2132 * mapped BAR (dev_priv->mm.gtt->gtt).
2133 */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002134static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
Matthew Auld4a234c52017-06-22 10:58:36 +01002135 struct i915_vma *vma,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002136 enum i915_cache_level level,
2137 u32 flags)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002138{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002139 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Chris Wilsonb31144c2017-02-15 08:43:36 +00002140 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
Matthew Auld4a234c52017-06-22 10:58:36 +01002141 unsigned int i = vma->node.start >> PAGE_SHIFT;
Chris Wilsonb31144c2017-02-15 08:43:36 +00002142 struct sgt_iter iter;
Dave Gordon85d12252016-05-20 11:54:06 +01002143 dma_addr_t addr;
Matthew Auld4a234c52017-06-22 10:58:36 +01002144 for_each_sgt_dma(addr, iter, vma->pages)
Chris Wilsonb31144c2017-02-15 08:43:36 +00002145 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2146 wmb();
Ben Widawsky0f9b91c2012-11-04 09:21:30 -08002147
2148 /* This next bit makes the above write barrier even more important. We
2149 * want to flush the TLBs only after we're certain all the PTE updates
2150 * have finished.
2151 */
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002152 ggtt->invalidate(vm->i915);
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002153}
2154
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002155static void nop_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002156 u64 start, u64 length)
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002157{
2158}
2159
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002160static void gen8_ggtt_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002161 u64 start, u64 length)
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002162{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002163 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Ben Widawsky782f1492014-02-20 11:50:33 -08002164 unsigned first_entry = start >> PAGE_SHIFT;
2165 unsigned num_entries = length >> PAGE_SHIFT;
Chris Wilson894cceb2017-02-15 08:43:37 +00002166 const gen8_pte_t scratch_pte =
2167 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2168 gen8_pte_t __iomem *gtt_base =
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002169 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2170 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002171 int i;
2172
2173 if (WARN(num_entries > max_entries,
2174 "First entry = %d; Num entries = %d (max=%d)\n",
2175 first_entry, num_entries, max_entries))
2176 num_entries = max_entries;
2177
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002178 for (i = 0; i < num_entries; i++)
2179 gen8_set_pte(&gtt_base[i], scratch_pte);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002180}
2181
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07002182static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2183{
2184 struct drm_i915_private *dev_priv = vm->i915;
2185
2186 /*
2187 * Make sure the internal GAM fifo has been cleared of all GTT
2188 * writes before exiting stop_machine(). This guarantees that
2189 * any aperture accesses waiting to start in another process
2190 * cannot back up behind the GTT writes causing a hang.
2191 * The register can be any arbitrary GAM register.
2192 */
2193 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2194}
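/*
 * Editor's note on the wrappers that follow: each one bundles its
 * operands into a small struct, runs the real GGTT operation under
 * stop_machine() (hence the __BKL suffix), and then applies
 * bxt_vtd_ggtt_wa() above. Generic shape, with hypothetical names:
 */
#if 0
struct vtd_wa_op {				/* hypothetical bundle */
	struct i915_address_space *vm;
	/* operands for the wrapped operation would live here */
};

static int vtd_wa_op__cb(void *_arg)
{
	struct vtd_wa_op *arg = _arg;

	/* real_ggtt_op(arg); -- the wrapped insert/clear */
	bxt_vtd_ggtt_wa(arg->vm);		/* flush the GAM fifo */
	return 0;
}

/* caller side: stop_machine(vtd_wa_op__cb, &arg, NULL); */
#endif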
2195
2196struct insert_page {
2197 struct i915_address_space *vm;
2198 dma_addr_t addr;
2199 u64 offset;
2200 enum i915_cache_level level;
2201};
2202
2203static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2204{
2205 struct insert_page *arg = _arg;
2206
2207 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2208 bxt_vtd_ggtt_wa(arg->vm);
2209
2210 return 0;
2211}
2212
2213static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2214 dma_addr_t addr,
2215 u64 offset,
2216 enum i915_cache_level level,
2217 u32 unused)
2218{
2219 struct insert_page arg = { vm, addr, offset, level };
2220
2221 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2222}
2223
2224struct insert_entries {
2225 struct i915_address_space *vm;
Matthew Auld4a234c52017-06-22 10:58:36 +01002226 struct i915_vma *vma;
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07002227 enum i915_cache_level level;
2228};
2229
2230static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2231{
2232 struct insert_entries *arg = _arg;
2233
Matthew Auld4a234c52017-06-22 10:58:36 +01002234 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07002235 bxt_vtd_ggtt_wa(arg->vm);
2236
2237 return 0;
2238}
2239
2240static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
Matthew Auld4a234c52017-06-22 10:58:36 +01002241 struct i915_vma *vma,
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07002242 enum i915_cache_level level,
2243 u32 unused)
2244{
Chuanxiao Dong17369ba2017-07-07 17:50:59 +08002245 struct insert_entries arg = { vm, vma, level };
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07002246
2247 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2248}
2249
2250struct clear_range {
2251 struct i915_address_space *vm;
2252 u64 start;
2253 u64 length;
2254};
2255
2256static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2257{
2258 struct clear_range *arg = _arg;
2259
2260 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2261 bxt_vtd_ggtt_wa(arg->vm);
2262
2263 return 0;
2264}
2265
2266static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2267 u64 start,
2268 u64 length)
2269{
2270 struct clear_range arg = { vm, start, length };
2271
2272 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2273}
2274
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
				    flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	if (unlikely(!vma->pages)) {
		int ret = i915_get_ggtt_vma_pages(vma);
		if (ret)
			return ret;
	}

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	intel_runtime_pm_get(i915);
	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
	intel_runtime_pm_put(i915);

	/*
	 * Without an aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND; they use the very same PTEs. Hence we
	 * unconditionally upgrade to both bound whenever we bind either,
	 * to avoid double-binding.
	 */
	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	return 0;
}

static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	intel_runtime_pm_get(i915);
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
	intel_runtime_pm_put(i915);
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 pte_flags;
	int ret;

	if (unlikely(!vma->pages)) {
		ret = i915_get_ggtt_vma_pages(vma);
		if (ret)
			return ret;
	}

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;

		if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
		    appgtt->base.allocate_va_range) {
			ret = appgtt->base.allocate_va_range(&appgtt->base,
							     vma->node.start,
							     vma->size);
			if (ret)
				goto err_pages;
		}

		appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
					    pte_flags);
	}

	if (flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
		intel_runtime_pm_put(i915);
	}

	return 0;

err_pages:
	if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
		if (vma->pages != vma->obj->mm.pages) {
			GEM_BUG_ON(!vma->pages);
			sg_free_table(vma->pages);
			kfree(vma->pages);
		}
		vma->pages = NULL;
	}
	return ret;
}

static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (vma->flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
		intel_runtime_pm_put(i915);
	}

	if (vma->flags & I915_VMA_LOCAL_BIND) {
		struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;

		vm->clear_range(vm, vma->node.start, vma->size);
	}
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		if (i915_gem_wait_for_idle(dev_priv, 0)) {
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in the hope it avoids the hang */
			udelay(10);
		}
	}

	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}

static void i915_gtt_color_adjust(const struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start,
				  u64 *end)
{
	if (node->allocated && node->color != color)
		*start += I915_GTT_PAGE_SIZE;

	/* Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

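/*
 * Illustrative sketch (not part of the driver): with I915_GTT_PAGE_SIZE
 * of 4096, a hole whose neighbouring nodes carry a different colour is
 * shrunk by one page on each mismatching side. E.g. for a hole at
 * [0x10000, 0x20000):
 *
 *	u64 start = 0x10000, end = 0x20000;
 *
 *	i915_gtt_color_adjust(prev_node, color, &start, &end);
 *	// start == 0x11000 if prev_node->color != color,
 *	// end   == 0x1f000 if the following node's colour also differs,
 *	// leaving a one-page guard against CS prefetch on either side.
 */
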
int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_hw_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	if (ppgtt->base.allocate_va_range) {
		/* Note we only pre-allocate as far as the end of the global
		 * GTT. On 48b / 4-level page-tables, the difference is very,
		 * very significant! We have to preallocate as GVT/vgpu does
		 * not like the page directory disappearing.
		 */
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
						    0, ggtt->base.total);
		if (err)
			goto err_ppgtt;
	}

	i915->mm.aliasing_ppgtt = ppgtt;

	WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
	ggtt->base.bind_vma = aliasing_gtt_bind_vma;

	WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
	ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;

	return 0;

err_ppgtt:
	i915_ppgtt_put(ppgtt);
	return err;
}

void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_hw_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
	if (!ppgtt)
		return;

	i915_ppgtt_put(ppgtt);

	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
}

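/*
 * Illustrative sketch (not part of the driver): init/fini above simply
 * swap the GGTT bind/unbind vfuncs, so with an aliasing PPGTT active
 * every GGTT bind can also be mirrored into the per-device PPGTT:
 *
 *	if (!i915_gem_init_aliasing_ppgtt(i915))
 *		GEM_BUG_ON(i915->ggtt.base.bind_vma != aliasing_gtt_bind_vma);
 *
 *	i915_gem_fini_aliasing_ppgtt(i915);
 *	GEM_BUG_ON(i915->ggtt.base.bind_vma != ggtt_bind_vma);
 */
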
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
{
	/* Let GEM manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	ret = intel_vgt_balloon(dev_priv);
	if (ret)
		return ret;

	/* Reserve a mappable slot for our lockless error capture */
	ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (ret)
		return ret;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt->base.clear_range(&ggtt->base, hole_start,
				       hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->base.clear_range(&ggtt->base,
			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);

	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
		ret = i915_gem_init_aliasing_ppgtt(dev_priv);
		if (ret)
			goto err;
	}

	return 0;

err:
	drm_mm_remove_node(&ggtt->error_capture);
	return ret;
}

/**
 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
 * @dev_priv: i915 device
 */
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma, *vn;

	ggtt->base.closed = true;

	mutex_lock(&dev_priv->drm.struct_mutex);
	WARN_ON(!list_empty(&ggtt->base.active_list));
	list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
		WARN_ON(i915_vma_unbind(vma));
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_cleanup_stolen(&dev_priv->drm);

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_fini_aliasing_ppgtt(dev_priv);

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);

	if (drm_mm_initialized(&ggtt->base.mm)) {
		intel_vgt_deballoon(dev_priv);
		i915_address_space_fini(&ggtt->base);
	}

	ggtt->base.cleanup(&ggtt->base);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	arch_phys_wc_del(ggtt->mtrr);
	io_mapping_fini(&ggtt->mappable);
}

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
}

static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
}

static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0 to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments starting at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return (size_t)gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
	else
		return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
}

static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
	else
		/* 4MB increments starting at 0xf0 for 4MB */
		return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
}

static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	phys_addr_t phys_addr;
	int ret;

	/* For modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
	 * dropped. For WC mappings in general we have 64 byte burst writes
	 * when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(dev_priv))
		ggtt->gsm = ioremap_nocache(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		DRM_ERROR("Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	return 0;
}

/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |     /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |                     /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	if (!USES_PPGTT(dev_priv))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

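/*
 * Illustrative sketch (not part of the driver), assuming the usual
 * GEN8_PPAT(i, x) encoding of ((u64)(x) << ((i) * 8)): each of the eight
 * PAT entries occupies one byte of the 64b value, and a PTE's pat_sel
 * index selects the byte. E.g. entry 2 above (WT + LLC/eLLC) resolves as:
 *
 *	pat = ... | GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | ...;
 *	u8 entry2 = (pat >> (2 * 8)) & 0xff;	// the byte the HW reads
 *						// for pat_sel == 010
 */
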
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	cleanup_scratch_page(vm);
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;
	int err;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
	if (err)
		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_GEN(dev_priv) >= 9) {
		ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ggtt->base.cleanup = gen6_gmch_remove;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.insert_page = gen8_ggtt_insert_page;
	ggtt->base.clear_range = nop_clear_range;
	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
		ggtt->base.clear_range = gen8_ggtt_clear_range;

	ggtt->base.insert_entries = gen8_ggtt_insert_entries;

	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
	if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
		ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
		if (ggtt->base.clear_range != nop_clear_range)
			ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
	}

	ggtt->invalidate = gen6_ggtt_invalidate;

	return ggtt_probe_common(ggtt, size);
}

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;
	int err;

	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
		return -ENXIO;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err)
		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

	ggtt->base.clear_range = gen6_ggtt_clear_range;
	ggtt->base.insert_page = gen6_ggtt_insert_page;
	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(dev_priv))
		ggtt->base.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(dev_priv))
		ggtt->base.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(dev_priv))
		ggtt->base.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(dev_priv) >= 7)
		ggtt->base.pte_encode = ivb_pte_encode;
	else
		ggtt->base.pte_encode = snb_pte_encode;

	return ggtt_probe_common(ggtt, size);
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->base.total,
		      &ggtt->stolen_size,
		      &ggtt->mappable_base,
		      &ggtt->mappable_end);

	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
	ggtt->base.insert_page = i915_ggtt_insert_page;
	ggtt->base.insert_entries = i915_ggtt_insert_entries;
	ggtt->base.clear_range = i915_ggtt_clear_range;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	if (unlikely(ggtt->do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @dev_priv: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ggtt->base.i915 = dev_priv;
	ggtt->base.dma = &dev_priv->drm.pdev->dev;

	if (INTEL_GEN(dev_priv) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(dev_priv) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret)
		return ret;

	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
	 * This is easier than doing range restriction on the fly, as we
	 * currently don't have any bits spare to pass in this upper
	 * restriction!
	 */
	if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if ((ggtt->base.total - 1) >> 32) {
		DRM_ERROR("We never expected a Global GTT with more than 32 bits"
			  " of address space! Found %lldM!\n",
			  ggtt->base.total >> 20);
		ggtt->base.total = 1ULL << 32;
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if (ggtt->mappable_end > ggtt->base.total) {
		DRM_ERROR("mappable aperture extends past end of GGTT,"
			  " aperture=%llx, total=%llx\n",
			  ggtt->mappable_end, ggtt->base.total);
		ggtt->mappable_end = ggtt->base.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %lluM\n",
		 ggtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
	if (intel_vtd_active())
		DRM_INFO("VT-d active for gfx access\n");

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @dev_priv: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	INIT_LIST_HEAD(&dev_priv->vm_list);

	/* Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
				dev_priv->ggtt.mappable_base,
				dev_priv->ggtt.mappable_end)) {
		ret = -EIO;
		goto out_gtt_cleanup;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev_priv);
	if (ret)
		goto out_gtt_cleanup;

	return 0;

out_gtt_cleanup:
	ggtt->base.cleanup(&ggtt->base);
	return ret;
}

int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
	GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);

	i915->ggtt.invalidate = guc_ggtt_invalidate;
}

void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);

	i915->ggtt.invalidate = gen6_ggtt_invalidate;
}

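/*
 * Illustrative sketch (not part of the driver): the enable/disable pair
 * above is strictly ordered -- the GEM_BUG_ONs insist the invalidate hook
 * is swapped exactly once in each direction, e.g. around GuC bring-up:
 *
 *	i915_ggtt_enable_guc(i915);	// invalidate == guc_ggtt_invalidate
 *	...				// GuC owns TLB invalidation
 *	i915_ggtt_disable_guc(i915);	// back to gen6_ggtt_invalidate
 *
 * Calling either twice in a row would trip the corresponding GEM_BUG_ON.
 */
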
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj, *on;

	i915_check_and_clear_faults(dev_priv);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);

	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.bound_list, global_link) {
		bool ggtt_bound = false;
		struct i915_vma *vma;

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
				continue;

			if (!i915_vma_unbind(vma))
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
			ggtt_bound = true;
		}

		if (ggtt_bound)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}

	ggtt->base.closed = false;

	if (INTEL_GEN(dev_priv) >= 8) {
		if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev_priv)) {
		struct i915_address_space *vm;

		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;
			else
				ppgtt = i915_vm_to_ppgtt(vm);

			gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
		}
	}

	i915_ggtt_invalidate(dev_priv);
}

static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need is the DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}

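/*
 * Illustrative sketch (not part of the driver): a worked example of the
 * walk above for a 2x2 tile grid with stride == width == 2. The source
 * pages are laid out row-major as
 *
 *	0 1
 *	2 3
 *
 * rotate_pages() emits each column bottom-to-top, so the output order of
 * DMA addresses is in[2], in[0], in[3], in[1] -- i.e. the 90-degree
 * rotated view
 *
 *	2 0
 *	3 1
 *
 * read row-major, which is what the display engine then scans out.
 */
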
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	const unsigned long n_pages = obj->base.size / PAGE_SIZE;
	unsigned int size = intel_rotation_info_size(rot_info);
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = kvmalloc_array(n_pages,
					sizeof(dma_addr_t),
					GFP_TEMPORARY);
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
		page_addr_list[i++] = dma_addr;

	GEM_BUG_ON(i != n_pages);
	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
	}

	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	kvfree(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	kvfree(page_addr_list);

	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(iter->length - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

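/*
 * Illustrative sketch (not part of the driver): a partial view simply
 * re-exports a page-aligned window of the object's backing store. E.g.
 * for a view with partial.offset == 1 and partial.size == 2 over an
 * object whose first sg chunk covers pages 0-3, the loop above produces
 * a single entry covering pages 1-2 of that chunk:
 *
 *	sg_dma_address(sg) == sg_dma_address(chunk0) + (1 << PAGE_SHIFT);
 *	sg_dma_len(sg)     == 2 << PAGE_SHIFT;
 */
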
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret;

	/* The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	case I915_GGTT_VIEW_NORMAL:
		vma->pages = vma->obj->mm.pages;
		return 0;

	case I915_GGTT_VIEW_ROTATED:
		vma->pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;

	default:
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);
		return -EINVAL;
	}

	ret = 0;
	if (unlikely(IS_ERR(vma->pages))) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}
	return ret;
}

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}

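/*
 * A minimal usage sketch, not part of the driver: example_reserve_fixed() is
 * an invented name and the offset/size are arbitrary. It reserves 64 KiB at
 * a fixed 1 MiB offset; both values satisfy the alignment asserts above.
 */
static inline int example_reserve_fixed(struct i915_address_space *vm,
					struct drm_mm_node *node)
{
	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	return i915_gem_gtt_reserve(vm, node, SZ_64K, SZ_1M,
				    I915_COLOR_UNEVICTABLE, PIN_NONBLOCK);
}
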
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}

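/*
 * Worked example for random_offset() above (illustrative numbers): with
 * start = 4 KiB, end = 1 MiB, len = 64 KiB and align = 4 KiB, the candidate
 * range is round_down(1 MiB - 64 KiB, 4 KiB) - round_up(4 KiB, 4 KiB)
 * = 960 KiB - 4 KiB = 956 KiB. A random value is reduced modulo that range,
 * added to @start and rounded up, yielding a 4 KiB aligned offset in
 * [4 KiB, 960 KiB] whose [offset, offset + len) span fits inside [start, end).
 */
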
/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction; if that fails, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

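	/*
	 * PIN_MAPPABLE allocations must stay inside the CPU-visible aperture
	 * at the bottom of the GGTT, so fill upwards from low addresses;
	 * PIN_HIGH packs nodes top-down to keep that range free; otherwise
	 * let the range manager pick the best-fitting hole.
	 */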
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)            (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
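
/*
 * A minimal usage sketch, not part of the driver: example_insert_low() is an
 * invented name. It asks for 2 MiB anywhere in the low 4 GiB of the address
 * space, biased towards the mappable aperture via PIN_MAPPABLE.
 */
static inline int example_insert_low(struct i915_address_space *vm,
				     struct drm_mm_node *node)
{
	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	return i915_gem_gtt_insert(vm, node,
				   SZ_2M, 0, I915_COLOR_UNEVICTABLE,
				   0, BIT_ULL(32), PIN_MAPPABLE);
}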

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif