/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, are
 * operating on, or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

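/*
 * The remapping described above can be illustrated outside the driver. The
 * following is a minimal standalone sketch (not driver code; all names are
 * local to the example) that derives the page order of the alternative
 * 1212/3434 view from the 2x2 normal view by repeating each framebuffer row:
 */
#if 0
#include <stdio.h>

/* Emit the page indices of a row-doubled view of a w x h page grid. */
static void remapped_view(int w, int h)
{
	int row, rep, col;

	for (row = 0; row < h; row++) {
		for (rep = 0; rep < 2; rep++)
			for (col = 0; col < w; col++)
				printf("%d", row * w + col + 1);
		printf("\n");
	}
}

int main(void)
{
	remapped_view(2, 2); /* prints "1212" then "3434" */
	return 0;
}
#endif
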
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* GVT-g has no support for 32bit ppgtt */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
		if (has_full_48bit_ppgtt)
			return 3;

		if (has_full_ppgtt)
			return 2;
	}

	return has_aliasing_ppgtt ? 1 : 0;
}

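/*
 * A minimal standalone sketch (not driver code) of the decision ladder
 * above: given the platform capability bits and the requested module
 * parameter, it picks the same 0/1/2/3 PPGTT mode. The hardware-specific
 * quirks (gen9+ forcing, VT-d on SNB, pre-B3 VLV) are deliberately omitted.
 */
#if 0
#include <stdbool.h>

static int sanitize_enable_ppgtt(bool aliasing, bool full, bool full_48bit,
				 bool execlists, int request)
{
	if (!aliasing)
		return 0;		/* no PPGTT support at all */
	if (request >= 0 && request <= 1)
		return request;		/* explicit off / aliasing-only */
	if (request == 2 && full)
		return 2;
	if (request == 3 && full_48bit)
		return 3;
	if (execlists) {		/* auto: prefer the deepest mode */
		if (full_48bit)
			return 3;
		if (full)
			return 2;
	}
	return 1;			/* fall back to aliasing PPGTT */
}
#endif
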
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
						 vma->size);
		if (ret)
			return ret;
	}

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

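/*
 * All of the encoders above follow the same pattern: start from a "valid"
 * bit, OR in the page-aligned bus address, then OR in per-platform cache
 * control bits. A standalone sketch of that pattern follows; the bit values
 * below are illustrative placeholders, not the real platform definitions.
 */
#if 0
#include <stdint.h>
#include <assert.h>

#define EX_PTE_VALID	(1ull << 0)	/* placeholder valid bit */
#define EX_PTE_CACHED	(1ull << 1)	/* placeholder cache-control bit */

static uint64_t example_pte_encode(uint64_t addr, int cached)
{
	uint64_t pte = EX_PTE_VALID;

	assert((addr & 0xfffull) == 0);	/* address must be page aligned */
	pte |= addr;			/* address bits sit above the flags */
	if (cached)
		pte |= EX_PTE_CACHED;
	return pte;
}
#endif
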
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec *pvec = &vm->free_pages;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	if (likely(pvec->nr))
		return pvec->pages[--pvec->nr];

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

	/* A placeholder for a specific mutex to guard the WC stash */
	lockdep_assert_held(&vm->i915->drm.struct_mutex);

	/* Look in our global stash of WC pages... */
	pvec = &vm->i915->mm.wc_stash;
	if (likely(pvec->nr))
		return pvec->pages[--pvec->nr];

	/* Otherwise batch allocate pages to amortize the cost of set_pages_wc. */
	do {
		struct page *page;

		page = alloc_page(gfp);
		if (unlikely(!page))
			break;

		pvec->pages[pvec->nr++] = page;
	} while (pagevec_space(pvec));

	if (unlikely(!pvec->nr))
		return NULL;

	set_pages_array_wc(pvec->pages, pvec->nr);

	return pvec->pages[--pvec->nr];
}

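/*
 * The free_pages pagevec above is a simple LIFO cache: frees push pages on,
 * allocations pop them off, and only an empty cache goes to the page
 * allocator. A standalone sketch of the same idea, with plain malloc/free
 * standing in for the page allocator (not driver code; the capacity below
 * is a placeholder for the fixed pagevec size):
 */
#if 0
#include <stdlib.h>

#define STASH_SIZE 14	/* placeholder capacity */

struct stash {
	unsigned int nr;
	void *slot[STASH_SIZE];
};

static void *stash_alloc(struct stash *s, size_t sz)
{
	if (s->nr)			/* fast path: reuse a cached buffer */
		return s->slot[--s->nr];
	return malloc(sz);		/* slow path: hit the allocator */
}

static void stash_free(struct stash *s, void *p)
{
	if (s->nr < STASH_SIZE)		/* cache it for the next allocation */
		s->slot[s->nr++] = p;
	else
		free(p);		/* overflow: release immediately */
}
#endif
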
static void vm_free_pages_release(struct i915_address_space *vm,
				  bool immediate)
{
	struct pagevec *pvec = &vm->free_pages;

	GEM_BUG_ON(!pagevec_count(pvec));

	if (vm->pt_kmap_wc) {
		struct pagevec *stash = &vm->i915->mm.wc_stash;

		/* When we use WC, first fill up the global stash and then,
		 * only if that is full, immediately free the overflow.
		 */

		lockdep_assert_held(&vm->i915->drm.struct_mutex);
		if (pagevec_space(stash)) {
			do {
				stash->pages[stash->nr++] =
					pvec->pages[--pvec->nr];
				if (!pvec->nr)
					return;
			} while (pagevec_space(stash));

			/* As we have made some room in the VM's free_pages,
			 * we can wait for it to fill again. Unless we are
			 * inside i915_address_space_fini() and must
			 * immediately release the pages!
			 */
			if (!immediate)
				return;
		}

		set_pages_array_wb(pvec->pages, pvec->nr);
	}

	__pagevec_release(pvec);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	if (!pagevec_add(&vm->free_pages, page))
		vm_free_pages_release(vm, false);
}

static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))

static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);
	int i;

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct page *page;
	dma_addr_t addr;

	page = alloc_page(gfp | __GFP_ZERO);
	if (unlikely(!page))
		return -ENOMEM;

	addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
			    PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, addr))) {
		__free_page(page);
		return -ENOMEM;
	}

	vm->scratch_page.page = page;
	vm->scratch_page.daddr = addr;
	return 0;
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
	struct i915_page_dma *p = &vm->scratch_page;

	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
}

static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill32_px(vm, pt,
		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	unsigned int i;

	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	for (i = 0; i < I915_PDES; i++)
		pd->page_table[i] = vm->scratch_pt;
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	unsigned int i;

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	for (i = 0; i < pdpes; i++)
		pdp->page_directory[i] = vm->scratch_pd;

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	unsigned int i;

	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
		pml4->pdps[i] = vm->scratch_pdp;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

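/*
 * gen8_write_pdp() above programs one PDP entry with two
 * MI_LOAD_REGISTER_IMM commands (upper then lower dword). The emission
 * pattern reduces to writing (opcode, register, value) triples into a u32
 * command buffer. A standalone sketch with a placeholder opcode (not the
 * real MI encoding; not driver code):
 */
#if 0
#include <stdint.h>

#define EX_MI_LRI(n) (0x22000000u + (n))	/* placeholder opcode */

static uint32_t *emit_lri(uint32_t *cs, uint32_t reg, uint32_t value)
{
	*cs++ = EX_MI_LRI(1);	/* load one register */
	*cs++ = reg;		/* MMIO offset */
	*cs++ = value;		/* immediate value */
	return cs;
}

/* Usage mirroring gen8_write_pdp(): six dwords for one 64-bit address. */
static uint32_t *emit_pdp(uint32_t *cs, uint32_t udw, uint32_t ldw,
			  uint64_t addr)
{
	cs = emit_lri(cs, udw, addr >> 32);
	cs = emit_lri(cs, ldw, addr & 0xffffffffu);
	return cs;
}
#endif
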
static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				u64 start, u64 length)
{
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t *vaddr;

	GEM_BUG_ON(num_entries > pt->used_ptes);

	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;

	vaddr = kmap_atomic_px(pt);
	while (pte < pte_end)
		vaddr[pte++] = scratch_pte;
	kunmap_atomic(vaddr);

	return false;
}

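/*
 * gen8_ppgtt_clear_pt() above and the clear functions that follow all share
 * one contract: drop entries from a level, decrement its use count, and
 * report emptiness so the caller can unhook and free the whole structure,
 * then repeat one level up. A standalone sketch of that contract (not
 * driver code; all names are local to the example):
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

struct ex_table {
	unsigned int used;	/* live entries in this table */
};

/* Returns true when the table became empty and can be freed by the caller. */
static bool ex_clear_range(struct ex_table *t, uint64_t *entries,
			   unsigned int first, unsigned int count,
			   uint64_t scratch)
{
	t->used -= count;
	if (!t->used)
		return true;	/* whole table dead: skip the writes */

	while (count--)
		entries[first++] = scratch;	/* repoint at scratch */
	return false;
}
#endif
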
static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				u64 start, u64 length)
{
	struct i915_page_table *pt;
	u32 pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		GEM_BUG_ON(pt == vm->scratch_pt);

		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;

		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
		GEM_BUG_ON(!pd->used_pdes);
		pd->used_pdes--;

		free_pt(vm, pt);
	}

	return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
	if (!use_4lvl(vm))
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 u64 start, u64 length)
{
	struct i915_page_directory *pd;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		GEM_BUG_ON(pd == vm->scratch_pd);

		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;

		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;

		free_pd(vm, pd);
	}

	return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	unsigned int pml4e;

	GEM_BUG_ON(!use_4lvl(vm));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		GEM_BUG_ON(pdp == vm->scratch_pdp);

		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;

		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
	}
}

struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
};

struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		 gen8_pml4e_index(start),
		 gen8_pdpe_index(start),
		 gen8_pde_index(start),
		 gen8_pte_index(start),
	};
}

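/*
 * gen8_insert_pte() above splits a 48-bit GPU virtual address into the four
 * 9-bit page-table indices. With 4KiB pages and 512 entries per level, the
 * split is a shift-and-mask per level, as in this standalone sketch (not
 * driver code):
 */
#if 0
#include <stdint.h>

#define EX_PAGE_SHIFT	12
#define EX_LEVEL_BITS	9
#define EX_LEVEL_MASK	((1u << EX_LEVEL_BITS) - 1)	/* 511 */

struct ex_insert_pte {
	uint16_t pml4e, pdpe, pde, pte;
};

static struct ex_insert_pte ex_insert_pte(uint64_t start)
{
	return (struct ex_insert_pte) {
		/* bits 47:39 select the pml4 entry */
		.pml4e = (start >> (EX_PAGE_SHIFT + 3 * EX_LEVEL_BITS)) & EX_LEVEL_MASK,
		/* bits 38:30 select the page directory pointer entry */
		.pdpe  = (start >> (EX_PAGE_SHIFT + 2 * EX_LEVEL_BITS)) & EX_LEVEL_MASK,
		/* bits 29:21 select the page directory entry */
		.pde   = (start >> (EX_PAGE_SHIFT + 1 * EX_LEVEL_BITS)) & EX_LEVEL_MASK,
		/* bits 20:12 select the page table entry */
		.pte   = (start >> EX_PAGE_SHIFT) & EX_LEVEL_MASK,
	};
}
#endif
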
static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
			      enum i915_cache_level cache_level)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
	gen8_pte_t *vaddr;
	bool ret;

	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
	do {
		vaddr[idx->pte] = pte_encode | iter->dma;

		iter->dma += PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

				/* Limited by sg length for 3lvl */
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
					ret = true;
					break;
				}

				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
				pd = pdp->page_directory[idx->pdpe];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
		}
	} while (1);
	kunmap_atomic(vaddr);

	return ret;
}

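/*
 * The sgt_dma iterator above advances through a scatter-gather list one
 * page at a time, hopping to the next segment when the current one is
 * exhausted. The same walk over a plain array of (address, length)
 * segments, as a standalone sketch (not driver code):
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define EX_PAGE_SIZE 4096ull

struct ex_seg {
	uint64_t dma;	/* bus address of the segment */
	uint64_t len;	/* segment length in bytes, page aligned */
};

/* Invoke cb once per page across all segments, in order. */
static void ex_for_each_page(const struct ex_seg *seg, size_t count,
			     void (*cb)(uint64_t dma))
{
	size_t i;

	for (i = 0; i < count; i++) {
		uint64_t dma = seg[i].dma;
		uint64_t max = seg[i].dma + seg[i].len;

		for (; dma < max; dma += EX_PAGE_SIZE)
			cb(dma);
	}
}
#endif
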
Chris Wilson894cceb2017-02-15 08:43:37 +0000979static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
Matthew Auld4a234c52017-06-22 10:58:36 +0100980 struct i915_vma *vma,
Chris Wilson894cceb2017-02-15 08:43:37 +0000981 enum i915_cache_level cache_level,
982 u32 unused)
Michel Thierryf9b5b782015-07-30 11:02:49 +0100983{
Chuanxiao Dong17369ba2017-07-07 17:50:59 +0800984 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Chris Wilson894cceb2017-02-15 08:43:37 +0000985 struct sgt_dma iter = {
Matthew Auld4a234c52017-06-22 10:58:36 +0100986 .sg = vma->pages->sgl,
Chris Wilson894cceb2017-02-15 08:43:37 +0000987 .dma = sg_dma_address(iter.sg),
988 .max = iter.dma + iter.sg->length,
989 };
Matthew Auld4a234c52017-06-22 10:58:36 +0100990 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
Michel Thierryf9b5b782015-07-30 11:02:49 +0100991
Chris Wilson9e89f9e2017-02-25 18:11:22 +0000992 gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
993 cache_level);
Chris Wilson894cceb2017-02-15 08:43:37 +0000994}
Michel Thierryde5ba8e2015-08-03 09:53:27 +0100995
Chris Wilson894cceb2017-02-15 08:43:37 +0000996static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
Matthew Auld4a234c52017-06-22 10:58:36 +0100997 struct i915_vma *vma,
Chris Wilson894cceb2017-02-15 08:43:37 +0000998 enum i915_cache_level cache_level,
999 u32 unused)
1000{
1001 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1002 struct sgt_dma iter = {
Matthew Auld4a234c52017-06-22 10:58:36 +01001003 .sg = vma->pages->sgl,
Chris Wilson894cceb2017-02-15 08:43:37 +00001004 .dma = sg_dma_address(iter.sg),
1005 .max = iter.dma + iter.sg->length,
1006 };
1007 struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
Matthew Auld4a234c52017-06-22 10:58:36 +01001008 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
Michel Thierryde5ba8e2015-08-03 09:53:27 +01001009
Chris Wilson9e89f9e2017-02-25 18:11:22 +00001010 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
1011 &idx, cache_level))
1012 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
Michel Thierryf9b5b782015-07-30 11:02:49 +01001013}
1014
Chris Wilson84486612017-02-15 08:43:40 +00001015static void gen8_free_page_tables(struct i915_address_space *vm,
Michel Thierryf37c0502015-06-10 17:46:39 +01001016 struct i915_page_directory *pd)
Ben Widawskyb45a6712014-02-12 14:28:44 -08001017{
1018 int i;
1019
Mika Kuoppala567047b2015-06-25 18:35:12 +03001020 if (!px_page(pd))
Ben Widawsky7ad47cf2014-02-20 11:51:21 -08001021 return;
Ben Widawskyb45a6712014-02-12 14:28:44 -08001022
Chris Wilsonfe52e372017-02-15 08:43:47 +00001023 for (i = 0; i < I915_PDES; i++) {
1024 if (pd->page_table[i] != vm->scratch_pt)
1025 free_pt(vm, pd->page_table[i]);
Ben Widawsky06fda602015-02-24 16:22:36 +00001026 }
Ben Widawskyd7b3de92015-02-24 16:22:34 +00001027}
1028
Mika Kuoppala8776f022015-06-30 18:16:40 +03001029static int gen8_init_scratch(struct i915_address_space *vm)
1030{
Matthew Auld64c050d2016-04-27 13:19:25 +01001031 int ret;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001032
Chris Wilson84486612017-02-15 08:43:40 +00001033 ret = setup_scratch_page(vm, I915_GFP_DMA);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001034 if (ret)
1035 return ret;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001036
Chris Wilson84486612017-02-15 08:43:40 +00001037 vm->scratch_pt = alloc_pt(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001038 if (IS_ERR(vm->scratch_pt)) {
Matthew Auld64c050d2016-04-27 13:19:25 +01001039 ret = PTR_ERR(vm->scratch_pt);
1040 goto free_scratch_page;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001041 }
1042
Chris Wilson84486612017-02-15 08:43:40 +00001043 vm->scratch_pd = alloc_pd(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001044 if (IS_ERR(vm->scratch_pd)) {
Matthew Auld64c050d2016-04-27 13:19:25 +01001045 ret = PTR_ERR(vm->scratch_pd);
1046 goto free_pt;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001047 }
1048
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001049 if (use_4lvl(vm)) {
Chris Wilson84486612017-02-15 08:43:40 +00001050 vm->scratch_pdp = alloc_pdp(vm);
Michel Thierry69ab76f2015-07-29 17:23:55 +01001051 if (IS_ERR(vm->scratch_pdp)) {
Matthew Auld64c050d2016-04-27 13:19:25 +01001052 ret = PTR_ERR(vm->scratch_pdp);
1053 goto free_pd;
Michel Thierry69ab76f2015-07-29 17:23:55 +01001054 }
1055 }
1056
Mika Kuoppala8776f022015-06-30 18:16:40 +03001057 gen8_initialize_pt(vm, vm->scratch_pt);
1058 gen8_initialize_pd(vm, vm->scratch_pd);
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001059 if (use_4lvl(vm))
Michel Thierry69ab76f2015-07-29 17:23:55 +01001060 gen8_initialize_pdp(vm, vm->scratch_pdp);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001061
1062 return 0;
Matthew Auld64c050d2016-04-27 13:19:25 +01001063
1064free_pd:
Chris Wilson84486612017-02-15 08:43:40 +00001065 free_pd(vm, vm->scratch_pd);
Matthew Auld64c050d2016-04-27 13:19:25 +01001066free_pt:
Chris Wilson84486612017-02-15 08:43:40 +00001067 free_pt(vm, vm->scratch_pt);
Matthew Auld64c050d2016-04-27 13:19:25 +01001068free_scratch_page:
Chris Wilson84486612017-02-15 08:43:40 +00001069 cleanup_scratch_page(vm);
Matthew Auld64c050d2016-04-27 13:19:25 +01001070
1071 return ret;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001072}
1073
Zhiyuan Lv650da342015-08-28 15:41:18 +08001074static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1075{
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001076 struct i915_address_space *vm = &ppgtt->base;
1077 struct drm_i915_private *dev_priv = vm->i915;
Zhiyuan Lv650da342015-08-28 15:41:18 +08001078 enum vgt_g2v_type msg;
Zhiyuan Lv650da342015-08-28 15:41:18 +08001079 int i;
1080
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001081 if (use_4lvl(vm)) {
1082 const u64 daddr = px_dma(&ppgtt->pml4);
Zhiyuan Lv650da342015-08-28 15:41:18 +08001083
Ville Syrjäläab75bb52015-11-04 23:20:12 +02001084 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1085 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
Zhiyuan Lv650da342015-08-28 15:41:18 +08001086
1087 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1088 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1089 } else {
Mika Kuoppalae7167762017-02-28 17:28:10 +02001090 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001091 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
Zhiyuan Lv650da342015-08-28 15:41:18 +08001092
Ville Syrjäläab75bb52015-11-04 23:20:12 +02001093 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1094 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
Zhiyuan Lv650da342015-08-28 15:41:18 +08001095 }
1096
1097 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1098 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1099 }
1100
1101 I915_WRITE(vgtif_reg(g2v_notify), msg);
1102
1103 return 0;
1104}
1105
Mika Kuoppala8776f022015-06-30 18:16:40 +03001106static void gen8_free_scratch(struct i915_address_space *vm)
1107{
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001108 if (use_4lvl(vm))
Chris Wilson84486612017-02-15 08:43:40 +00001109 free_pdp(vm, vm->scratch_pdp);
1110 free_pd(vm, vm->scratch_pd);
1111 free_pt(vm, vm->scratch_pt);
1112 cleanup_scratch_page(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001113}
1114
Chris Wilson84486612017-02-15 08:43:40 +00001115static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
Michel Thierry762d9932015-07-30 11:05:29 +01001116 struct i915_page_directory_pointer *pdp)
Ben Widawsky7ad47cf2014-02-20 11:51:21 -08001117{
Mika Kuoppala3e490042017-02-28 17:28:07 +02001118 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
Ben Widawsky7ad47cf2014-02-20 11:51:21 -08001119 int i;
1120
Mika Kuoppala3e490042017-02-28 17:28:07 +02001121 for (i = 0; i < pdpes; i++) {
Chris Wilsonfe52e372017-02-15 08:43:47 +00001122 if (pdp->page_directory[i] == vm->scratch_pd)
Ben Widawsky06fda602015-02-24 16:22:36 +00001123 continue;
1124
Chris Wilson84486612017-02-15 08:43:40 +00001125 gen8_free_page_tables(vm, pdp->page_directory[i]);
1126 free_pd(vm, pdp->page_directory[i]);
Ben Widawsky7ad47cf2014-02-20 11:51:21 -08001127 }
Michel Thierry69876be2015-04-08 12:13:27 +01001128
Chris Wilson84486612017-02-15 08:43:40 +00001129 free_pdp(vm, pdp);
Michel Thierry762d9932015-07-30 11:05:29 +01001130}
1131
1132static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1133{
1134 int i;
1135
Chris Wilsonc5d092a2017-02-15 08:43:49 +00001136 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1137 if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
Michel Thierry762d9932015-07-30 11:05:29 +01001138 continue;
1139
Chris Wilson84486612017-02-15 08:43:40 +00001140 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
Michel Thierry762d9932015-07-30 11:05:29 +01001141 }
1142
Chris Wilson84486612017-02-15 08:43:40 +00001143 cleanup_px(&ppgtt->base, &ppgtt->pml4);
Michel Thierry762d9932015-07-30 11:05:29 +01001144}
1145
1146static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1147{
Chris Wilson49d73912016-11-29 09:50:08 +00001148 struct drm_i915_private *dev_priv = vm->i915;
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001149 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Michel Thierry762d9932015-07-30 11:05:29 +01001150
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001151 if (intel_vgpu_active(dev_priv))
Zhiyuan Lv650da342015-08-28 15:41:18 +08001152 gen8_ppgtt_notify_vgt(ppgtt, false);
1153
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001154 if (use_4lvl(vm))
Michel Thierry762d9932015-07-30 11:05:29 +01001155 gen8_ppgtt_cleanup_4lvl(ppgtt);
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001156 else
1157 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
Michel Thierryd4ec9da2015-07-30 11:02:03 +01001158
Mika Kuoppala8776f022015-06-30 18:16:40 +03001159 gen8_free_scratch(vm);
Ben Widawskyb45a6712014-02-12 14:28:44 -08001160}
1161
Chris Wilsonfe52e372017-02-15 08:43:47 +00001162static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1163 struct i915_page_directory *pd,
1164 u64 start, u64 length)
Ben Widawskyd7b3de92015-02-24 16:22:34 +00001165{
Michel Thierryd7b26332015-04-08 12:13:34 +01001166 struct i915_page_table *pt;
Chris Wilsondd196742017-02-15 08:43:46 +00001167 u64 from = start;
Chris Wilsonfe52e372017-02-15 08:43:47 +00001168 unsigned int pde;
Ben Widawskyd7b3de92015-02-24 16:22:34 +00001169
Dave Gordone8ebd8e2015-12-08 13:30:51 +00001170 gen8_for_each_pde(pt, pd, start, length, pde) {
Chris Wilson14826672017-09-08 19:16:22 +01001171 int count = gen8_pte_count(start, length);
1172
Chris Wilsonfe52e372017-02-15 08:43:47 +00001173 if (pt == vm->scratch_pt) {
Chris Wilsondd196742017-02-15 08:43:46 +00001174 pt = alloc_pt(vm);
1175 if (IS_ERR(pt))
1176 goto unwind;
1177
Chris Wilson14826672017-09-08 19:16:22 +01001178 if (count < GEN8_PTES)
1179 gen8_initialize_pt(vm, pt);
Ben Widawskyd7b3de92015-02-24 16:22:34 +00001180
Chris Wilsonfe52e372017-02-15 08:43:47 +00001181 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1182 pd->used_pdes++;
Chris Wilsonbf75d592017-02-27 12:26:52 +00001183 GEM_BUG_ON(pd->used_pdes > I915_PDES);
Chris Wilsonfe52e372017-02-15 08:43:47 +00001184 }
1185
Chris Wilson14826672017-09-08 19:16:22 +01001186 pt->used_ptes += count;
Chris Wilsonfe52e372017-02-15 08:43:47 +00001187 }
Ben Widawskyd7b3de92015-02-24 16:22:34 +00001188 return 0;
1189
Chris Wilsondd196742017-02-15 08:43:46 +00001190unwind:
1191 gen8_ppgtt_clear_pd(vm, pd, from, start - from);
Ben Widawskyd7b3de92015-02-24 16:22:34 +00001192 return -ENOMEM;
1193}
1194
static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
{
	struct i915_page_directory *pd;
	u64 from = start;
	unsigned int pdpe;
	int ret;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (pd == vm->scratch_pd) {
			pd = alloc_pd(vm);
			if (IS_ERR(pd))
				goto unwind;

			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			pdp->used_pdpes++;
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
		if (unlikely(ret))
			goto unwind_pd;
	}

	return 0;

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;

			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}

		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
		if (unlikely(ret))
			goto unwind_pdp;
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
}

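/* Pretty-printer for the PTEs beneath one page directory pointer: dumps
 * every group of four entries that differs from scratch to the seq_file,
 * for use from the ppgtt debug_dump hook.
 */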
static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  u64 start, u64 length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory *pd;
	u32 pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		u64 pd_len = length;
		u64 pd_start = start;
		u32 pde;

		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			u32 pte;
			gen8_pte_t *pt_vaddr;

			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
				continue;

			pt_vaddr = kmap_atomic_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				u64 va = (pdpe << GEN8_PDPE_SHIFT |
					  pde << GEN8_PDE_SHIFT |
					  pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	u64 start = 0, length = ppgtt->base.total;

	if (use_4lvl(vm)) {
		u64 pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
				continue;

			seq_printf(m, " PML4E #%llu\n", pml4e);
			gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
		}
	} else {
		gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
	}
}

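/* When running under virtualisation the vGPU expects the top level pdp
 * to be fully populated, so allocate all of its page directories up
 * front; the extra used_pdpes reference ensures they are never removed.
 */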
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
	struct i915_page_directory *pd;
	u64 start = 0, length = ppgtt->base.total;
	u64 from = start;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		pd = alloc_pd(vm);
		if (IS_ERR(pd))
			goto unwind;

		gen8_initialize_pd(vm, pd);
		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
		pdp->used_pdpes++;
	}

	pdp->used_pdpes++; /* never remove */
	return 0;

unwind:
	start -= from;
	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		free_pd(vm, pd);
	}
	pdp->used_pdpes = 0;
	return -ENOMEM;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal x86
 * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
 * legacy 32b address space.
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
		1ULL << 48 :
		1ULL << 32;

	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter, so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
		ppgtt->base.pt_kmap_wc = true;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret) {
		ppgtt->base.total = 0;
		return ret;
	}

	if (use_4lvl(vm)) {
		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->switch_mm = gen8_mm_switch_4lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
	} else {
		ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		if (intel_vgpu_active(dev_priv)) {
			ret = gen8_preallocate_top_level_pdp(ppgtt);
			if (ret) {
				__pdp_fini(&ppgtt->pdp);
				goto free_scratch;
			}
		}

		ppgtt->switch_mm = gen8_mm_switch_3lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
	}

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	u32 pd_entry, pte, pde;
	u32 start = 0, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, " SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(pt_vaddr);
	}
}

/* Write the page directory entry at index @pde to point at the page
 * table @pt */
static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
				  const unsigned int pde,
				  const struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
		       ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing
 * page directories. */
static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
				  u32 start, u32 length)
{
	struct i915_page_table *pt;
	unsigned int pde;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mark_tlbs_dirty(ppgtt);
	wmb();
}

static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
	return ppgtt->pd.base.ggtt_offset << 10;
}

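/* The *_mm_switch() callbacks below load this ppgtt's page directory
 * into the ring's PP_DIR registers. On gen7/hsw this is done from the
 * ring via an MI_LOAD_REGISTER_IMM pair (the TLBs must be flushed and
 * invalidated before the switch); gen6 writes the registers directly
 * from the CPU instead.
 */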
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = req->i915;

	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
	return 0;
}

static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
				 GEN8_GFX_PPGTT_48B : 0;
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
	}
}

static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	u32 ecochk, ecobits;
	enum intel_engine_id id;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev_priv)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_engine(engine, dev_priv, id) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	u32 ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned int first_entry = start >> PAGE_SHIFT;
	unsigned int pde = first_entry / GEN6_PTES;
	unsigned int pte = first_entry % GEN6_PTES;
	unsigned int num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte =
		vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);

	while (num_entries) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
		unsigned int end = min(pte + num_entries, GEN6_PTES);
		gen6_pte_t *vaddr;

		num_entries -= end - pte;

		/* Note that the hw doesn't support removing PDE on the fly
		 * (they are cached inside the context with no means to
		 * invalidate the cache), so we can only reset the PTE
		 * entries back to scratch.
		 */

		vaddr = kmap_atomic_px(pt);
		do {
			vaddr[pte++] = scratch_pte;
		} while (pte < end);
		kunmap_atomic(vaddr);

		pte = 0;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct i915_vma *vma,
				      enum i915_cache_level cache_level,
				      u32 flags)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned first_entry = vma->node.start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
	struct sgt_dma iter;
	gen6_pte_t *vaddr;

	vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
	iter.sg = vma->pages->sgl;
	iter.dma = sg_dma_address(iter.sg);
	iter.max = iter.dma + iter.sg->length;
	do {
		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

		iter.dma += PAGE_SIZE;
		if (iter.dma == iter.max) {
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg)
				break;

			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + iter.sg->length;
		}

		if (++act_pte == GEN6_PTES) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
			act_pte = 0;
		}
	} while (1);
	kunmap_atomic(vaddr);
}

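/* Make sure every page table backing the given range is allocated,
 * replacing scratch entries as needed. The newly written PDEs are
 * flushed with a single wmb() at the end rather than one per entry.
 */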
static int gen6_alloc_va_range(struct i915_address_space *vm,
			       u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;
	bool flush = false;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind_out;

			gen6_initialize_pt(vm, pt);
			ppgtt->pd.page_table[pde] = pt;
			gen6_write_pde(ppgtt, pde, pt);
			flush = true;
		}
	}

	if (flush) {
		mark_tlbs_dirty(ppgtt);
		wmb();
	}

	return 0;

unwind_out:
	/* Clear only the range we populated: the length is start - from */
	gen6_ppgtt_clear_range(vm, from, start - from);
	return -ENOMEM;
}

static int gen6_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		cleanup_scratch_page(vm);
		return PTR_ERR(vm->scratch_pt);
	}

	gen6_initialize_pt(vm, vm->scratch_pt);

	return 0;
}

static void gen6_free_scratch(struct i915_address_space *vm)
{
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory *pd = &ppgtt->pd;
	struct i915_page_table *pt;
	u32 pde;

	drm_mm_remove_node(&ppgtt->node);

	gen6_for_all_pdes(pt, pd, pde)
		if (pt != vm->scratch_pt)
			free_pt(vm, pt);

	gen6_free_scratch(vm);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));

	ret = gen6_init_scratch(vm);
	if (ret)
		return ret;

	ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
				  GEN6_PD_SIZE, GEN6_PD_ALIGN,
				  I915_COLOR_UNEVICTABLE,
				  0, ggtt->base.total,
				  PIN_HIGH);
	if (ret)
		goto err_out;

	if (ppgtt->node.start < ggtt->mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	ppgtt->pd.base.ggtt_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);

	return 0;

err_out:
	gen6_free_scratch(vm);
	return ret;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	return gen6_ppgtt_allocate_page_directories(ppgtt);
}

static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
				  u64 start, u64 length)
{
	struct i915_page_table *unused;
	u32 pde;

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ppgtt->base.pte_encode = ggtt->base.pte_encode;
	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
		ppgtt->switch_mm = gen6_mm_switch;
	else if (IS_HASWELL(dev_priv))
		ppgtt->switch_mm = hsw_mm_switch;
	else if (IS_GEN7(dev_priv))
		ppgtt->switch_mm = gen7_mm_switch;
	else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;

	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
	gen6_write_page_range(ppgtt, 0, ppgtt->base.total);

	ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
	if (ret) {
		gen6_ppgtt_cleanup(&ppgtt->base);
		return ret;
	}

	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
			 ppgtt->pd.base.ggtt_offset << 10);

	return 0;
}

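/* Common ppgtt constructor: record the device pointers, then defer to
 * the gen6/7 or gen8+ specific initialiser.
 */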
static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
			   struct drm_i915_private *dev_priv)
{
	ppgtt->base.i915 = dev_priv;
	ppgtt->base.dma = &dev_priv->drm.pdev->dev;

	if (INTEL_INFO(dev_priv)->gen < 8)
		return gen6_ppgtt_init(ppgtt);
	else
		return gen8_ppgtt_init(ppgtt);
}

static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv,
				    const char *name)
{
	i915_gem_timeline_init(dev_priv, &vm->timeline, name);

	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->unbound_list);

	list_add_tail(&vm->global_link, &dev_priv->vm_list);
	pagevec_init(&vm->free_pages, false);
}

static void i915_address_space_fini(struct i915_address_space *vm)
{
	if (pagevec_count(&vm->free_pages))
		vm_free_pages_release(vm, true);

	i915_gem_timeline_fini(&vm->timeline);
	drm_mm_takedown(&vm->mm);
	list_del(&vm->global_link);
}

static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
{
	/* This function is for GTT-related workarounds. It is called on
	 * driver load and after a GPU reset, so you can place workarounds
	 * here even if they get overwritten by a GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
	else if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}

int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
	gtt_write_workarounds(dev_priv);

	/* In the case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself. We don't
	 * need to do anything here. */
	if (i915_modparams.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev_priv))
		return 0;

	if (IS_GEN6(dev_priv))
		gen6_ppgtt_enable(dev_priv);
	else if (IS_GEN7(dev_priv))
		gen7_ppgtt_enable(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_enable(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));

	return 0;
}

struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
		  struct drm_i915_file_private *fpriv,
		  const char *name)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = __hw_ppgtt_init(ppgtt, dev_priv);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	kref_init(&ppgtt->ref);
	i915_address_space_init(&ppgtt->base, dev_priv, name);
	ppgtt->base.file = fpriv;

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}

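/* Mark the address space as closed and close every vma still attached
 * to it, walking the active, inactive and unbound lists in turn.
 */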
void i915_ppgtt_close(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->active_list,
		&vm->inactive_list,
		&vm->unbound_list,
		NULL,
	}, **phase;

	GEM_BUG_ON(vm->closed);
	vm->closed = true;

	for (phase = phases; *phase; phase++) {
		struct i915_vma *vma, *vn;

		list_for_each_entry_safe(vma, vn, *phase, vm_link)
			if (!i915_vma_is_closed(vma))
				i915_vma_close(vma);
	}
}

void i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound and destroyed */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
	WARN_ON(!list_empty(&ppgtt->base.unbound_list));

	ppgtt->base.cleanup(&ppgtt->base);
	i915_address_space_fini(&ppgtt->base);
	kfree(ppgtt);
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *dev_priv)
{
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_INFO(dev_priv)->gen < 6)
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 fault_reg;
		fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(engine),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}

	/* Engine specific init may not have been done till this point. */
	if (dev_priv->engine[RCS])
		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_GEN(dev_priv) < 6)
		return;

	i915_check_and_clear_faults(dev_priv);

	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);

	i915_ggtt_invalidate(dev_priv);
}

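/* Map the object's backing pages for DMA. If the remapping fails (e.g.
 * a small swiotlb table is exhausted), shrink other objects out of the
 * remapper and retry until the mapping succeeds or nothing more can be
 * reclaimed.
 */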
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg(&obj->base.dev->pdev->dev,
			       pages->sgl, pages->nents,
			       PCI_DMA_BIDIRECTIONAL))
			return 0;

		/* If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND |
				 I915_SHRINK_ACTIVE));

	return -ENOSPC;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

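/* Write a single PTE into the GGTT (and invalidate the TLBs), as opposed
 * to the *_insert_entries() variants below which fill a whole vma.
 */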
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	gen8_set_pte(pte, gen8_pte_encode(addr, level));

	ggtt->invalidate(vm->i915);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen8_pte_t __iomem *gtt_entries;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
	dma_addr_t addr;

	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
	gtt_entries += vma->node.start >> PAGE_SHIFT;
	for_each_sgt_dma(addr, sgt_iter, vma->pages)
		gen8_set_pte(gtt_entries++, pte_encode | addr);

	wmb();

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	ggtt->invalidate(vm->i915);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(vm->i915);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
	unsigned int i = vma->node.start >> PAGE_SHIFT;
	struct sgt_iter iter;
	dma_addr_t addr;

	for_each_sgt_dma(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
	wmb();

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	ggtt->invalidate(vm->i915);
}

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

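/* The stop_machine() wrappers below serialise GGTT updates as a
 * workaround for VT-d on Broxton: each update runs with every other CPU
 * quiesced and finishes with bxt_vtd_ggtt_wa() to drain the GAM fifo.
 */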
static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;

	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 unused)
{
	struct insert_entries arg = { vm, vma, level };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

struct clear_range {
	struct i915_address_space *vm;
	u64 start;
	u64 length;
};

static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
{
	struct clear_range *arg = _arg;

	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
					  u64 start,
					  u64 length)
{
	struct clear_range arg = { vm, start, length };

	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
}

Ben Widawsky853ba5d2013-07-16 16:50:05 -07002349static void gen6_ggtt_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002350 u64 start, u64 length)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002351{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002352 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Ben Widawsky782f1492014-02-20 11:50:33 -08002353 unsigned first_entry = start >> PAGE_SHIFT;
2354 unsigned num_entries = length >> PAGE_SHIFT;
Michel Thierry07749ef2015-03-16 16:00:54 +00002355 gen6_pte_t scratch_pte, __iomem *gtt_base =
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002356 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2357 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002358 int i;
2359
2360 if (WARN(num_entries > max_entries,
2361 "First entry = %d; Num entries = %d (max=%d)\n",
2362 first_entry, num_entries, max_entries))
2363 num_entries = max_entries;
2364
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002365 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002366 I915_CACHE_LLC, 0);
Ben Widawsky828c7902013-10-16 09:21:30 -07002367
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002368 for (i = 0; i < num_entries; i++)
2369 iowrite32(scratch_pte, &gtt_base[i]);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002370}
2371
Chris Wilsond6473f52016-06-10 14:22:59 +05302372static void i915_ggtt_insert_page(struct i915_address_space *vm,
2373 dma_addr_t addr,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002374 u64 offset,
Chris Wilsond6473f52016-06-10 14:22:59 +05302375 enum i915_cache_level cache_level,
2376 u32 unused)
2377{
Chris Wilsond6473f52016-06-10 14:22:59 +05302378 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2379 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
Chris Wilsond6473f52016-06-10 14:22:59 +05302380
2381 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
Chris Wilsond6473f52016-06-10 14:22:59 +05302382}
2383
Daniel Vetterd369d2d2015-04-14 17:35:25 +02002384static void i915_ggtt_insert_entries(struct i915_address_space *vm,
Matthew Auld4a234c52017-06-22 10:58:36 +01002385 struct i915_vma *vma,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002386 enum i915_cache_level cache_level,
2387 u32 unused)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002388{
2389 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2390 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2391
Matthew Auld4a234c52017-06-22 10:58:36 +01002392 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2393 flags);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002394}
2395
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002396static void i915_ggtt_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002397 u64 start, u64 length)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002398{
Chris Wilson2eedfc72016-10-24 13:42:17 +01002399 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002400}
2401
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02002402static int ggtt_bind_vma(struct i915_vma *vma,
2403 enum i915_cache_level cache_level,
2404 u32 flags)
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002405{
Chris Wilson49d73912016-11-29 09:50:08 +00002406 struct drm_i915_private *i915 = vma->vm->i915;
Daniel Vetter0a878712015-10-15 14:23:01 +02002407 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsonba7a5742017-02-15 08:43:35 +00002408 u32 pte_flags;
Daniel Vetter0a878712015-10-15 14:23:01 +02002409
Chris Wilsonba7a5742017-02-15 08:43:35 +00002410 if (unlikely(!vma->pages)) {
2411 int ret = i915_get_ggtt_vma_pages(vma);
2412 if (ret)
2413 return ret;
2414 }
Daniel Vetter0a878712015-10-15 14:23:01 +02002415
2416 /* Currently applicable only to VLV */
Chris Wilsonba7a5742017-02-15 08:43:35 +00002417 pte_flags = 0;
Daniel Vetter0a878712015-10-15 14:23:01 +02002418 if (obj->gt_ro)
2419 pte_flags |= PTE_READ_ONLY;
2420
Chris Wilson9c870d02016-10-24 13:42:15 +01002421 intel_runtime_pm_get(i915);
Matthew Auld4a234c52017-06-22 10:58:36 +01002422 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
Chris Wilson9c870d02016-10-24 13:42:15 +01002423 intel_runtime_pm_put(i915);
Daniel Vetter0a878712015-10-15 14:23:01 +02002424
2425 /*
2426 * Without an aliasing PPGTT there's no difference between
2427 * GLOBAL/LOCAL_BIND; they are all the same PTEs. Hence we unconditionally
2428 * upgrade to both bound if we bind either, to avoid double-binding.
2429 */
Chris Wilson3272db52016-08-04 16:32:32 +01002430 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
Daniel Vetter0a878712015-10-15 14:23:01 +02002431
2432 return 0;
2433}
2434
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002435static void ggtt_unbind_vma(struct i915_vma *vma)
2436{
2437 struct drm_i915_private *i915 = vma->vm->i915;
2438
2439 intel_runtime_pm_get(i915);
2440 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2441 intel_runtime_pm_put(i915);
2442}
2443
Daniel Vetter0a878712015-10-15 14:23:01 +02002444static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2445 enum i915_cache_level cache_level,
2446 u32 flags)
2447{
Chris Wilson49d73912016-11-29 09:50:08 +00002448 struct drm_i915_private *i915 = vma->vm->i915;
Chris Wilson321d1782015-11-20 10:27:18 +00002449 u32 pte_flags;
Chris Wilsonff685972017-02-15 08:43:42 +00002450 int ret;
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02002451
Chris Wilsonba7a5742017-02-15 08:43:35 +00002452 if (unlikely(!vma->pages)) {
Chris Wilsonff685972017-02-15 08:43:42 +00002453 ret = i915_get_ggtt_vma_pages(vma);
Chris Wilsonba7a5742017-02-15 08:43:35 +00002454 if (ret)
2455 return ret;
2456 }
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002457
Akash Goel24f3a8c2014-06-17 10:59:42 +05302458 /* Currently applicable only to VLV */
Chris Wilson321d1782015-11-20 10:27:18 +00002459 pte_flags = 0;
2460 if (vma->obj->gt_ro)
Daniel Vetterf329f5f2015-04-14 17:35:15 +02002461 pte_flags |= PTE_READ_ONLY;
Akash Goel24f3a8c2014-06-17 10:59:42 +05302462
Chris Wilsonff685972017-02-15 08:43:42 +00002463 if (flags & I915_VMA_LOCAL_BIND) {
2464 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2465
Matthew Auld1f234752017-05-12 10:14:23 +01002466 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2467 appgtt->base.allocate_va_range) {
Chris Wilsonff685972017-02-15 08:43:42 +00002468 ret = appgtt->base.allocate_va_range(&appgtt->base,
2469 vma->node.start,
Matthew Auldd5672322017-05-16 09:55:14 +01002470 vma->size);
Chris Wilsonff685972017-02-15 08:43:42 +00002471 if (ret)
Chris Wilson2f7399a2017-02-27 12:26:53 +00002472 goto err_pages;
Chris Wilsonff685972017-02-15 08:43:42 +00002473 }
2474
Matthew Auld4a234c52017-06-22 10:58:36 +01002475 appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
2476 pte_flags);
Chris Wilsonff685972017-02-15 08:43:42 +00002477 }
2478
Chris Wilson3272db52016-08-04 16:32:32 +01002479 if (flags & I915_VMA_GLOBAL_BIND) {
Chris Wilson9c870d02016-10-24 13:42:15 +01002480 intel_runtime_pm_get(i915);
Matthew Auld4a234c52017-06-22 10:58:36 +01002481 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
Chris Wilson9c870d02016-10-24 13:42:15 +01002482 intel_runtime_pm_put(i915);
Ben Widawsky6f65e292013-12-06 14:10:56 -08002483 }
Daniel Vetter74898d72012-02-15 23:50:22 +01002484
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02002485 return 0;
Chris Wilson2f7399a2017-02-27 12:26:53 +00002486
2487err_pages:
2488 if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
2489 if (vma->pages != vma->obj->mm.pages) {
2490 GEM_BUG_ON(!vma->pages);
2491 sg_free_table(vma->pages);
2492 kfree(vma->pages);
2493 }
2494 vma->pages = NULL;
2495 }
2496 return ret;
Ben Widawsky6f65e292013-12-06 14:10:56 -08002497}
2498
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002499static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
Ben Widawsky6f65e292013-12-06 14:10:56 -08002500{
Chris Wilson49d73912016-11-29 09:50:08 +00002501 struct drm_i915_private *i915 = vma->vm->i915;
Ben Widawsky6f65e292013-12-06 14:10:56 -08002502
Chris Wilson9c870d02016-10-24 13:42:15 +01002503 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2504 intel_runtime_pm_get(i915);
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002505 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
Chris Wilson9c870d02016-10-24 13:42:15 +01002506 intel_runtime_pm_put(i915);
2507 }
Ben Widawsky6f65e292013-12-06 14:10:56 -08002508
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002509 if (vma->flags & I915_VMA_LOCAL_BIND) {
2510 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2511
2512 vm->clear_range(vm, vma->node.start, vma->size);
2513 }
Daniel Vetter74163902012-02-15 23:50:21 +01002514}
2515
Chris Wilson03ac84f2016-10-28 13:58:36 +01002516void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2517 struct sg_table *pages)
Daniel Vetter74163902012-02-15 23:50:21 +01002518{
David Weinehall52a05c32016-08-22 13:32:44 +03002519 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2520 struct device *kdev = &dev_priv->drm.pdev->dev;
Chris Wilson307dc252016-08-05 10:14:12 +01002521 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawsky5c042282011-10-17 15:51:55 -07002522
Chris Wilson307dc252016-08-05 10:14:12 +01002523 if (unlikely(ggtt->do_idle_maps)) {
Chris Wilson228ec872017-03-30 09:53:41 +01002524 if (i915_gem_wait_for_idle(dev_priv, 0)) {
Chris Wilson307dc252016-08-05 10:14:12 +01002525 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2526 /* Wait a bit, in hopes it avoids the hang */
2527 udelay(10);
2528 }
2529 }
Ben Widawsky5c042282011-10-17 15:51:55 -07002530
Chris Wilson03ac84f2016-10-28 13:58:36 +01002531 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002532}
Daniel Vetter644ec022012-03-26 09:45:40 +02002533
Chris Wilson45b186f2016-12-16 07:46:42 +00002534static void i915_gtt_color_adjust(const struct drm_mm_node *node,
Chris Wilson42d6ab42012-07-26 11:49:32 +01002535 unsigned long color,
Thierry Reding440fd522015-01-23 09:05:06 +01002536 u64 *start,
2537 u64 *end)
Chris Wilson42d6ab42012-07-26 11:49:32 +01002538{
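	/* The node passed in precedes the hole being adjusted; if it is
	 * allocated with a different colour, shift our start forwards to
	 * leave a one page guard gap after it.
	 */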
Chris Wilsona6508de2017-02-06 08:45:47 +00002539 if (node->allocated && node->color != color)
Chris Wilsonf51455d2017-01-10 14:47:34 +00002540 *start += I915_GTT_PAGE_SIZE;
Chris Wilson42d6ab42012-07-26 11:49:32 +01002541
Chris Wilsona6508de2017-02-06 08:45:47 +00002542 /* Also leave a space between any object within the GTT and the
2543 * unallocated reserved node at its end, i.e. we use the color adjustment
2544 * to insert a guard page to prevent prefetches crossing over the
2545 * GTT boundary.
2546 */
2546 */
Chris Wilsonb44f97f2016-12-16 07:46:40 +00002547 node = list_next_entry(node, node_list);
Chris Wilsona6508de2017-02-06 08:45:47 +00002548 if (node->color != color)
Chris Wilsonf51455d2017-01-10 14:47:34 +00002549 *end -= I915_GTT_PAGE_SIZE;
Chris Wilson42d6ab42012-07-26 11:49:32 +01002550}
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002551
Chris Wilson6cde9a02017-02-13 17:15:50 +00002552int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2553{
2554 struct i915_ggtt *ggtt = &i915->ggtt;
2555 struct i915_hw_ppgtt *ppgtt;
2556 int err;
2557
Chris Wilson57202f42017-02-15 08:43:56 +00002558 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
Chris Wilson1188bc62017-02-15 08:43:38 +00002559 if (IS_ERR(ppgtt))
2560 return PTR_ERR(ppgtt);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002561
Chris Wilsone565ceb2017-02-15 08:43:55 +00002562 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2563 err = -ENODEV;
2564 goto err_ppgtt;
2565 }
2566
Chris Wilson6cde9a02017-02-13 17:15:50 +00002567 if (ppgtt->base.allocate_va_range) {
Chris Wilsone565ceb2017-02-15 08:43:55 +00002568 /* Note we only pre-allocate as far as the end of the global
2569 * GTT. On 48b / 4-level page-tables, the difference is very,
2570 * very significant! We have to preallocate as GVT/vgpu does
2571 * not like the page directory disappearing.
2572 */
Chris Wilson6cde9a02017-02-13 17:15:50 +00002573 err = ppgtt->base.allocate_va_range(&ppgtt->base,
Chris Wilsone565ceb2017-02-15 08:43:55 +00002574 0, ggtt->base.total);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002575 if (err)
Chris Wilson1188bc62017-02-15 08:43:38 +00002576 goto err_ppgtt;
Chris Wilson6cde9a02017-02-13 17:15:50 +00002577 }
2578
Chris Wilson6cde9a02017-02-13 17:15:50 +00002579 i915->mm.aliasing_ppgtt = ppgtt;
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002580
Chris Wilson6cde9a02017-02-13 17:15:50 +00002581 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2582 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2583
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002584 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2585 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2586
Chris Wilson6cde9a02017-02-13 17:15:50 +00002587 return 0;
2588
Chris Wilson6cde9a02017-02-13 17:15:50 +00002589err_ppgtt:
Chris Wilson1188bc62017-02-15 08:43:38 +00002590 i915_ppgtt_put(ppgtt);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002591 return err;
2592}
2593
2594void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2595{
2596 struct i915_ggtt *ggtt = &i915->ggtt;
2597 struct i915_hw_ppgtt *ppgtt;
2598
2599 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2600 if (!ppgtt)
2601 return;
2602
Chris Wilson1188bc62017-02-15 08:43:38 +00002603 i915_ppgtt_put(ppgtt);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002604
2605 ggtt->base.bind_vma = ggtt_bind_vma;
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002606 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilson6cde9a02017-02-13 17:15:50 +00002607}
2608
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002609int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
Daniel Vetter644ec022012-03-26 09:45:40 +02002610{
Ben Widawskye78891c2013-01-25 16:41:04 -08002611 /* Let GEM Manage all of the aperture.
2612 *
2613 * However, leave one page at the end still bound to the scratch page.
2614 * There are a number of places where the hardware apparently prefetches
2615 * past the end of the object, and we've seen multiple hangs with the
2616 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2617 * aperture. One page should be enough to keep any prefetching inside
2618 * of the aperture.
2619 */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002620 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsoned2f3452012-11-15 11:32:19 +00002621 unsigned long hole_start, hole_end;
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002622 struct drm_mm_node *entry;
Daniel Vetterfa76da32014-08-06 20:19:54 +02002623 int ret;
Daniel Vetter644ec022012-03-26 09:45:40 +02002624
Zhi Wangb02d22a2016-06-16 08:06:59 -04002625 ret = intel_vgt_balloon(dev_priv);
2626 if (ret)
2627 return ret;
Yu Zhang5dda8fa2015-02-10 19:05:48 +08002628
Chris Wilson95374d72016-10-12 10:05:20 +01002629 /* Reserve a mappable slot for our lockless error capture */
Chris Wilson4e64e552017-02-02 21:04:38 +00002630 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2631 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2632 0, ggtt->mappable_end,
2633 DRM_MM_INSERT_LOW);
Chris Wilson95374d72016-10-12 10:05:20 +01002634 if (ret)
2635 return ret;
2636
Chris Wilsoned2f3452012-11-15 11:32:19 +00002637 /* Clear any non-preallocated blocks */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002638 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
Chris Wilsoned2f3452012-11-15 11:32:19 +00002639 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2640 hole_start, hole_end);
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002641 ggtt->base.clear_range(&ggtt->base, hole_start,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002642 hole_end - hole_start);
Chris Wilsoned2f3452012-11-15 11:32:19 +00002643 }
2644
2645 /* And finally clear the reserved guard page */
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002646 ggtt->base.clear_range(&ggtt->base,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002647 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
Daniel Vetter6c5566a2014-08-06 15:04:50 +02002648
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002649 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
Chris Wilson6cde9a02017-02-13 17:15:50 +00002650 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
Chris Wilson95374d72016-10-12 10:05:20 +01002651 if (ret)
Chris Wilson6cde9a02017-02-13 17:15:50 +00002652 goto err;
Daniel Vetterfa76da32014-08-06 20:19:54 +02002653 }
2654
Daniel Vetter6c5566a2014-08-06 15:04:50 +02002655 return 0;
Chris Wilson95374d72016-10-12 10:05:20 +01002656
Chris Wilson95374d72016-10-12 10:05:20 +01002657err:
2658 drm_mm_remove_node(&ggtt->error_capture);
2659 return ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002660}
2661
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002662/**
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002663 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002664 * @dev_priv: i915 device
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002665 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002666void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002667{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002668 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilson94d4a2a2017-02-10 16:35:22 +00002669 struct i915_vma *vma, *vn;
Chris Wilson66df1012017-08-22 18:38:28 +01002670 struct pagevec *pvec;
Chris Wilson94d4a2a2017-02-10 16:35:22 +00002671
2672 ggtt->base.closed = true;
2673
2674 mutex_lock(&dev_priv->drm.struct_mutex);
2675 WARN_ON(!list_empty(&ggtt->base.active_list));
2676 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2677 WARN_ON(i915_vma_unbind(vma));
2678 mutex_unlock(&dev_priv->drm.struct_mutex);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002679
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002680 i915_gem_cleanup_stolen(&dev_priv->drm);
Imre Deaka4eba472016-01-19 15:26:32 +02002681
Chris Wilson1188bc62017-02-15 08:43:38 +00002682 mutex_lock(&dev_priv->drm.struct_mutex);
2683 i915_gem_fini_aliasing_ppgtt(dev_priv);
2684
Chris Wilson95374d72016-10-12 10:05:20 +01002685 if (drm_mm_node_allocated(&ggtt->error_capture))
2686 drm_mm_remove_node(&ggtt->error_capture);
2687
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002688 if (drm_mm_initialized(&ggtt->base.mm)) {
Zhi Wangb02d22a2016-06-16 08:06:59 -04002689 intel_vgt_deballoon(dev_priv);
Matthew Aulded9724d2016-11-17 21:04:10 +00002690 i915_address_space_fini(&ggtt->base);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002691 }
2692
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002693 ggtt->base.cleanup(&ggtt->base);
Chris Wilson66df1012017-08-22 18:38:28 +01002694
2695 pvec = &dev_priv->mm.wc_stash;
2696 if (pvec->nr) {
2697 set_pages_array_wb(pvec->pages, pvec->nr);
2698 __pagevec_release(pvec);
2699 }
2700
Chris Wilson1188bc62017-02-15 08:43:38 +00002701 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002702
2703 arch_phys_wc_del(ggtt->mtrr);
Chris Wilsonf7bbe782016-08-19 16:54:27 +01002704 io_mapping_fini(&ggtt->mappable);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002705}
Daniel Vetter70e32542014-08-06 15:04:57 +02002706
Daniel Vetter2c642b02015-04-14 17:35:26 +02002707static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002708{
2709 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2710 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2711 return snb_gmch_ctl << 20;
2712}
2713
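/*
 * Editorial worked example: a GGMS field of 3 decodes below as
 * 1 << 3 == 8, i.e. 8MB of PTE space after the << 20; at 8 bytes per
 * gen8 PTE that covers (8M / 8) * 4K == 4G of GGTT address space
 * (see the total computation in gen8_gmch_probe()).
 */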
Daniel Vetter2c642b02015-04-14 17:35:26 +02002714static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
Ben Widawsky9459d252013-11-03 16:53:55 -08002715{
2716 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2717 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2718 if (bdw_gmch_ctl)
2719 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
Ben Widawsky562d55d2014-05-27 16:53:08 -07002720
2721#ifdef CONFIG_X86_32
2722 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2723 if (bdw_gmch_ctl > 4)
2724 bdw_gmch_ctl = 4;
2725#endif
2726
Ben Widawsky9459d252013-11-03 16:53:55 -08002727 return bdw_gmch_ctl << 20;
2728}
2729
Daniel Vetter2c642b02015-04-14 17:35:26 +02002730static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002731{
2732 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2733 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2734
2735 if (gmch_ctrl)
2736 return 1 << (20 + gmch_ctrl);
2737
2738 return 0;
2739}
2740
Daniel Vetter2c642b02015-04-14 17:35:26 +02002741static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002742{
2743 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2744 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
Imre Deaka92d1a92017-05-10 12:21:52 +03002745 return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002746}
2747
Daniel Vetter2c642b02015-04-14 17:35:26 +02002748static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
Ben Widawsky9459d252013-11-03 16:53:55 -08002749{
2750 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2751 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
Imre Deaka92d1a92017-05-10 12:21:52 +03002752 return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
Ben Widawsky9459d252013-11-03 16:53:55 -08002753}
2754
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002755static size_t chv_get_stolen_size(u16 gmch_ctrl)
2756{
2757 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2758 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2759
2760 /*
2761 * 0x0 to 0x10: 32MB increments starting at 0MB
2762 * 0x11 to 0x16: 4MB increments starting at 8MB
2763 * 0x17 to 0x1d: 4MB increments starting at 36MB
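 *
 * e.g. gmch_ctrl == 0x13 decodes as (0x13 - 0x11 + 2) << 22 == 16MB,
 * the third 4MB step up from 8MB (editorial worked example).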
2764 */
2765 if (gmch_ctrl < 0x11)
Imre Deaka92d1a92017-05-10 12:21:52 +03002766 return (size_t)gmch_ctrl << 25;
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002767 else if (gmch_ctrl < 0x17)
Imre Deaka92d1a92017-05-10 12:21:52 +03002768 return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002769 else
Imre Deaka92d1a92017-05-10 12:21:52 +03002770 return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002771}
2772
Damien Lespiau66375012014-01-09 18:02:46 +00002773static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2774{
2775 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2776 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2777
2778 if (gen9_gmch_ctl < 0xf0)
Imre Deaka92d1a92017-05-10 12:21:52 +03002779 return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
Damien Lespiau66375012014-01-09 18:02:46 +00002780 else
2781 /* 4MB increments starting at 0xf0 for 4MB */
Imre Deaka92d1a92017-05-10 12:21:52 +03002782 return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
Damien Lespiau66375012014-01-09 18:02:46 +00002783}
2784
Chris Wilson34c998b2016-08-04 07:52:24 +01002785static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
Ben Widawsky63340132013-11-04 19:32:22 -08002786{
Chris Wilson49d73912016-11-29 09:50:08 +00002787 struct drm_i915_private *dev_priv = ggtt->base.i915;
2788 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson34c998b2016-08-04 07:52:24 +01002789 phys_addr_t phys_addr;
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002790 int ret;
Ben Widawsky63340132013-11-04 19:32:22 -08002791
2792 /* For Modern GENs the PTEs and register space are split in the BAR */
Chris Wilson34c998b2016-08-04 07:52:24 +01002793 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
Ben Widawsky63340132013-11-04 19:32:22 -08002794
Imre Deak2a073f892015-03-27 13:07:33 +02002795 /*
Rodrigo Vivi385db982017-08-29 16:09:07 -07002796 * On BXT+/CNL+ writes larger than 64 bits to the GTT pagetable range
2797 * will be dropped. For WC mappings in general we have 64 byte burst
2798 * writes when the WC buffer is flushed, so we can't use WC here, but have to
Imre Deak2a073f892015-03-27 13:07:33 +02002799 * resort to an uncached mapping. The WC issue is easily caught by the
2800 * readback check when writing GTT PTE entries.
2801 */
Rodrigo Vivi385db982017-08-29 16:09:07 -07002802 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
Chris Wilson34c998b2016-08-04 07:52:24 +01002803 ggtt->gsm = ioremap_nocache(phys_addr, size);
Imre Deak2a073f892015-03-27 13:07:33 +02002804 else
Chris Wilson34c998b2016-08-04 07:52:24 +01002805 ggtt->gsm = ioremap_wc(phys_addr, size);
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002806 if (!ggtt->gsm) {
Chris Wilson34c998b2016-08-04 07:52:24 +01002807 DRM_ERROR("Failed to map the ggtt page table\n");
Ben Widawsky63340132013-11-04 19:32:22 -08002808 return -ENOMEM;
2809 }
2810
Chris Wilson84486612017-02-15 08:43:40 +00002811 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002812 if (ret) {
Ben Widawsky63340132013-11-04 19:32:22 -08002813 DRM_ERROR("Scratch setup failed\n");
2814 /* iounmap will also get called at remove, but meh */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002815 iounmap(ggtt->gsm);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002816 return ret;
Ben Widawsky63340132013-11-04 19:32:22 -08002817 }
2818
Mika Kuoppala4ad2af12015-06-30 18:16:39 +03002819 return 0;
Ben Widawsky63340132013-11-04 19:32:22 -08002820}
2821
Zhi Wang43958902017-09-14 20:39:40 +08002822static struct intel_ppat_entry *
2823__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
Rodrigo Vivi4e349352017-08-15 16:25:39 -07002824{
Zhi Wang43958902017-09-14 20:39:40 +08002825 struct intel_ppat_entry *entry = &ppat->entries[index];
2826
2827 GEM_BUG_ON(index >= ppat->max_entries);
2828 GEM_BUG_ON(test_bit(index, ppat->used));
2829
2830 entry->ppat = ppat;
2831 entry->value = value;
2832 kref_init(&entry->ref);
2833 set_bit(index, ppat->used);
2834 set_bit(index, ppat->dirty);
2835
2836 return entry;
2837}
2838
2839static void __free_ppat_entry(struct intel_ppat_entry *entry)
2840{
2841 struct intel_ppat *ppat = entry->ppat;
2842 unsigned int index = entry - ppat->entries;
2843
2844 GEM_BUG_ON(index >= ppat->max_entries);
2845 GEM_BUG_ON(!test_bit(index, ppat->used));
2846
2847 entry->value = ppat->clear_value;
2848 clear_bit(index, ppat->used);
2849 set_bit(index, ppat->dirty);
2850}
2851
2852/**
2853 * intel_ppat_get - get a usable PPAT entry
2854 * @i915: i915 device instance
2855 * @value: the PPAT value required by the caller
2856 *
2857 * The function searches for an existing PPAT entry that matches the
2858 * required value. If one matches perfectly, the existing entry is reused.
2859 * If it only matches partially, the function checks whether a free PPAT
2860 * index is available. If so, it allocates a new PPAT entry for the
2861 * required value and updates the HW. If not, the partially matching
2862 * entry is used.
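 *
 * An illustrative usage sketch (editorial; the value and error handling
 * are an assumed example, not taken from an in-tree caller):
 *
 *	const struct intel_ppat_entry *entry;
 *
 *	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	... program the PAT index (entry - i915->ppat.entries) ...
 *	intel_ppat_put(entry);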
2863 */
2864const struct intel_ppat_entry *
2865intel_ppat_get(struct drm_i915_private *i915, u8 value)
2866{
2867 struct intel_ppat *ppat = &i915->ppat;
2868 struct intel_ppat_entry *entry;
2869 unsigned int scanned, best_score;
2870 int i;
2871
2872 GEM_BUG_ON(!ppat->max_entries);
2873
2874 scanned = best_score = 0;
2875 for_each_set_bit(i, ppat->used, ppat->max_entries) {
2876 unsigned int score;
2877
2878 score = ppat->match(ppat->entries[i].value, value);
2879 if (score > best_score) {
2880 entry = &ppat->entries[i];
2881 if (score == INTEL_PPAT_PERFECT_MATCH) {
2882 kref_get(&entry->ref);
2883 return entry;
2884 }
2885 best_score = score;
2886 }
2887 scanned++;
2888 }
2889
2890 if (scanned == ppat->max_entries) {
2891 if (!best_score)
2892 return ERR_PTR(-ENOSPC);
2893
2894 kref_get(&entry->ref);
2895 return entry;
2896 }
2897
2898 i = find_first_zero_bit(ppat->used, ppat->max_entries);
2899 entry = __alloc_ppat_entry(ppat, i, value);
2900 ppat->update_hw(i915);
2901 return entry;
2902}
2903
2904static void release_ppat(struct kref *kref)
2905{
2906 struct intel_ppat_entry *entry =
2907 container_of(kref, struct intel_ppat_entry, ref);
2908 struct drm_i915_private *i915 = entry->ppat->i915;
2909
2910 __free_ppat_entry(entry);
2911 entry->ppat->update_hw(i915);
2912}
2913
2914/**
2915 * intel_ppat_put - put back the PPAT entry obtained from intel_ppat_get()
2916 * @entry: an intel PPAT entry
2917 *
2918 * Put back the PPAT entry obtained from intel_ppat_get(). If the PPAT index
2919 * of the entry was dynamically allocated, its reference count is decreased.
2920 * Once the reference count drops to zero, the PPAT index becomes free again.
2921 */
2922void intel_ppat_put(const struct intel_ppat_entry *entry)
2923{
2924 struct intel_ppat *ppat = entry->ppat;
2925 unsigned int index = entry - ppat->entries;
2926
2927 GEM_BUG_ON(!ppat->max_entries);
2928
2929 kref_put(&ppat->entries[index].ref, release_ppat);
2930}
2931
2932static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
2933{
2934 struct intel_ppat *ppat = &dev_priv->ppat;
2935 int i;
2936
2937 for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
2938 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
2939 clear_bit(i, ppat->dirty);
2940 }
2941}
2942
2943static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
2944{
2945 struct intel_ppat *ppat = &dev_priv->ppat;
2946 u64 pat = 0;
2947 int i;
2948
2949 for (i = 0; i < ppat->max_entries; i++)
2950 pat |= GEN8_PPAT(i, ppat->entries[i].value);
2951
2952 bitmap_clear(ppat->dirty, 0, ppat->max_entries);
2953
2954 I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
2955 I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
2956}
2957
2958static unsigned int bdw_private_pat_match(u8 src, u8 dst)
2959{
2960 unsigned int score = 0;
2961 enum {
2962 AGE_MATCH = BIT(0),
2963 TC_MATCH = BIT(1),
2964 CA_MATCH = BIT(2),
2965 };
2966
2967 /* Cache attribute has to be matched. */
Zhi Wang1298d512017-09-18 21:36:34 +08002968 if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
Zhi Wang43958902017-09-14 20:39:40 +08002969 return 0;
2970
2971 score |= CA_MATCH;
2972
2973 if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
2974 score |= TC_MATCH;
2975
2976 if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
2977 score |= AGE_MATCH;
2978
2979 if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
2980 return INTEL_PPAT_PERFECT_MATCH;
2981
2982 return score;
2983}
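/*
 * Editorial worked example: src == (GEN8_PPAT_WB | GEN8_PPAT_LLC) and
 * dst == (GEN8_PPAT_WB | GEN8_PPAT_LLCELLC) agree on cache attribute and
 * age (both zero) but not on target cache, so the score comes out as
 * CA_MATCH | AGE_MATCH: a usable partial match.
 */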
2984
2985static unsigned int chv_private_pat_match(u8 src, u8 dst)
2986{
2987 return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
2988 INTEL_PPAT_PERFECT_MATCH : 0;
2989}
2990
2991static void cnl_setup_private_ppat(struct intel_ppat *ppat)
2992{
2993 ppat->max_entries = 8;
2994 ppat->update_hw = cnl_private_pat_update_hw;
2995 ppat->match = bdw_private_pat_match;
2996 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
2997
Rodrigo Vivi4e349352017-08-15 16:25:39 -07002998 /* XXX: spec is unclear if this is still needed for CNL+ */
Zhi Wang43958902017-09-14 20:39:40 +08002999 if (!USES_PPGTT(ppat->i915)) {
3000 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
Rodrigo Vivi4e349352017-08-15 16:25:39 -07003001 return;
3002 }
3003
Zhi Wang43958902017-09-14 20:39:40 +08003004 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3005 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3006 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3007 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3008 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3009 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3010 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3011 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
Rodrigo Vivi4e349352017-08-15 16:25:39 -07003012}
3013
Ben Widawskyfbe5d362013-11-04 19:56:49 -08003014/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3015 * bits. When using advanced contexts, each context stores its own PAT, but
3016 * writing this data shouldn't be harmful even in those cases. */
Zhi Wang43958902017-09-14 20:39:40 +08003017static void bdw_setup_private_ppat(struct intel_ppat *ppat)
Ben Widawskyfbe5d362013-11-04 19:56:49 -08003018{
Zhi Wang43958902017-09-14 20:39:40 +08003019 ppat->max_entries = 8;
3020 ppat->update_hw = bdw_private_pat_update_hw;
3021 ppat->match = bdw_private_pat_match;
3022 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
Ben Widawskyfbe5d362013-11-04 19:56:49 -08003023
Zhi Wang43958902017-09-14 20:39:40 +08003024 if (!USES_PPGTT(ppat->i915)) {
Rodrigo Vivid6a8b722014-11-05 16:56:36 -08003025 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3026 * so RTL will always use the value corresponding to
3027 * pat_sel = 000".
3028 * So let's disable cache for GGTT to avoid screen corruptions.
3029 * MOCS still can be used though.
3030 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3031 * before this patch, i.e. the same uncached + snooping access
3032 * like on gen6/7 seems to be in effect.
3033 * - So this just fixes blitter/render access. Again it looks
3034 * like it's not just uncached access, but uncached + snooping.
3035 * So we can still hold onto all our assumptions wrt cpu
3036 * clflushing on LLC machines.
3037 */
Zhi Wang43958902017-09-14 20:39:40 +08003038 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3039 return;
3040 }
Rodrigo Vivid6a8b722014-11-05 16:56:36 -08003041
Zhi Wang43958902017-09-14 20:39:40 +08003042 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
3043 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
3044 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
3045 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
3046 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3047 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3048 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3049 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
Ben Widawskyfbe5d362013-11-04 19:56:49 -08003050}
3051
Zhi Wang43958902017-09-14 20:39:40 +08003052static void chv_setup_private_ppat(struct intel_ppat *ppat)
Ville Syrjäläee0ce472014-04-09 13:28:01 +03003053{
Zhi Wang43958902017-09-14 20:39:40 +08003054 ppat->max_entries = 8;
3055 ppat->update_hw = bdw_private_pat_update_hw;
3056 ppat->match = chv_private_pat_match;
3057 ppat->clear_value = CHV_PPAT_SNOOP;
Ville Syrjäläee0ce472014-04-09 13:28:01 +03003058
3059 /*
3060 * Map WB on BDW to snooped on CHV.
3061 *
3062 * Only the snoop bit has meaning for CHV, the rest is
3063 * ignored.
3064 *
Ville Syrjäläcf3d2622014-11-14 21:02:44 +02003065 * The hardware will never snoop for certain types of accesses:
3066 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3067 * - PPGTT page tables
3068 * - some other special cycles
3069 *
3070 * As with BDW, we also need to consider the following for GT accesses:
3071 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3072 * so RTL will always use the value corresponding to
3073 * pat_sel = 000".
3074 * Which means we must set the snoop bit in PAT entry 0
3075 * in order to keep the global status page working.
Ville Syrjäläee0ce472014-04-09 13:28:01 +03003076 */
Ville Syrjäläee0ce472014-04-09 13:28:01 +03003077
Zhi Wang43958902017-09-14 20:39:40 +08003078 __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3079 __alloc_ppat_entry(ppat, 1, 0);
3080 __alloc_ppat_entry(ppat, 2, 0);
3081 __alloc_ppat_entry(ppat, 3, 0);
3082 __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3083 __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3084 __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3085 __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
Ville Syrjäläee0ce472014-04-09 13:28:01 +03003086}
3087
Chris Wilson34c998b2016-08-04 07:52:24 +01003088static void gen6_gmch_remove(struct i915_address_space *vm)
3089{
3090 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3091
3092 iounmap(ggtt->gsm);
Chris Wilson84486612017-02-15 08:43:40 +00003093 cleanup_scratch_page(vm);
Chris Wilson34c998b2016-08-04 07:52:24 +01003094}
3095
Zhi Wang36e16c42017-09-12 15:42:24 +08003096static void setup_private_pat(struct drm_i915_private *dev_priv)
3097{
Zhi Wang43958902017-09-14 20:39:40 +08003098 struct intel_ppat *ppat = &dev_priv->ppat;
3099 int i;
3100
3101 ppat->i915 = dev_priv;
3102
Zhi Wang36e16c42017-09-12 15:42:24 +08003103 if (INTEL_GEN(dev_priv) >= 10)
Zhi Wang43958902017-09-14 20:39:40 +08003104 cnl_setup_private_ppat(ppat);
Zhi Wang36e16c42017-09-12 15:42:24 +08003105 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
Zhi Wang43958902017-09-14 20:39:40 +08003106 chv_setup_private_ppat(ppat);
Zhi Wang36e16c42017-09-12 15:42:24 +08003107 else
Zhi Wang43958902017-09-14 20:39:40 +08003108 bdw_setup_private_ppat(ppat);
3109
3110 GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3111
3112 for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3113 ppat->entries[i].value = ppat->clear_value;
3114 ppat->entries[i].ppat = ppat;
3115 set_bit(i, ppat->dirty);
3116 }
3117
3118 ppat->update_hw(dev_priv);
Zhi Wang36e16c42017-09-12 15:42:24 +08003119}
3120
Joonas Lahtinend507d732016-03-18 10:42:58 +02003121static int gen8_gmch_probe(struct i915_ggtt *ggtt)
Ben Widawsky63340132013-11-04 19:32:22 -08003122{
Chris Wilson49d73912016-11-29 09:50:08 +00003123 struct drm_i915_private *dev_priv = ggtt->base.i915;
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003124 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson34c998b2016-08-04 07:52:24 +01003125 unsigned int size;
Ben Widawsky63340132013-11-04 19:32:22 -08003126 u16 snb_gmch_ctl;
Imre Deak45192902017-05-10 12:21:50 +03003127 int err;
Ben Widawsky63340132013-11-04 19:32:22 -08003128
3129 /* TODO: We're not aware of mappable constraints on gen8 yet */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003130 ggtt->mappable_base = pci_resource_start(pdev, 2);
3131 ggtt->mappable_end = pci_resource_len(pdev, 2);
Ben Widawsky63340132013-11-04 19:32:22 -08003132
Imre Deak45192902017-05-10 12:21:50 +03003133 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3134 if (!err)
3135 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3136 if (err)
3137 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
Ben Widawsky63340132013-11-04 19:32:22 -08003138
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003139 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
Ben Widawsky63340132013-11-04 19:32:22 -08003140
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003141 if (INTEL_GEN(dev_priv) >= 9) {
Joonas Lahtinend507d732016-03-18 10:42:58 +02003142 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
Chris Wilson34c998b2016-08-04 07:52:24 +01003143 size = gen8_get_total_gtt_size(snb_gmch_ctl);
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003144 } else if (IS_CHERRYVIEW(dev_priv)) {
Joonas Lahtinend507d732016-03-18 10:42:58 +02003145 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
Chris Wilson34c998b2016-08-04 07:52:24 +01003146 size = chv_get_total_gtt_size(snb_gmch_ctl);
Damien Lespiaud7f25f22014-05-08 22:19:40 +03003147 } else {
Joonas Lahtinend507d732016-03-18 10:42:58 +02003148 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
Chris Wilson34c998b2016-08-04 07:52:24 +01003149 size = gen8_get_total_gtt_size(snb_gmch_ctl);
Damien Lespiaud7f25f22014-05-08 22:19:40 +03003150 }
Ben Widawsky63340132013-11-04 19:32:22 -08003151
Chris Wilson34c998b2016-08-04 07:52:24 +01003152 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
Chris Wilson34c998b2016-08-04 07:52:24 +01003153 ggtt->base.cleanup = gen6_gmch_remove;
Joonas Lahtinend507d732016-03-18 10:42:58 +02003154 ggtt->base.bind_vma = ggtt_bind_vma;
3155 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilsond6473f52016-06-10 14:22:59 +05303156 ggtt->base.insert_page = gen8_ggtt_insert_page;
Chris Wilsonf7770bf2016-05-14 07:26:35 +01003157 ggtt->base.clear_range = nop_clear_range;
Chris Wilson48f112f2016-06-24 14:07:14 +01003158 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
Chris Wilsonf7770bf2016-05-14 07:26:35 +01003159 ggtt->base.clear_range = gen8_ggtt_clear_range;
3160
3161 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
Chris Wilsonf7770bf2016-05-14 07:26:35 +01003162
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07003163 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3164 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
3165 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3166 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3167 if (ggtt->base.clear_range != nop_clear_range)
3168 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3169 }
3170
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003171 ggtt->invalidate = gen6_ggtt_invalidate;
3172
Zhi Wang36e16c42017-09-12 15:42:24 +08003173 setup_private_pat(dev_priv);
3174
Chris Wilson34c998b2016-08-04 07:52:24 +01003175 return ggtt_probe_common(ggtt, size);
Ben Widawsky63340132013-11-04 19:32:22 -08003176}
3177
Joonas Lahtinend507d732016-03-18 10:42:58 +02003178static int gen6_gmch_probe(struct i915_ggtt *ggtt)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003179{
Chris Wilson49d73912016-11-29 09:50:08 +00003180 struct drm_i915_private *dev_priv = ggtt->base.i915;
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003181 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson34c998b2016-08-04 07:52:24 +01003182 unsigned int size;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003183 u16 snb_gmch_ctl;
Imre Deak45192902017-05-10 12:21:50 +03003184 int err;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003185
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003186 ggtt->mappable_base = pci_resource_start(pdev, 2);
3187 ggtt->mappable_end = pci_resource_len(pdev, 2);
Ben Widawsky41907dd2013-02-08 11:32:47 -08003188
Ben Widawskybaa09f52013-01-24 13:49:57 -08003189 /* 64/512MB is the current min/max we actually know of, but this is just
3190 * a coarse sanity check.
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003191 */
Chris Wilson34c998b2016-08-04 07:52:24 +01003192 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
Joonas Lahtinend507d732016-03-18 10:42:58 +02003193 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
Ben Widawskybaa09f52013-01-24 13:49:57 -08003194 return -ENXIO;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003195 }
3196
Imre Deak45192902017-05-10 12:21:50 +03003197 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3198 if (!err)
3199 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3200 if (err)
3201 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003202 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
Ben Widawskybaa09f52013-01-24 13:49:57 -08003203
Joonas Lahtinend507d732016-03-18 10:42:58 +02003204 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
Ben Widawskybaa09f52013-01-24 13:49:57 -08003205
Chris Wilson34c998b2016-08-04 07:52:24 +01003206 size = gen6_get_total_gtt_size(snb_gmch_ctl);
3207 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
Ben Widawskybaa09f52013-01-24 13:49:57 -08003208
Joonas Lahtinend507d732016-03-18 10:42:58 +02003209 ggtt->base.clear_range = gen6_ggtt_clear_range;
Chris Wilsond6473f52016-06-10 14:22:59 +05303210 ggtt->base.insert_page = gen6_ggtt_insert_page;
Joonas Lahtinend507d732016-03-18 10:42:58 +02003211 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3212 ggtt->base.bind_vma = ggtt_bind_vma;
3213 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilson34c998b2016-08-04 07:52:24 +01003214 ggtt->base.cleanup = gen6_gmch_remove;
Ben Widawskybaa09f52013-01-24 13:49:57 -08003215
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003216 ggtt->invalidate = gen6_ggtt_invalidate;
3217
Chris Wilson34c998b2016-08-04 07:52:24 +01003218 if (HAS_EDRAM(dev_priv))
3219 ggtt->base.pte_encode = iris_pte_encode;
3220 else if (IS_HASWELL(dev_priv))
3221 ggtt->base.pte_encode = hsw_pte_encode;
3222 else if (IS_VALLEYVIEW(dev_priv))
3223 ggtt->base.pte_encode = byt_pte_encode;
3224 else if (INTEL_GEN(dev_priv) >= 7)
3225 ggtt->base.pte_encode = ivb_pte_encode;
3226 else
3227 ggtt->base.pte_encode = snb_pte_encode;
3228
3229 return ggtt_probe_common(ggtt, size);
Ben Widawskybaa09f52013-01-24 13:49:57 -08003230}
3231
Chris Wilson34c998b2016-08-04 07:52:24 +01003232static void i915_gmch_remove(struct i915_address_space *vm)
Ben Widawskybaa09f52013-01-24 13:49:57 -08003233{
Chris Wilson34c998b2016-08-04 07:52:24 +01003234 intel_gmch_remove();
Ben Widawskybaa09f52013-01-24 13:49:57 -08003235}
3236
Joonas Lahtinend507d732016-03-18 10:42:58 +02003237static int i915_gmch_probe(struct i915_ggtt *ggtt)
Ben Widawskybaa09f52013-01-24 13:49:57 -08003238{
Chris Wilson49d73912016-11-29 09:50:08 +00003239 struct drm_i915_private *dev_priv = ggtt->base.i915;
Ben Widawskybaa09f52013-01-24 13:49:57 -08003240 int ret;
3241
Chris Wilson91c8a322016-07-05 10:40:23 +01003242 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
Ben Widawskybaa09f52013-01-24 13:49:57 -08003243 if (!ret) {
3244 DRM_ERROR("failed to set up gmch\n");
3245 return -EIO;
3246 }
3247
Chris Wilsonedd1f2f2017-01-06 15:20:11 +00003248 intel_gtt_get(&ggtt->base.total,
3249 &ggtt->stolen_size,
3250 &ggtt->mappable_base,
3251 &ggtt->mappable_end);
Ben Widawskybaa09f52013-01-24 13:49:57 -08003252
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003253 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
Chris Wilsond6473f52016-06-10 14:22:59 +05303254 ggtt->base.insert_page = i915_ggtt_insert_page;
Joonas Lahtinend507d732016-03-18 10:42:58 +02003255 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3256 ggtt->base.clear_range = i915_ggtt_clear_range;
3257 ggtt->base.bind_vma = ggtt_bind_vma;
3258 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilson34c998b2016-08-04 07:52:24 +01003259 ggtt->base.cleanup = i915_gmch_remove;
Ben Widawskybaa09f52013-01-24 13:49:57 -08003260
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003261 ggtt->invalidate = gmch_ggtt_invalidate;
3262
Joonas Lahtinend507d732016-03-18 10:42:58 +02003263 if (unlikely(ggtt->do_idle_maps))
Chris Wilsonc0a7f812013-12-30 12:16:15 +00003264 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3265
Ben Widawskybaa09f52013-01-24 13:49:57 -08003266 return 0;
3267}
3268
Joonas Lahtinend85489d2016-03-24 16:47:46 +02003269/**
Chris Wilson0088e522016-08-04 07:52:21 +01003270 * i915_ggtt_probe_hw - Probe GGTT hardware location
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003271 * @dev_priv: i915 device
Joonas Lahtinend85489d2016-03-24 16:47:46 +02003272 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003273int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
Ben Widawskybaa09f52013-01-24 13:49:57 -08003274{
Joonas Lahtinen62106b42016-03-18 10:42:57 +02003275 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawskybaa09f52013-01-24 13:49:57 -08003276 int ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003277
Chris Wilson49d73912016-11-29 09:50:08 +00003278 ggtt->base.i915 = dev_priv;
Chris Wilson84486612017-02-15 08:43:40 +00003279 ggtt->base.dma = &dev_priv->drm.pdev->dev;
Mika Kuoppalac114f762015-06-25 18:35:13 +03003280
Chris Wilson34c998b2016-08-04 07:52:24 +01003281 if (INTEL_GEN(dev_priv) <= 5)
3282 ret = i915_gmch_probe(ggtt);
3283 else if (INTEL_GEN(dev_priv) < 8)
3284 ret = gen6_gmch_probe(ggtt);
3285 else
3286 ret = gen8_gmch_probe(ggtt);
Ben Widawskya54c0c22013-01-24 14:45:00 -08003287 if (ret)
Ben Widawskybaa09f52013-01-24 13:49:57 -08003288 return ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003289
Chris Wilsondb9309a2017-01-05 15:30:23 +00003290 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3291 * This is easier than doing range restriction on the fly, as we
3292 * currently don't have any bits spare to pass in this upper
3293 * restriction!
3294 */
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00003295 if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
Chris Wilsondb9309a2017-01-05 15:30:23 +00003296 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3297 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3298 }
3299
Chris Wilsonc890e2d2016-03-18 10:42:59 +02003300 if ((ggtt->base.total - 1) >> 32) {
3301 DRM_ERROR("We never expected a Global GTT with more than 32bits"
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003302 " of address space! Found %lldM!\n",
Chris Wilsonc890e2d2016-03-18 10:42:59 +02003303 ggtt->base.total >> 20);
3304 ggtt->base.total = 1ULL << 32;
3305 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3306 }
3307
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003308 if (ggtt->mappable_end > ggtt->base.total) {
3309 DRM_ERROR("mappable aperture extends past end of GGTT,"
3310 " aperture=%llx, total=%llx\n",
3311 ggtt->mappable_end, ggtt->base.total);
3312 ggtt->mappable_end = ggtt->base.total;
3313 }
3314
Ben Widawskybaa09f52013-01-24 13:49:57 -08003315 /* GMADR is the PCI mmio aperture into the global GTT. */
Mika Kuoppalac44ef602015-06-25 18:35:05 +03003316 DRM_INFO("Memory usable by graphics device = %lluM\n",
Joonas Lahtinen62106b42016-03-18 10:42:57 +02003317 ggtt->base.total >> 20);
3318 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
Chris Wilsonedd1f2f2017-01-06 15:20:11 +00003319 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
Chris Wilson80debff2017-05-25 13:16:12 +01003320 if (intel_vtd_active())
Daniel Vetter5db6c732014-03-31 16:23:04 +02003321 DRM_INFO("VT-d active for gfx access\n");
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08003322
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003323 return 0;
Chris Wilson0088e522016-08-04 07:52:21 +01003324}
3325
3326/**
3327 * i915_ggtt_init_hw - Initialize GGTT hardware
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003328 * @dev_priv: i915 device
Chris Wilson0088e522016-08-04 07:52:21 +01003329 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003330int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
Chris Wilson0088e522016-08-04 07:52:21 +01003331{
Chris Wilson0088e522016-08-04 07:52:21 +01003332 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3333 int ret;
3334
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003335 INIT_LIST_HEAD(&dev_priv->vm_list);
3336
Chris Wilsona6508de2017-02-06 08:45:47 +00003337 /* Note that we use page colouring to enforce a guard page at the
3338 * end of the address space. This is required as the CS may prefetch
3339 * beyond the end of the batch buffer, across the page boundary,
3340 * and beyond the end of the GTT if we do not provide a guard.
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003341 */
Chris Wilson80b204b2016-10-28 13:58:58 +01003342 mutex_lock(&dev_priv->drm.struct_mutex);
Chris Wilson80b204b2016-10-28 13:58:58 +01003343 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
Chris Wilsona6508de2017-02-06 08:45:47 +00003344 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003345 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
Chris Wilson80b204b2016-10-28 13:58:58 +01003346 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003347
Chris Wilsonf7bbe782016-08-19 16:54:27 +01003348 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3349 dev_priv->ggtt.mappable_base,
3350 dev_priv->ggtt.mappable_end)) {
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003351 ret = -EIO;
3352 goto out_gtt_cleanup;
3353 }
3354
3355 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3356
Chris Wilson0088e522016-08-04 07:52:21 +01003357 /*
3358 * Initialise stolen early so that we may reserve preallocated
3359 * objects for the BIOS to KMS transition.
3360 */
Tvrtko Ursulin7ace3d32016-11-16 08:55:35 +00003361 ret = i915_gem_init_stolen(dev_priv);
Chris Wilson0088e522016-08-04 07:52:21 +01003362 if (ret)
3363 goto out_gtt_cleanup;
3364
3365 return 0;
Imre Deaka4eba472016-01-19 15:26:32 +02003366
3367out_gtt_cleanup:
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003368 ggtt->base.cleanup(&ggtt->base);
Imre Deaka4eba472016-01-19 15:26:32 +02003369 return ret;
Daniel Vetter644ec022012-03-26 09:45:40 +02003370}
Ben Widawsky6f65e292013-12-06 14:10:56 -08003371
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003372int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
Ville Syrjäläac840ae2016-05-06 21:35:55 +03003373{
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003374 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
Ville Syrjäläac840ae2016-05-06 21:35:55 +03003375 return -EIO;
3376
3377 return 0;
3378}
3379
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003380void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3381{
Chris Wilson04f7b24e2017-06-01 10:04:46 +01003382 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3383
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003384 i915->ggtt.invalidate = guc_ggtt_invalidate;
3385}
3386
3387void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3388{
Chris Wilson04f7b24e2017-06-01 10:04:46 +01003389 /* We should only be called after i915_ggtt_enable_guc() */
3390 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3391
3392 i915->ggtt.invalidate = gen6_ggtt_invalidate;
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003393}
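/*
 * Editorial note: the GEM_BUG_ONs above assert that the invalidate hook
 * only ever flips between gen6_ggtt_invalidate and guc_ggtt_invalidate,
 * i.e. i915_ggtt_enable_guc() and i915_ggtt_disable_guc() must be called
 * strictly in pairs.
 */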

void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj, *on;

	i915_check_and_clear_faults(dev_priv);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);

	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.bound_list, global_link) {
		bool ggtt_bound = false;
		struct i915_vma *vma;

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
				continue;

			if (!i915_vma_unbind(vma))
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
			ggtt_bound = true;
		}

		if (ggtt_bound)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}

	ggtt->base.closed = false;

	if (INTEL_GEN(dev_priv) >= 8) {
		struct intel_ppat *ppat = &dev_priv->ppat;

		bitmap_set(ppat->dirty, 0, ppat->max_entries);
		dev_priv->ppat.update_hw(dev_priv);
		return;
	}

	if (USES_PPGTT(dev_priv)) {
		struct i915_address_space *vm;

		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;
			else
				ppgtt = i915_vm_to_ppgtt(vm);

			gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
		}
	}

	i915_ggtt_invalidate(dev_priv);
}

static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}
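
/*
 * Worked example (added for illustration): a 2-column x 3-row plane with
 * stride 2 has its source tile pages laid out linearly as
 *
 *	0 1
 *	2 3
 *	4 5
 *
 * rotate_pages() walks each column from the bottom row upwards, so the DMA
 * addresses are emitted in the order 4, 2, 0, 5, 3, 1. Reading the output
 * back in rows of three gives the 90 degree rotated view
 *
 *	4 2 0
 *	5 3 1
 */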

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	const unsigned long n_pages = obj->base.size / PAGE_SIZE;
	unsigned int size = intel_rotation_info_size(rot_info);
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = kvmalloc_array(n_pages,
					sizeof(dma_addr_t),
					GFP_TEMPORARY);
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
		page_addr_list[i++] = dma_addr;

	GEM_BUG_ON(i != n_pages);
	st->nents = 0;
	sg = st->sgl;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
	}

	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	kvfree(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	kvfree(page_addr_list);

	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(iter->length - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
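
/*
 * Illustrative note (editor's addition): the loop above coalesces the view
 * into as few scatterlist entries as the source layout permits. If
 * view->partial covers 16 pages and the source sg entry holding the first
 * page still has 16 or more pages remaining after @offset, the whole view
 * collapses into a single entry of 16 * PAGE_SIZE bytes; only when the
 * source chunk is exhausted does the loop advance to the next entry.
 */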

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret;

	/* The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	case I915_GGTT_VIEW_NORMAL:
		vma->pages = vma->obj->mm.pages;
		return 0;

	case I915_GGTT_VIEW_ROTATED:
		vma->pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;

	default:
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);
		return -EINVAL;
	}

	ret = 0;
	if (unlikely(IS_ERR(vma->pages))) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}
	return ret;
}
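
/*
 * Illustrative sketch (editor's addition; the concrete numbers are made up):
 * a caller wanting a partial GGTT view describes it before pinning, e.g. to
 * map pages [8, 8 + 16) of an object:
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_PARTIAL,
 *		.partial.offset = 8,
 *		.partial.size = 16,
 *	};
 *
 * i915_get_ggtt_vma_pages() then builds vma->pages for that VMA via
 * intel_partial_pages() when the view is bound into the GGTT.
 */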

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node; if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
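
/*
 * Hedged usage sketch (editor's addition; the chosen offset and error
 * handling are assumptions, not taken from the original file): reserving a
 * single page at a fixed GGTT offset without permitting eviction.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&ggtt->base, &node,
 *				   I915_GTT_PAGE_SIZE, 0,
 *				   I915_COLOR_UNEVICTABLE,
 *				   PIN_NOEVICT);
 *	if (err)
 *		return err;	// offset 0 occupied, eviction disallowed
 *
 * With PIN_NOEVICT the call reports -ENOSPC instead of evicting whatever
 * overlaps the requested range.
 */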

static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}
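
/*
 * Worked example (added for illustration): with start = 0, end = 1 MiB,
 * len = 64 KiB and align = 4 KiB,
 *
 *	range = round_down(1M - 64K, 4K) - round_up(0, 4K) = 960 KiB,
 *
 * so the random value is reduced modulo 960 KiB and the result rounded up
 * to a 4 KiB boundary. Even the largest outcome (960 KiB) plus len lands
 * exactly at end, so [offset, offset + len) always fits inside [start, end).
 */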

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction; failing that, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTT, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
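
/*
 * Hedged usage sketch (editor's addition; the size and range are made-up
 * values): allocating a 2 MiB node somewhere in the low 4 GiB of the GGTT,
 * preferring the mappable aperture. struct_mutex must be held, as asserted
 * at the top of i915_gem_gtt_insert().
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(&ggtt->base, &node,
 *				  2 * 1024 * 1024,	// size
 *				  0,			// default alignment
 *				  I915_COLOR_UNEVICTABLE,
 *				  0, 1ull << 32,	// [start, end)
 *				  PIN_MAPPABLE);
 *
 * By the time this returns -ENOSPC, every fallback (random replacement,
 * then a full eviction scan) has already been tried.
 */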

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif