/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *    1212
 *    3434
 *
 * In this example both the size and the layout of pages in the alternative
 * view differ from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * the globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that the
 * passed-in struct i915_ggtt_view does not need to be persistent (left around
 * after calling the core API functions).
 *
 */

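/*
 * Editorial sketch, not part of the original file: following the three
 * steps above for a hypothetical new view type would look roughly like
 * this (I915_GGTT_VIEW_EXAMPLE and example_data are invented names for
 * illustration only):
 *
 *	enum i915_ggtt_view_type {
 *		...
 *		I915_GGTT_VIEW_EXAMPLE,			// step 1: new enum
 *	};
 *
 *	struct i915_ggtt_view {
 *		enum i915_ggtt_view_type type;
 *		union {
 *			...
 *			struct example_view example_data; // step 2: metadata
 *		};
 *	};
 *
 * Step 3 is then a new case in i915_get_ggtt_vma_pages() that builds the
 * view's sg_table from the object's backing pages.
 */
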
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

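/*
 * Editorial note, not in the original: ggtt.invalidate is a function
 * pointer chosen once at init time, so after any PTE update a caller
 * only needs the generic helper, e.g. (illustrative sequence):
 *
 *	ggtt->base.insert_entries(&ggtt->base, pages, start, level, flags);
 *	i915_ggtt_invalidate(i915);
 *
 * and the platform-appropriate flush (the gen6 mmio write, the
 * additional GuC TLB invalidation, or the gmch chipset flush) happens
 * behind it.
 */
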
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

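/*
 * Editorial summary, not in the original: the sanitized value maps
 * directly onto the PPGTT mode the driver will use:
 *
 *	0 - PPGTT disabled, GGTT only
 *	1 - aliasing PPGTT
 *	2 - full PPGTT with a 32bit (4GiB) address space
 *	3 - full PPGTT with a 48bit address space
 *
 * Anything the hardware (or the vGPU emulation) cannot do is filtered
 * out above before the module parameter is honoured.
 */
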
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
						 vma->size);
		if (ret)
			return ret;
	}

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

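/*
 * Editorial note, not in the original: a gen8 PTE is the page-aligned
 * dma address OR'ed with control bits in the low bits of the entry,
 * i.e. roughly
 *
 *	pte = addr | _PAGE_PRESENT | _PAGE_RW | PPAT index bits
 *
 * where the PPAT index bits select the cacheability entry in the
 * private PAT, chosen here from the i915_cache_level.
 */
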
static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

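/*
 * Editorial note, not in the original: exactly one of the encoders
 * above (snb/ivb/byt/hsw/iris) is installed as vm->pte_encode during
 * GTT init for the matching platform; the rest of the gen6/7 code calls
 * it indirectly and stays platform agnostic.
 */
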
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	if (vm->free_pages.nr)
		return vm->free_pages.pages[--vm->free_pages.nr];

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	if (vm->pt_kmap_wc)
		set_pages_array_wc(&page, 1);

	return page;
}

static void vm_free_pages_release(struct i915_address_space *vm)
{
	GEM_BUG_ON(!pagevec_count(&vm->free_pages));

	if (vm->pt_kmap_wc)
		set_pages_array_wb(vm->free_pages.pages,
				   pagevec_count(&vm->free_pages));

	__pagevec_release(&vm->free_pages);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	if (!pagevec_add(&vm->free_pages, page))
		vm_free_pages_release(vm);
}

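/*
 * Editorial note, not in the original: vm_alloc_page()/vm_free_page()
 * keep a small pagevec of recently freed page-table pages per address
 * space. This avoids both the page allocator and, for pt_kmap_wc, the
 * costly WC<->WB attribute changes on every page-table alloc/free
 * cycle; the cache is only spilled back (and reset to WB) in batches
 * via vm_free_pages_release().
 */
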
static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))

static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);
	int i;

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
	cleanup_page_dma(vm, &vm->scratch_page);
}

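/*
 * Editorial note, not in the original: the scratch page is one shared,
 * zeroed page that every unused PTE is pointed at, so any stray access
 * through an unbound part of the address space decodes to harmless
 * memory instead of faulting.
 */
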
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill32_px(vm, pt,
		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	unsigned int i;

	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	for (i = 0; i < I915_PDES; i++)
		pd->page_table[i] = vm->scratch_pt;
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	unsigned int i;

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	for (i = 0; i < pdpes; i++)
		pdp->page_directory[i] = vm->scratch_pd;

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	unsigned int i;

	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
		pml4->pdps[i] = vm->scratch_pdp;
}

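/*
 * Editorial note, not in the original: the initializers above wire each
 * freshly allocated level to the scratch structure one level below, so
 * before any real binding a lookup resolves as
 *
 *	pml4e -> scratch_pdp -> scratch_pd -> scratch_pt -> scratch page
 *
 * and allocation/clearing only ever swaps individual links in and out
 * of that default chain.
 */
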
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

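/*
 * Editorial note, not in the original: the mm switch is performed from
 * the ring itself. gen8_write_pdp() emits two MI_LOAD_REGISTER_IMM
 * commands per entry to load the 64bit descriptor address into the
 * engine's PDP registers: all four PDPs for the 3-level layout, or just
 * PDP0 with the pml4 address for the 4-level layout.
 */
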
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				u64 start, u64 length)
{
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t *vaddr;

	GEM_BUG_ON(num_entries > pt->used_ptes);

	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;

	vaddr = kmap_atomic_px(pt);
	while (pte < pte_end)
		vaddr[pte++] = scratch_pte;
	kunmap_atomic(vaddr);

	return false;
}

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				u64 start, u64 length)
{
	struct i915_page_table *pt;
	u32 pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		GEM_BUG_ON(pt == vm->scratch_pt);

		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;

		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
		GEM_BUG_ON(!pd->used_pdes);
		pd->used_pdes--;

		free_pt(vm, pt);
	}

	return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
	if (!use_4lvl(vm))
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 u64 start, u64 length)
{
	struct i915_page_directory *pd;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		GEM_BUG_ON(pd == vm->scratch_pd);

		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;

		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;

		free_pd(vm, pd);
	}

	return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	unsigned int pml4e;

	GEM_BUG_ON(!use_4lvl(vm));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		GEM_BUG_ON(pdp == vm->scratch_pdp);

		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;

		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
	}
}

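/*
 * Editorial note, not in the original: the gen8_ppgtt_clear_* family
 * forms a recursive teardown. Each level clears its range, and when a
 * child drops to zero used entries it is replaced by the matching
 * scratch structure and freed, with the boolean return of the lower
 * levels telling the parent whether it, too, may now be empty.
 */
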
struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
};

struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		 gen8_pml4e_index(start),
		 gen8_pdpe_index(start),
		 gen8_pde_index(start),
		 gen8_pte_index(start),
	};
}

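/*
 * Editorial note, not in the original: gen8_insert_pte() decomposes a
 * 48bit GTT offset into the four level indices, 9 bits per level above
 * the 4KiB page offset:
 *
 *	pml4e = addr[47:39], pdpe = addr[38:30],
 *	pde   = addr[29:21], pte  = addr[20:12]
 *
 * The u16 fields are wide enough since each index is at most 511.
 */
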
static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
			      enum i915_cache_level cache_level)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
	gen8_pte_t *vaddr;
	bool ret;

	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
	do {
		vaddr[idx->pte] = pte_encode | iter->dma;

		iter->dma += PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

				/* Limited by sg length for 3lvl */
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
					ret = true;
					break;
				}

				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
				pd = pdp->page_directory[idx->pdpe];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
		}
	} while (1);
	kunmap_atomic(vaddr);

	return ret;
}

static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
				      cache_level);
}

static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
					     &idx, cache_level))
		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
}

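/*
 * Editorial note, not in the original: both insert variants walk the
 * sg_table with the same inner helper. The 3-level version relies on a
 * single call (its 4GiB address space cannot step past the PDP), while
 * the 4-level version re-invokes the helper each time the walk crosses
 * a pml4e boundary, advancing idx.pml4e as it goes.
 */
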
static void gen8_free_page_tables(struct i915_address_space *vm,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for (i = 0; i < I915_PDES; i++) {
		if (pd->page_table[i] != vm->scratch_pt)
			free_pt(vm, pd->page_table[i]);
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(vm);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (use_4lvl(vm)) {
		vm->scratch_pdp = alloc_pdp(vm);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (use_4lvl(vm))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(vm, vm->scratch_pd);
free_pt:
	free_pt(vm, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(vm);

	return ret;
}

static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	enum vgt_g2v_type msg;
	int i;

	if (use_4lvl(vm)) {
		const u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	if (use_4lvl(vm))
		free_pdp(vm, vm->scratch_pdp);
	free_pd(vm, vm->scratch_pd);
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	int i;

	for (i = 0; i < pdpes; i++) {
		if (pdp->page_directory[i] == vm->scratch_pd)
			continue;

		gen8_free_page_tables(vm, pdp->page_directory[i]);
		free_pd(vm, pdp->page_directory[i]);
	}

	free_pdp(vm, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
			continue;

		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(&ppgtt->base, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (use_4lvl(vm))
		gen8_ppgtt_cleanup_4lvl(ppgtt);
	else
		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);

	gen8_free_scratch(vm);
}

static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       u64 start, u64 length)
{
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind;

			gen8_initialize_pt(vm, pt);

			gen8_ppgtt_set_pde(vm, pd, pt, pde);
			pd->used_pdes++;
			GEM_BUG_ON(pd->used_pdes > I915_PDES);
		}

		pt->used_ptes += gen8_pte_count(start, length);
	}
	return 0;

unwind:
	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
{
	struct i915_page_directory *pd;
	u64 from = start;
	unsigned int pdpe;
	int ret;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (pd == vm->scratch_pd) {
			pd = alloc_pd(vm);
			if (IS_ERR(pd))
				goto unwind;

			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			pdp->used_pdpes++;
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
		if (unlikely(ret))
			goto unwind_pd;
	}

	return 0;

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;

			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}

		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
		if (unlikely(ret))
			goto unwind_pdp;
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
}

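/*
 * Editorial note, not in the original: all three alloc levels share the
 * same unwind pattern. On failure they call the matching clear routine
 * on [from, start), i.e. only the range already processed, which frees
 * any tables that were newly allocated for this request while leaving
 * previously populated entries untouched.
 */
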
static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  u64 start, u64 length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory *pd;
	u32 pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		u64 pd_len = length;
		u64 pd_start = start;
		u32 pde;

		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			u32 pte;
			gen8_pte_t *pt_vaddr;

			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
				continue;

			pt_vaddr = kmap_atomic_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				u64 va = (pdpe << GEN8_PDPE_SHIFT |
					  pde << GEN8_PDE_SHIFT |
					  pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			kunmap_atomic(pt_vaddr);
		}
	}
}

1263static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1264{
1265 struct i915_address_space *vm = &ppgtt->base;
Chris Wilson894cceb2017-02-15 08:43:37 +00001266 const gen8_pte_t scratch_pte =
1267 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
Chris Wilson381b9432017-02-15 08:43:54 +00001268 u64 start = 0, length = ppgtt->base.total;
Michel Thierryea91e402015-07-29 17:23:57 +01001269
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001270 if (use_4lvl(vm)) {
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001271 u64 pml4e;
Michel Thierryea91e402015-07-29 17:23:57 +01001272 struct i915_pml4 *pml4 = &ppgtt->pml4;
1273 struct i915_page_directory_pointer *pdp;
1274
Dave Gordone8ebd8e2015-12-08 13:30:51 +00001275 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
Chris Wilsonc5d092a2017-02-15 08:43:49 +00001276 if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
Michel Thierryea91e402015-07-29 17:23:57 +01001277 continue;
1278
1279 seq_printf(m, " PML4E #%llu\n", pml4e);
Chris Wilson84486612017-02-15 08:43:40 +00001280 gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
Michel Thierryea91e402015-07-29 17:23:57 +01001281 }
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001282 } else {
1283 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
Michel Thierryea91e402015-07-29 17:23:57 +01001284 }
1285}
1286
Chris Wilsone2b763c2017-02-15 08:43:48 +00001287static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001288{
Chris Wilsone2b763c2017-02-15 08:43:48 +00001289 struct i915_address_space *vm = &ppgtt->base;
1290 struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1291 struct i915_page_directory *pd;
1292 u64 start = 0, length = ppgtt->base.total;
1293 u64 from = start;
1294 unsigned int pdpe;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001295
Chris Wilsone2b763c2017-02-15 08:43:48 +00001296 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1297 pd = alloc_pd(vm);
1298 if (IS_ERR(pd))
1299 goto unwind;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001300
Chris Wilsone2b763c2017-02-15 08:43:48 +00001301 gen8_initialize_pd(vm, pd);
1302 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1303 pdp->used_pdpes++;
1304 }
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001305
Chris Wilsone2b763c2017-02-15 08:43:48 +00001306	pdp->used_pdpes++; /* never remove: extra count so used_pdpes never drops to zero */
1307 return 0;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001308
Chris Wilsone2b763c2017-02-15 08:43:48 +00001309unwind:
1310 start -= from;
1311 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1312 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1313 free_pd(vm, pd);
1314 }
1315 pdp->used_pdpes = 0;
1316 return -ENOMEM;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001317}
1318
Daniel Vettereb0b44a2015-03-18 14:47:59 +01001319/*
Ben Widawskyf3a964b2014-02-19 22:05:42 -08001320 * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP registers,
1321 * with a net effect resembling a 2-level page table in normal x86 terms. Each
1322 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 bytes = 4GB of legacy
1323 * 32b address space.
Ben Widawsky37aca442013-11-04 20:47:32 -08001324 *
Ben Widawskyf3a964b2014-02-19 22:05:42 -08001325 */
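/*
 * A worked version of the arithmetic above (illustration only, not driver
 * code): 4 PDPEs * 512 PDEs * 512 PTEs * 4096 byte pages = 4 GiB, i.e. a
 * full 32b address space. A legacy 32b address therefore decomposes with
 * the same shifts gen8_dump_pdp() uses when printing virtual addresses:
 *
 *	pdpe = (addr >> GEN8_PDPE_SHIFT) & 0x3;		bits 31:30
 *	pde  = (addr >> GEN8_PDE_SHIFT)  & 0x1ff;	bits 29:21
 *	pte  = (addr >> GEN8_PTE_SHIFT)  & 0x1ff;	bits 20:12
 */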
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001326static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
Ben Widawsky37aca442013-11-04 20:47:32 -08001327{
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001328 struct i915_address_space *vm = &ppgtt->base;
1329 struct drm_i915_private *dev_priv = vm->i915;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001330 int ret;
Michel Thierry69876be2015-04-08 12:13:27 +01001331
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001332 ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1333 1ULL << 48 :
1334 1ULL << 32;
1335
Mika Kuoppala8776f022015-06-30 18:16:40 +03001336 ret = gen8_init_scratch(&ppgtt->base);
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001337 if (ret) {
1338 ppgtt->base.total = 0;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001339 return ret;
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001340 }
Michel Thierry69876be2015-04-08 12:13:27 +01001341
Chris Wilson84486612017-02-15 08:43:40 +00001342	/* There are only a few exceptions for gen >= 6: chv and bxt.
1343	 * And we are not sure about the latter, so play safe for now.
1344 */
1345 if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
1346 ppgtt->base.pt_kmap_wc = true;
1347
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001348 if (use_4lvl(vm)) {
Chris Wilson84486612017-02-15 08:43:40 +00001349 ret = setup_px(&ppgtt->base, &ppgtt->pml4);
Michel Thierry762d9932015-07-30 11:05:29 +01001350 if (ret)
1351 goto free_scratch;
Michel Thierry6ac18502015-07-29 17:23:46 +01001352
Michel Thierry69ab76f2015-07-29 17:23:55 +01001353 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1354
Mika Kuoppalae7167762017-02-28 17:28:10 +02001355 ppgtt->switch_mm = gen8_mm_switch_4lvl;
Chris Wilsonc5d092a2017-02-15 08:43:49 +00001356 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
Chris Wilson894cceb2017-02-15 08:43:37 +00001357 ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
Chris Wilsonfe52e372017-02-15 08:43:47 +00001358 ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
Michel Thierry762d9932015-07-30 11:05:29 +01001359 } else {
Chris Wilsonfe52e372017-02-15 08:43:47 +00001360 ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
Michel Thierry81ba8aef2015-08-03 09:52:01 +01001361 if (ret)
1362 goto free_scratch;
1363
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001364 if (intel_vgpu_active(dev_priv)) {
Chris Wilsone2b763c2017-02-15 08:43:48 +00001365 ret = gen8_preallocate_top_level_pdp(ppgtt);
1366 if (ret) {
1367 __pdp_fini(&ppgtt->pdp);
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001368 goto free_scratch;
Chris Wilsone2b763c2017-02-15 08:43:48 +00001369 }
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001370 }
Chris Wilson894cceb2017-02-15 08:43:37 +00001371
Mika Kuoppalae7167762017-02-28 17:28:10 +02001372 ppgtt->switch_mm = gen8_mm_switch_3lvl;
Chris Wilsonc5d092a2017-02-15 08:43:49 +00001373 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
Chris Wilson894cceb2017-02-15 08:43:37 +00001374 ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
Chris Wilsonfe52e372017-02-15 08:43:47 +00001375 ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
Michel Thierry81ba8aef2015-08-03 09:52:01 +01001376 }
Michel Thierry6ac18502015-07-29 17:23:46 +01001377
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001378 if (intel_vgpu_active(dev_priv))
Zhiyuan Lv650da342015-08-28 15:41:18 +08001379 gen8_ppgtt_notify_vgt(ppgtt, true);
1380
Mika Kuoppala054b9ac2017-02-28 17:28:11 +02001381 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1382 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1383 ppgtt->base.bind_vma = ppgtt_bind_vma;
1384 ppgtt->debug_dump = gen8_dump_ppgtt;
1385
Michel Thierryd7b26332015-04-08 12:13:34 +01001386 return 0;
Michel Thierry6ac18502015-07-29 17:23:46 +01001387
1388free_scratch:
1389 gen8_free_scratch(&ppgtt->base);
1390 return ret;
Michel Thierryd7b26332015-04-08 12:13:34 +01001391}
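/*
 * NB: with use_4lvl() the top level of the ppgtt above is the pml4
 * (48b of address space); otherwise it is a single pdp (32b), which an
 * active vGPU additionally requires to be fully populated up front.
 */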
1392
Ben Widawsky87d60b62013-12-06 14:11:29 -08001393static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1394{
Ben Widawsky87d60b62013-12-06 14:11:29 -08001395 struct i915_address_space *vm = &ppgtt->base;
Michel Thierry09942c62015-04-08 12:13:30 +01001396 struct i915_page_table *unused;
Michel Thierry07749ef2015-03-16 16:00:54 +00001397 gen6_pte_t scratch_pte;
Chris Wilson381b9432017-02-15 08:43:54 +00001398 u32 pd_entry, pte, pde;
1399 u32 start = 0, length = ppgtt->base.total;
Ben Widawsky87d60b62013-12-06 14:11:29 -08001400
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001401 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02001402 I915_CACHE_LLC, 0);
Ben Widawsky87d60b62013-12-06 14:11:29 -08001403
Dave Gordon731f74c2016-06-24 19:37:46 +01001404 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
Ben Widawsky87d60b62013-12-06 14:11:29 -08001405 u32 expected;
Michel Thierry07749ef2015-03-16 16:00:54 +00001406 gen6_pte_t *pt_vaddr;
Mika Kuoppala567047b2015-06-25 18:35:12 +03001407 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
Michel Thierry09942c62015-04-08 12:13:30 +01001408 pd_entry = readl(ppgtt->pd_addr + pde);
Ben Widawsky87d60b62013-12-06 14:11:29 -08001409 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1410
1411 if (pd_entry != expected)
1412 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1413 pde,
1414 pd_entry,
1415 expected);
1416 seq_printf(m, "\tPDE: %x\n", pd_entry);
1417
Chris Wilson9231da72017-02-15 08:43:41 +00001418 pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
Mika Kuoppalad1c54ac2015-06-25 18:35:11 +03001419
Michel Thierry07749ef2015-03-16 16:00:54 +00001420 for (pte = 0; pte < GEN6_PTES; pte+=4) {
Ben Widawsky87d60b62013-12-06 14:11:29 -08001421 unsigned long va =
Michel Thierry07749ef2015-03-16 16:00:54 +00001422 (pde * PAGE_SIZE * GEN6_PTES) +
Ben Widawsky87d60b62013-12-06 14:11:29 -08001423 (pte * PAGE_SIZE);
1424 int i;
1425 bool found = false;
1426 for (i = 0; i < 4; i++)
1427 if (pt_vaddr[pte + i] != scratch_pte)
1428 found = true;
1429 if (!found)
1430 continue;
1431
1432 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1433 for (i = 0; i < 4; i++) {
1434 if (pt_vaddr[pte + i] != scratch_pte)
1435 seq_printf(m, " %08x", pt_vaddr[pte + i]);
1436 else
1437 seq_puts(m, " SCRATCH ");
1438 }
1439 seq_puts(m, "\n");
1440 }
Chris Wilson9231da72017-02-15 08:43:41 +00001441 kunmap_atomic(pt_vaddr);
Ben Widawsky87d60b62013-12-06 14:11:29 -08001442 }
1443}
1444
Ben Widawsky678d96f2015-03-16 16:00:56 +00001445/* Write pde (index) from the page directory @pd to the page table @pt */
Chris Wilson16a011c2017-02-15 08:43:45 +00001446static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
1447 const unsigned int pde,
1448 const struct i915_page_table *pt)
Ben Widawsky61973492013-04-08 18:43:54 -07001449{
Ben Widawsky678d96f2015-03-16 16:00:56 +00001450 /* Caller needs to make sure the write completes if necessary */
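	/* (NB: callers pair these relaxed writes with mark_tlbs_dirty()
	 * and a wmb(), as gen6_write_page_range() below and
	 * gen6_alloc_va_range() do.)
	 */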
Chris Wilson16a011c2017-02-15 08:43:45 +00001451 writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1452 ppgtt->pd_addr + pde);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001453}
Ben Widawsky61973492013-04-08 18:43:54 -07001454
Ben Widawsky678d96f2015-03-16 16:00:56 +00001455/* Write out all the page tables found in the ppgtt structure to
1456 * successive page directory entries. */
Chris Wilson16a011c2017-02-15 08:43:45 +00001457static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001458 u32 start, u32 length)
Ben Widawsky678d96f2015-03-16 16:00:56 +00001459{
Michel Thierryec565b32015-04-08 12:13:23 +01001460 struct i915_page_table *pt;
Chris Wilson16a011c2017-02-15 08:43:45 +00001461 unsigned int pde;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001462
Chris Wilson16a011c2017-02-15 08:43:45 +00001463 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
1464 gen6_write_pde(ppgtt, pde, pt);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001465
Chris Wilson16a011c2017-02-15 08:43:45 +00001466 mark_tlbs_dirty(ppgtt);
Chris Wilsondd196742017-02-15 08:43:46 +00001467 wmb();
Ben Widawsky3e302542013-04-23 23:15:32 -07001468}
1469
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001470static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
Ben Widawsky3e302542013-04-23 23:15:32 -07001471{
Chris Wilsondd196742017-02-15 08:43:46 +00001472 GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1473 return ppgtt->pd.base.ggtt_offset << 10;
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001474}
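/*
 * NB: pd.base.ggtt_offset is the byte offset of the PD within the gsm
 * (node.start / PAGE_SIZE * sizeof(gen6_pte_t), i.e. node.start >> 10
 * for 4 byte PTEs), so shifting it back left by 10 recovers the PD's
 * GGTT address for PP_DIR_BASE.
 */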
Ben Widawsky61973492013-04-08 18:43:54 -07001475
Ben Widawsky90252e52013-12-06 14:11:12 -08001476static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
John Harrisone85b26d2015-05-29 17:43:56 +01001477 struct drm_i915_gem_request *req)
Ben Widawsky90252e52013-12-06 14:11:12 -08001478{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001479 struct intel_engine_cs *engine = req->engine;
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001480 u32 *cs;
Ben Widawsky61973492013-04-08 18:43:54 -07001481
Ben Widawsky90252e52013-12-06 14:11:12 -08001482 /* NB: TLBs must be flushed and invalidated before a switch */
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001483 cs = intel_ring_begin(req, 6);
1484 if (IS_ERR(cs))
1485 return PTR_ERR(cs);
Ben Widawsky90252e52013-12-06 14:11:12 -08001486
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001487 *cs++ = MI_LOAD_REGISTER_IMM(2);
1488 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1489 *cs++ = PP_DIR_DCLV_2G;
1490 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1491 *cs++ = get_pd_offset(ppgtt);
1492 *cs++ = MI_NOOP;
1493 intel_ring_advance(req, cs);
Ben Widawsky90252e52013-12-06 14:11:12 -08001494
1495 return 0;
1496}
1497
Ben Widawsky48a10382013-12-06 14:11:11 -08001498static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
John Harrisone85b26d2015-05-29 17:43:56 +01001499 struct drm_i915_gem_request *req)
Ben Widawsky48a10382013-12-06 14:11:11 -08001500{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001501 struct intel_engine_cs *engine = req->engine;
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001502 u32 *cs;
Ben Widawsky48a10382013-12-06 14:11:11 -08001503
Ben Widawsky48a10382013-12-06 14:11:11 -08001504 /* NB: TLBs must be flushed and invalidated before a switch */
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001505 cs = intel_ring_begin(req, 6);
1506 if (IS_ERR(cs))
1507 return PTR_ERR(cs);
Ben Widawsky48a10382013-12-06 14:11:11 -08001508
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001509 *cs++ = MI_LOAD_REGISTER_IMM(2);
1510 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1511 *cs++ = PP_DIR_DCLV_2G;
1512 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1513 *cs++ = get_pd_offset(ppgtt);
1514 *cs++ = MI_NOOP;
1515 intel_ring_advance(req, cs);
Ben Widawsky48a10382013-12-06 14:11:11 -08001516
1517 return 0;
1518}
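/*
 * NB: both LRI-based switchers above emit six dwords: the
 * MI_LOAD_REGISTER_IMM(2) header, two (register, value) pairs loading
 * PP_DIR_DCLV and PP_DIR_BASE, and a trailing MI_NOOP (padding to an
 * even dword count, assuming the usual qword-aligned ring emission).
 */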
1519
Ben Widawskyeeb94882013-12-06 14:11:10 -08001520static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
John Harrisone85b26d2015-05-29 17:43:56 +01001521 struct drm_i915_gem_request *req)
Ben Widawskyeeb94882013-12-06 14:11:10 -08001522{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001523 struct intel_engine_cs *engine = req->engine;
Chris Wilson8eb95202016-07-04 08:48:31 +01001524 struct drm_i915_private *dev_priv = req->i915;
Ben Widawsky48a10382013-12-06 14:11:11 -08001525
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001526 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1527 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
Ben Widawskyeeb94882013-12-06 14:11:10 -08001528 return 0;
1529}
1530
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001531static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
Ben Widawskyeeb94882013-12-06 14:11:10 -08001532{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001533 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05301534 enum intel_engine_id id;
Ben Widawskyeeb94882013-12-06 14:11:10 -08001535
Akash Goel3b3f1652016-10-13 22:44:48 +05301536 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001537 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1538 GEN8_GFX_PPGTT_48B : 0;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001539 I915_WRITE(RING_MODE_GEN7(engine),
Michel Thierry2dba3232015-07-30 11:06:23 +01001540 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
Ben Widawskyeeb94882013-12-06 14:11:10 -08001541 }
Ben Widawskyeeb94882013-12-06 14:11:10 -08001542}
1543
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001544static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001545{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001546 struct intel_engine_cs *engine;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001547 u32 ecochk, ecobits;
Akash Goel3b3f1652016-10-13 22:44:48 +05301548 enum intel_engine_id id;
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001549
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001550 ecobits = I915_READ(GAC_ECO_BITS);
1551 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1552
1553 ecochk = I915_READ(GAM_ECOCHK);
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01001554 if (IS_HASWELL(dev_priv)) {
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001555 ecochk |= ECOCHK_PPGTT_WB_HSW;
1556 } else {
1557 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1558 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1559 }
1560 I915_WRITE(GAM_ECOCHK, ecochk);
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001561
Akash Goel3b3f1652016-10-13 22:44:48 +05301562 for_each_engine(engine, dev_priv, id) {
Ben Widawskyeeb94882013-12-06 14:11:10 -08001563 /* GFX_MODE is per-ring on gen7+ */
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001564 I915_WRITE(RING_MODE_GEN7(engine),
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001565 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Ben Widawsky61973492013-04-08 18:43:54 -07001566 }
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001567}
1568
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001569static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
Ben Widawsky61973492013-04-08 18:43:54 -07001570{
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001571 u32 ecochk, gab_ctl, ecobits;
Ben Widawsky61973492013-04-08 18:43:54 -07001572
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001573 ecobits = I915_READ(GAC_ECO_BITS);
1574 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1575 ECOBITS_PPGTT_CACHE64B);
Ben Widawsky61973492013-04-08 18:43:54 -07001576
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001577 gab_ctl = I915_READ(GAB_CTL);
1578 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
Ben Widawsky61973492013-04-08 18:43:54 -07001579
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001580 ecochk = I915_READ(GAM_ECOCHK);
1581 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
Ben Widawsky61973492013-04-08 18:43:54 -07001582
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001583 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Ben Widawsky61973492013-04-08 18:43:54 -07001584}
1585
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001586/* PPGTT support for Sandybridge/Gen6 and later */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001587static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
Chris Wilsondd196742017-02-15 08:43:46 +00001588 u64 start, u64 length)
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001589{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001590 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Chris Wilsondd196742017-02-15 08:43:46 +00001591 unsigned int first_entry = start >> PAGE_SHIFT;
1592 unsigned int pde = first_entry / GEN6_PTES;
1593 unsigned int pte = first_entry % GEN6_PTES;
1594 unsigned int num_entries = length >> PAGE_SHIFT;
1595 gen6_pte_t scratch_pte =
1596 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001597
Daniel Vetter7bddb012012-02-09 17:15:47 +01001598 while (num_entries) {
Chris Wilsondd196742017-02-15 08:43:46 +00001599 struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1600 unsigned int end = min(pte + num_entries, GEN6_PTES);
1601 gen6_pte_t *vaddr;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001602
Chris Wilsondd196742017-02-15 08:43:46 +00001603 num_entries -= end - pte;
Daniel Vetter7bddb012012-02-09 17:15:47 +01001604
Chris Wilsondd196742017-02-15 08:43:46 +00001605		/* Note that the hw doesn't support removing a PDE on the fly
1606		 * (PDEs are cached inside the context with no means to
1607 * invalidate the cache), so we can only reset the PTE
1608 * entries back to scratch.
1609 */
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001610
Chris Wilsondd196742017-02-15 08:43:46 +00001611 vaddr = kmap_atomic_px(pt);
1612 do {
1613 vaddr[pte++] = scratch_pte;
1614 } while (pte < end);
1615 kunmap_atomic(vaddr);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001616
Chris Wilsondd196742017-02-15 08:43:46 +00001617 pte = 0;
Daniel Vetter7bddb012012-02-09 17:15:47 +01001618 }
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001619}
1620
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001621static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
Daniel Vetterdef886c2013-01-24 14:44:56 -08001622 struct sg_table *pages,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001623 u64 start,
1624 enum i915_cache_level cache_level,
1625 u32 flags)
Daniel Vetterdef886c2013-01-24 14:44:56 -08001626{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001627 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Ben Widawsky782f1492014-02-20 11:50:33 -08001628 unsigned first_entry = start >> PAGE_SHIFT;
Michel Thierry07749ef2015-03-16 16:00:54 +00001629 unsigned act_pt = first_entry / GEN6_PTES;
1630 unsigned act_pte = first_entry % GEN6_PTES;
Chris Wilsonb31144c2017-02-15 08:43:36 +00001631 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1632 struct sgt_dma iter;
1633 gen6_pte_t *vaddr;
Daniel Vetterdef886c2013-01-24 14:44:56 -08001634
Chris Wilson9231da72017-02-15 08:43:41 +00001635 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
Chris Wilsonb31144c2017-02-15 08:43:36 +00001636 iter.sg = pages->sgl;
1637 iter.dma = sg_dma_address(iter.sg);
1638 iter.max = iter.dma + iter.sg->length;
1639 do {
1640 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
Daniel Vetterdef886c2013-01-24 14:44:56 -08001641
Chris Wilsonb31144c2017-02-15 08:43:36 +00001642 iter.dma += PAGE_SIZE;
1643 if (iter.dma == iter.max) {
1644 iter.sg = __sg_next(iter.sg);
1645 if (!iter.sg)
1646 break;
1647
1648 iter.dma = sg_dma_address(iter.sg);
1649 iter.max = iter.dma + iter.sg->length;
1650 }
Akash Goel24f3a8c2014-06-17 10:59:42 +05301651
Michel Thierry07749ef2015-03-16 16:00:54 +00001652 if (++act_pte == GEN6_PTES) {
Chris Wilson9231da72017-02-15 08:43:41 +00001653 kunmap_atomic(vaddr);
1654 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
Imre Deak6e995e22013-02-18 19:28:04 +02001655 act_pte = 0;
Daniel Vetterdef886c2013-01-24 14:44:56 -08001656 }
Chris Wilsonb31144c2017-02-15 08:43:36 +00001657 } while (1);
Chris Wilson9231da72017-02-15 08:43:41 +00001658 kunmap_atomic(vaddr);
Daniel Vetterdef886c2013-01-24 14:44:56 -08001659}
1660
Ben Widawsky678d96f2015-03-16 16:00:56 +00001661static int gen6_alloc_va_range(struct i915_address_space *vm,
Chris Wilsondd196742017-02-15 08:43:46 +00001662 u64 start, u64 length)
Ben Widawsky678d96f2015-03-16 16:00:56 +00001663{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001664 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Michel Thierryec565b32015-04-08 12:13:23 +01001665 struct i915_page_table *pt;
Chris Wilsondd196742017-02-15 08:43:46 +00001666 u64 from = start;
1667 unsigned int pde;
1668 bool flush = false;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001669
Dave Gordon731f74c2016-06-24 19:37:46 +01001670 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
Chris Wilsondd196742017-02-15 08:43:46 +00001671 if (pt == vm->scratch_pt) {
1672 pt = alloc_pt(vm);
1673 if (IS_ERR(pt))
1674 goto unwind_out;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001675
Chris Wilsondd196742017-02-15 08:43:46 +00001676 gen6_initialize_pt(vm, pt);
1677 ppgtt->pd.page_table[pde] = pt;
Chris Wilson16a011c2017-02-15 08:43:45 +00001678 gen6_write_pde(ppgtt, pde, pt);
Chris Wilsondd196742017-02-15 08:43:46 +00001679 flush = true;
1680 }
Ben Widawsky678d96f2015-03-16 16:00:56 +00001681 }
1682
Chris Wilsondd196742017-02-15 08:43:46 +00001683 if (flush) {
1684 mark_tlbs_dirty(ppgtt);
1685 wmb();
1686 }
Michel Thierry4933d512015-03-24 15:46:22 +00001687
Ben Widawsky678d96f2015-03-16 16:00:56 +00001688 return 0;
Michel Thierry4933d512015-03-24 15:46:22 +00001689
1690unwind_out:
Chris Wilsondd196742017-02-15 08:43:46 +00001691 gen6_ppgtt_clear_range(vm, from, start);
1692 return -ENOMEM;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001693}
1694
Mika Kuoppala8776f022015-06-30 18:16:40 +03001695static int gen6_init_scratch(struct i915_address_space *vm)
1696{
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001697 int ret;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001698
Chris Wilson84486612017-02-15 08:43:40 +00001699 ret = setup_scratch_page(vm, I915_GFP_DMA);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001700 if (ret)
1701 return ret;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001702
Chris Wilson84486612017-02-15 08:43:40 +00001703 vm->scratch_pt = alloc_pt(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001704 if (IS_ERR(vm->scratch_pt)) {
Chris Wilson84486612017-02-15 08:43:40 +00001705 cleanup_scratch_page(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001706 return PTR_ERR(vm->scratch_pt);
1707 }
1708
1709 gen6_initialize_pt(vm, vm->scratch_pt);
1710
1711 return 0;
1712}
1713
1714static void gen6_free_scratch(struct i915_address_space *vm)
1715{
Chris Wilson84486612017-02-15 08:43:40 +00001716 free_pt(vm, vm->scratch_pt);
1717 cleanup_scratch_page(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001718}
1719
Daniel Vetter061dd492015-04-14 17:35:13 +02001720static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
Ben Widawskya00d8252014-02-19 22:05:48 -08001721{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001722 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Dave Gordon731f74c2016-06-24 19:37:46 +01001723 struct i915_page_directory *pd = &ppgtt->pd;
Michel Thierry09942c62015-04-08 12:13:30 +01001724 struct i915_page_table *pt;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001725 u32 pde;
Daniel Vetter3440d262013-01-24 13:49:56 -08001726
Daniel Vetter061dd492015-04-14 17:35:13 +02001727 drm_mm_remove_node(&ppgtt->node);
1728
Dave Gordon731f74c2016-06-24 19:37:46 +01001729 gen6_for_all_pdes(pt, pd, pde)
Mika Kuoppala79ab9372015-06-25 18:35:17 +03001730 if (pt != vm->scratch_pt)
Chris Wilson84486612017-02-15 08:43:40 +00001731 free_pt(vm, pt);
Michel Thierry4933d512015-03-24 15:46:22 +00001732
Mika Kuoppala8776f022015-06-30 18:16:40 +03001733 gen6_free_scratch(vm);
Daniel Vetter3440d262013-01-24 13:49:56 -08001734}
1735
Ben Widawskyb1465202014-02-19 22:05:49 -08001736static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
Daniel Vetter3440d262013-01-24 13:49:56 -08001737{
Mika Kuoppala8776f022015-06-30 18:16:40 +03001738 struct i915_address_space *vm = &ppgtt->base;
Chris Wilson49d73912016-11-29 09:50:08 +00001739 struct drm_i915_private *dev_priv = ppgtt->base.i915;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001740 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawskyb1465202014-02-19 22:05:49 -08001741 int ret;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001742
Ben Widawskyc8d4c0d2013-12-06 14:11:07 -08001743	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1744 * allocator works in address space sizes, so it's multiplied by page
1745 * size. We allocate at the top of the GTT to avoid fragmentation.
1746 */
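	/* (Worked numbers: GEN6_PD_SIZE reserves 512 * 4096 bytes = 2 MiB
	 * of GGTT address space, one page of address space per PDE; each
	 * such page corresponds to one 4 byte PDE slot in the gsm, see the
	 * ggtt_offset calculation below.)
	 */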
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001747 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
Michel Thierry4933d512015-03-24 15:46:22 +00001748
Mika Kuoppala8776f022015-06-30 18:16:40 +03001749 ret = gen6_init_scratch(vm);
1750 if (ret)
1751 return ret;
Michel Thierry4933d512015-03-24 15:46:22 +00001752
Chris Wilsone007b192017-01-11 11:23:10 +00001753 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1754 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1755 I915_COLOR_UNEVICTABLE,
1756 0, ggtt->base.total,
1757 PIN_HIGH);
Ben Widawskyc8c26622015-01-22 17:01:25 +00001758 if (ret)
Ben Widawsky678d96f2015-03-16 16:00:56 +00001759 goto err_out;
1760
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001761 if (ppgtt->node.start < ggtt->mappable_end)
Ben Widawskyc8d4c0d2013-12-06 14:11:07 -08001762 DRM_DEBUG("Forced to use aperture for PDEs\n");
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001763
Chris Wilson52c126e2017-02-15 08:43:43 +00001764 ppgtt->pd.base.ggtt_offset =
1765 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1766
1767 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
1768 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
1769
Ben Widawskyc8c26622015-01-22 17:01:25 +00001770 return 0;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001771
1772err_out:
Mika Kuoppala8776f022015-06-30 18:16:40 +03001773 gen6_free_scratch(vm);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001774 return ret;
Ben Widawskyb1465202014-02-19 22:05:49 -08001775}
1776
Ben Widawskyb1465202014-02-19 22:05:49 -08001777static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1778{
kbuild test robot2f2cf682015-03-27 19:26:35 +08001779 return gen6_ppgtt_allocate_page_directories(ppgtt);
Ben Widawskyb1465202014-02-19 22:05:49 -08001780}
1781
Michel Thierry4933d512015-03-24 15:46:22 +00001782static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001783 u64 start, u64 length)
Michel Thierry4933d512015-03-24 15:46:22 +00001784{
Michel Thierryec565b32015-04-08 12:13:23 +01001785 struct i915_page_table *unused;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001786 u32 pde;
Michel Thierry4933d512015-03-24 15:46:22 +00001787
Dave Gordon731f74c2016-06-24 19:37:46 +01001788 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
Mika Kuoppala79ab9372015-06-25 18:35:17 +03001789 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
Michel Thierry4933d512015-03-24 15:46:22 +00001790}
1791
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001792static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
Ben Widawskyb1465202014-02-19 22:05:49 -08001793{
Chris Wilson49d73912016-11-29 09:50:08 +00001794 struct drm_i915_private *dev_priv = ppgtt->base.i915;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001795 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawskyb1465202014-02-19 22:05:49 -08001796 int ret;
1797
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001798 ppgtt->base.pte_encode = ggtt->base.pte_encode;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001799 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
Ben Widawsky48a10382013-12-06 14:11:11 -08001800 ppgtt->switch_mm = gen6_mm_switch;
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01001801 else if (IS_HASWELL(dev_priv))
Ben Widawsky90252e52013-12-06 14:11:12 -08001802 ppgtt->switch_mm = hsw_mm_switch;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001803 else if (IS_GEN7(dev_priv))
Ben Widawsky48a10382013-12-06 14:11:11 -08001804 ppgtt->switch_mm = gen7_mm_switch;
Chris Wilson8eb95202016-07-04 08:48:31 +01001805 else
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001806 BUG();
Ben Widawskyb1465202014-02-19 22:05:49 -08001807
1808 ret = gen6_ppgtt_alloc(ppgtt);
1809 if (ret)
1810 return ret;
1811
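	/* 512 PDEs * 1024 PTEs * 4 KiB pages = 2 GiB of address space;
	 * GEN6_PTES is PAGE_SIZE / sizeof(gen6_pte_t).
	 */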
Michel Thierry09942c62015-04-08 12:13:30 +01001812 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001813
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001814 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
Chris Wilson16a011c2017-02-15 08:43:45 +00001815 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001816
Chris Wilson52c126e2017-02-15 08:43:43 +00001817 ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
1818 if (ret) {
1819 gen6_ppgtt_cleanup(&ppgtt->base);
1820 return ret;
1821 }
1822
Mika Kuoppala054b9ac2017-02-28 17:28:11 +02001823 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1824 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1825 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1826 ppgtt->base.bind_vma = ppgtt_bind_vma;
1827 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1828 ppgtt->debug_dump = gen6_dump_ppgtt;
1829
Thierry Reding440fd522015-01-23 09:05:06 +01001830 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
Ben Widawskyc8d4c0d2013-12-06 14:11:07 -08001831 ppgtt->node.size >> 20,
1832 ppgtt->node.start / PAGE_SIZE);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001833
Chris Wilson52c126e2017-02-15 08:43:43 +00001834 DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
1835 ppgtt->pd.base.ggtt_offset << 10);
Daniel Vetterfa76da32014-08-06 20:19:54 +02001836
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001837 return 0;
Daniel Vetter3440d262013-01-24 13:49:56 -08001838}
1839
Chris Wilson2bfa9962016-08-04 07:52:25 +01001840static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
1841 struct drm_i915_private *dev_priv)
Daniel Vetter3440d262013-01-24 13:49:56 -08001842{
Chris Wilson49d73912016-11-29 09:50:08 +00001843 ppgtt->base.i915 = dev_priv;
Chris Wilson84486612017-02-15 08:43:40 +00001844 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
Daniel Vetter3440d262013-01-24 13:49:56 -08001845
Chris Wilson2bfa9962016-08-04 07:52:25 +01001846 if (INTEL_INFO(dev_priv)->gen < 8)
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001847 return gen6_ppgtt_init(ppgtt);
Ben Widawsky3ed124b2013-04-08 18:43:53 -07001848 else
Michel Thierryd7b26332015-04-08 12:13:34 +01001849 return gen8_ppgtt_init(ppgtt);
Daniel Vetterfa76da32014-08-06 20:19:54 +02001850}
Mika Kuoppalac114f762015-06-25 18:35:13 +03001851
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001852static void i915_address_space_init(struct i915_address_space *vm,
Chris Wilson80b204b2016-10-28 13:58:58 +01001853 struct drm_i915_private *dev_priv,
1854 const char *name)
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001855{
Chris Wilson80b204b2016-10-28 13:58:58 +01001856 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
Chris Wilson47db9222017-02-06 08:45:46 +00001857
Chris Wilson381b9432017-02-15 08:43:54 +00001858 drm_mm_init(&vm->mm, 0, vm->total);
Chris Wilson47db9222017-02-06 08:45:46 +00001859 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
1860
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001861 INIT_LIST_HEAD(&vm->active_list);
1862 INIT_LIST_HEAD(&vm->inactive_list);
Chris Wilson50e046b2016-08-04 07:52:46 +01001863 INIT_LIST_HEAD(&vm->unbound_list);
Chris Wilson47db9222017-02-06 08:45:46 +00001864
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001865 list_add_tail(&vm->global_link, &dev_priv->vm_list);
Chris Wilson84486612017-02-15 08:43:40 +00001866 pagevec_init(&vm->free_pages, false);
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001867}
1868
Matthew Aulded9724d2016-11-17 21:04:10 +00001869static void i915_address_space_fini(struct i915_address_space *vm)
1870{
Chris Wilson84486612017-02-15 08:43:40 +00001871 if (pagevec_count(&vm->free_pages))
1872 vm_free_pages_release(vm);
1873
Matthew Aulded9724d2016-11-17 21:04:10 +00001874 i915_gem_timeline_fini(&vm->timeline);
1875 drm_mm_takedown(&vm->mm);
1876 list_del(&vm->global_link);
1877}
1878
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001879static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
Tim Gored5165eb2016-02-04 11:49:34 +00001880{
Tim Gored5165eb2016-02-04 11:49:34 +00001881 /* This function is for gtt related workarounds. This function is
1882 * called on driver load and after a GPU reset, so you can place
1883 * workarounds here even if they get overwritten by GPU reset.
1884 */
Ander Conselvan de Oliveira9fb50262017-01-26 11:16:58 +02001885 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
Tvrtko Ursulin86527442016-10-13 11:03:00 +01001886 if (IS_BROADWELL(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001887 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01001888 else if (IS_CHERRYVIEW(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001889 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
Rodrigo Vivib976dc52017-01-23 10:32:37 -08001890 else if (IS_GEN9_BC(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001891 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
Ander Conselvan de Oliveira9fb50262017-01-26 11:16:58 +02001892 else if (IS_GEN9_LP(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001893 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
1894}
1895
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001896int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
Daniel Vetter82460d92014-08-06 20:19:53 +02001897{
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001898 gtt_write_workarounds(dev_priv);
Tim Gored5165eb2016-02-04 11:49:34 +00001899
Thomas Daniel671b50132014-08-20 16:24:50 +01001900 /* In the case of execlists, PPGTT is enabled by the context descriptor
1901 * and the PDPs are contained within the context itself. We don't
1902 * need to do anything here. */
1903 if (i915.enable_execlists)
1904 return 0;
1905
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001906 if (!USES_PPGTT(dev_priv))
Daniel Vetter82460d92014-08-06 20:19:53 +02001907 return 0;
1908
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001909 if (IS_GEN6(dev_priv))
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001910 gen6_ppgtt_enable(dev_priv);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001911 else if (IS_GEN7(dev_priv))
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001912 gen7_ppgtt_enable(dev_priv);
1913 else if (INTEL_GEN(dev_priv) >= 8)
1914 gen8_ppgtt_enable(dev_priv);
Daniel Vetter82460d92014-08-06 20:19:53 +02001915 else
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001916 MISSING_CASE(INTEL_GEN(dev_priv));
Daniel Vetter82460d92014-08-06 20:19:53 +02001917
John Harrison4ad2fd82015-06-18 13:11:20 +01001918 return 0;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001919}
John Harrison4ad2fd82015-06-18 13:11:20 +01001920
Daniel Vetter4d884702014-08-06 15:04:47 +02001921struct i915_hw_ppgtt *
Chris Wilson2bfa9962016-08-04 07:52:25 +01001922i915_ppgtt_create(struct drm_i915_private *dev_priv,
Chris Wilson80b204b2016-10-28 13:58:58 +01001923 struct drm_i915_file_private *fpriv,
1924 const char *name)
Daniel Vetter4d884702014-08-06 15:04:47 +02001925{
1926 struct i915_hw_ppgtt *ppgtt;
1927 int ret;
1928
1929 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1930 if (!ppgtt)
1931 return ERR_PTR(-ENOMEM);
1932
Chris Wilson1188bc62017-02-15 08:43:38 +00001933 ret = __hw_ppgtt_init(ppgtt, dev_priv);
Daniel Vetter4d884702014-08-06 15:04:47 +02001934 if (ret) {
1935 kfree(ppgtt);
1936 return ERR_PTR(ret);
1937 }
1938
Chris Wilson1188bc62017-02-15 08:43:38 +00001939 kref_init(&ppgtt->ref);
1940 i915_address_space_init(&ppgtt->base, dev_priv, name);
1941 ppgtt->base.file = fpriv;
1942
Daniele Ceraolo Spurio198c9742014-11-10 13:44:31 +00001943 trace_i915_ppgtt_create(&ppgtt->base);
1944
Daniel Vetter4d884702014-08-06 15:04:47 +02001945 return ppgtt;
1946}
1947
Chris Wilson0c7eeda2017-01-11 21:09:25 +00001948void i915_ppgtt_close(struct i915_address_space *vm)
1949{
1950 struct list_head *phases[] = {
1951 &vm->active_list,
1952 &vm->inactive_list,
1953 &vm->unbound_list,
1954 NULL,
1955 }, **phase;
1956
1957 GEM_BUG_ON(vm->closed);
1958 vm->closed = true;
1959
1960 for (phase = phases; *phase; phase++) {
1961 struct i915_vma *vma, *vn;
1962
1963 list_for_each_entry_safe(vma, vn, *phase, vm_link)
1964 if (!i915_vma_is_closed(vma))
1965 i915_vma_close(vma);
1966 }
1967}
1968
Matthew Aulded9724d2016-11-17 21:04:10 +00001969void i915_ppgtt_release(struct kref *kref)
Daniel Vetteree960be2014-08-06 15:04:45 +02001970{
1971 struct i915_hw_ppgtt *ppgtt =
1972 container_of(kref, struct i915_hw_ppgtt, ref);
1973
Daniele Ceraolo Spurio198c9742014-11-10 13:44:31 +00001974 trace_i915_ppgtt_release(&ppgtt->base);
1975
Chris Wilson50e046b2016-08-04 07:52:46 +01001976 /* vmas should already be unbound and destroyed */
Daniel Vetteree960be2014-08-06 15:04:45 +02001977 WARN_ON(!list_empty(&ppgtt->base.active_list));
1978 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
Chris Wilson50e046b2016-08-04 07:52:46 +01001979 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
Daniel Vetteree960be2014-08-06 15:04:45 +02001980
1981 ppgtt->base.cleanup(&ppgtt->base);
Chris Wilson84486612017-02-15 08:43:40 +00001982 i915_address_space_fini(&ppgtt->base);
Daniel Vetteree960be2014-08-06 15:04:45 +02001983 kfree(ppgtt);
1984}
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001985
Ben Widawskya81cc002013-01-18 12:30:31 -08001986/* Certain Gen5 chipsets require idling the GPU before
1987 * unmapping anything from the GTT when VT-d is enabled.
1988 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001989static bool needs_idle_maps(struct drm_i915_private *dev_priv)
Ben Widawskya81cc002013-01-18 12:30:31 -08001990{
Ben Widawskya81cc002013-01-18 12:30:31 -08001991 /* Query intel_iommu to see if we need the workaround. Presumably that
1992 * was loaded first.
1993 */
Chris Wilson80debff2017-05-25 13:16:12 +01001994 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
Ben Widawskya81cc002013-01-18 12:30:31 -08001995}
1996
Chris Wilsondc979972016-05-10 14:10:04 +01001997void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
Ben Widawsky828c7902013-10-16 09:21:30 -07001998{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001999 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302000 enum intel_engine_id id;
Ben Widawsky828c7902013-10-16 09:21:30 -07002001
Chris Wilsondc979972016-05-10 14:10:04 +01002002 if (INTEL_INFO(dev_priv)->gen < 6)
Ben Widawsky828c7902013-10-16 09:21:30 -07002003 return;
2004
Akash Goel3b3f1652016-10-13 22:44:48 +05302005 for_each_engine(engine, dev_priv, id) {
Ben Widawsky828c7902013-10-16 09:21:30 -07002006 u32 fault_reg;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002007 fault_reg = I915_READ(RING_FAULT_REG(engine));
Ben Widawsky828c7902013-10-16 09:21:30 -07002008 if (fault_reg & RING_FAULT_VALID) {
2009 DRM_DEBUG_DRIVER("Unexpected fault\n"
Paulo Zanoni59a5d292014-10-30 15:52:45 -02002010 "\tAddr: 0x%08lx\n"
Ben Widawsky828c7902013-10-16 09:21:30 -07002011 "\tAddress space: %s\n"
2012 "\tSource ID: %d\n"
2013 "\tType: %d\n",
2014 fault_reg & PAGE_MASK,
2015 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2016 RING_FAULT_SRCID(fault_reg),
2017 RING_FAULT_FAULT_TYPE(fault_reg));
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002018 I915_WRITE(RING_FAULT_REG(engine),
Ben Widawsky828c7902013-10-16 09:21:30 -07002019 fault_reg & ~RING_FAULT_VALID);
2020 }
2021 }
Akash Goel3b3f1652016-10-13 22:44:48 +05302022
2023 /* Engine specific init may not have been done till this point. */
2024 if (dev_priv->engine[RCS])
2025 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
Ben Widawsky828c7902013-10-16 09:21:30 -07002026}
2027
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00002028void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
Ben Widawsky828c7902013-10-16 09:21:30 -07002029{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002030 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawsky828c7902013-10-16 09:21:30 -07002031
2032 /* Don't bother messing with faults pre GEN6 as we have little
2033 * documentation supporting that it's a good idea.
2034 */
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00002035 if (INTEL_GEN(dev_priv) < 6)
Ben Widawsky828c7902013-10-16 09:21:30 -07002036 return;
2037
Chris Wilsondc979972016-05-10 14:10:04 +01002038 i915_check_and_clear_faults(dev_priv);
Ben Widawsky828c7902013-10-16 09:21:30 -07002039
Chris Wilson381b9432017-02-15 08:43:54 +00002040 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
Chris Wilson91e56492014-09-25 10:13:12 +01002041
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002042 i915_ggtt_invalidate(dev_priv);
Ben Widawsky828c7902013-10-16 09:21:30 -07002043}
2044
Chris Wilson03ac84f2016-10-28 13:58:36 +01002045int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2046 struct sg_table *pages)
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002047{
Chris Wilson1a292fa2017-01-06 15:22:39 +00002048 do {
2049 if (dma_map_sg(&obj->base.dev->pdev->dev,
2050 pages->sgl, pages->nents,
2051 PCI_DMA_BIDIRECTIONAL))
2052 return 0;
2053
2054 /* If the DMA remap fails, one cause can be that we have
2055 * too many objects pinned in a small remapping table,
2056 * such as swiotlb. Incrementally purge all other objects and
2057 * try again - if there are no more pages to remove from
2058 * the DMA remapper, i915_gem_shrink will return 0.
2059 */
2060 GEM_BUG_ON(obj->mm.pages == pages);
2061 } while (i915_gem_shrink(to_i915(obj->base.dev),
2062 obj->base.size >> PAGE_SHIFT,
2063 I915_SHRINK_BOUND |
2064 I915_SHRINK_UNBOUND |
2065 I915_SHRINK_ACTIVE));
Chris Wilson9da3da62012-06-01 15:20:22 +01002066
Chris Wilson03ac84f2016-10-28 13:58:36 +01002067 return -ENOSPC;
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002068}
2069
Daniel Vetter2c642b02015-04-14 17:35:26 +02002070static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002071{
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002072 writeq(pte, addr);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002073}
2074
Chris Wilsond6473f52016-06-10 14:22:59 +05302075static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2076 dma_addr_t addr,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002077 u64 offset,
Chris Wilsond6473f52016-06-10 14:22:59 +05302078 enum i915_cache_level level,
2079 u32 unused)
2080{
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002081 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Chris Wilsond6473f52016-06-10 14:22:59 +05302082 gen8_pte_t __iomem *pte =
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002083 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
Chris Wilsond6473f52016-06-10 14:22:59 +05302084
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002085 gen8_set_pte(pte, gen8_pte_encode(addr, level));
Chris Wilsond6473f52016-06-10 14:22:59 +05302086
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002087 ggtt->invalidate(vm->i915);
Chris Wilsond6473f52016-06-10 14:22:59 +05302088}
2089
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002090static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2091 struct sg_table *st,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002092 u64 start,
2093 enum i915_cache_level level,
2094 u32 unused)
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002095{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002096 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Dave Gordon85d12252016-05-20 11:54:06 +01002097 struct sgt_iter sgt_iter;
2098 gen8_pte_t __iomem *gtt_entries;
Chris Wilson894cceb2017-02-15 08:43:37 +00002099 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
Dave Gordon85d12252016-05-20 11:54:06 +01002100 dma_addr_t addr;
Imre Deakbe694592015-12-15 20:10:38 +02002101
Chris Wilson894cceb2017-02-15 08:43:37 +00002102 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2103 gtt_entries += start >> PAGE_SHIFT;
2104 for_each_sgt_dma(addr, sgt_iter, st)
2105 gen8_set_pte(gtt_entries++, pte_encode | addr);
Dave Gordon85d12252016-05-20 11:54:06 +01002106
Chris Wilson894cceb2017-02-15 08:43:37 +00002107 wmb();
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002108
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002109	/* We want to flush the TLBs only after we're certain all the
2110	 * PTE updates have finished; hence the wmb() above before the
2111	 * invalidate below.
2112 */
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002113 ggtt->invalidate(vm->i915);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002114}
2115
Chris Wilsond6473f52016-06-10 14:22:59 +05302116static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2117 dma_addr_t addr,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002118 u64 offset,
Chris Wilsond6473f52016-06-10 14:22:59 +05302119 enum i915_cache_level level,
2120 u32 flags)
2121{
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002122 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Chris Wilsond6473f52016-06-10 14:22:59 +05302123 gen6_pte_t __iomem *pte =
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002124 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
Chris Wilsond6473f52016-06-10 14:22:59 +05302125
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002126 iowrite32(vm->pte_encode(addr, level, flags), pte);
Chris Wilsond6473f52016-06-10 14:22:59 +05302127
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002128 ggtt->invalidate(vm->i915);
Chris Wilsond6473f52016-06-10 14:22:59 +05302129}
2130
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002131/*
2132 * Binds an object into the global gtt with the specified cache level. The object
2133 * will be accessible to the GPU via commands whose operands reference offsets
2134 * within the global GTT as well as accessible by the GPU through the GMADR
2135 * mapped BAR (dev_priv->mm.gtt->gtt).
2136 */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002137static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002138 struct sg_table *st,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002139 u64 start,
2140 enum i915_cache_level level,
2141 u32 flags)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002142{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002143 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Chris Wilsonb31144c2017-02-15 08:43:36 +00002144 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2145 unsigned int i = start >> PAGE_SHIFT;
2146 struct sgt_iter iter;
Dave Gordon85d12252016-05-20 11:54:06 +01002147 dma_addr_t addr;

Chris Wilsonb31144c2017-02-15 08:43:36 +00002148	for_each_sgt_dma(addr, iter, st)
2149 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2150 wmb();
Ben Widawsky0f9b91c2012-11-04 09:21:30 -08002151
2152	/* We want to flush the TLBs only after we're certain all the
2153	 * PTE updates have finished; hence the wmb() above before the
2154	 * invalidate below.
2155 */
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002156 ggtt->invalidate(vm->i915);
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002157}
2158
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002159static void nop_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002160 u64 start, u64 length)
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002161{
2162}
2163
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002164static void gen8_ggtt_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002165 u64 start, u64 length)
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002166{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002167 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Ben Widawsky782f1492014-02-20 11:50:33 -08002168 unsigned first_entry = start >> PAGE_SHIFT;
2169 unsigned num_entries = length >> PAGE_SHIFT;
Chris Wilson894cceb2017-02-15 08:43:37 +00002170 const gen8_pte_t scratch_pte =
2171 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2172 gen8_pte_t __iomem *gtt_base =
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002173 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2174 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002175 int i;
2176
2177 if (WARN(num_entries > max_entries,
2178 "First entry = %d; Num entries = %d (max=%d)\n",
2179 first_entry, num_entries, max_entries))
2180 num_entries = max_entries;
2181
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002182 for (i = 0; i < num_entries; i++)
2183 gen8_set_pte(&gtt_base[i], scratch_pte);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002184}
2185
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07002186static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2187{
2188 struct drm_i915_private *dev_priv = vm->i915;
2189
2190 /*
2191 * Make sure the internal GAM fifo has been cleared of all GTT
2192 * writes before exiting stop_machine(). This guarantees that
2193 * any aperture accesses waiting to start in another process
2194 * cannot back up behind the GTT writes causing a hang.
2195 * The register can be any arbitrary GAM register.
2196 */
2197 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2198}
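/*
 * NB: on the VT-d affected parts every GGTT update below is funnelled
 * through stop_machine(), serialising the PTE writes against all other
 * CPUs, with bxt_vtd_ggtt_wa() draining the GAM fifo before the other
 * CPUs are released.
 */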
2199
2200struct insert_page {
2201 struct i915_address_space *vm;
2202 dma_addr_t addr;
2203 u64 offset;
2204 enum i915_cache_level level;
2205};
2206
2207static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2208{
2209 struct insert_page *arg = _arg;
2210
2211 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2212 bxt_vtd_ggtt_wa(arg->vm);
2213
2214 return 0;
2215}
2216
2217static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2218 dma_addr_t addr,
2219 u64 offset,
2220 enum i915_cache_level level,
2221 u32 unused)
2222{
2223 struct insert_page arg = { vm, addr, offset, level };
2224
2225 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2226}
2227
2228struct insert_entries {
2229 struct i915_address_space *vm;
2230 struct sg_table *st;
2231 u64 start;
2232 enum i915_cache_level level;
2233};
2234
2235static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2236{
2237 struct insert_entries *arg = _arg;
2238
2239 gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
2240 bxt_vtd_ggtt_wa(arg->vm);
2241
2242 return 0;
2243}
2244
2245static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2246 struct sg_table *st,
2247 u64 start,
2248 enum i915_cache_level level,
2249 u32 unused)
2250{
2251 struct insert_entries arg = { vm, st, start, level };
2252
2253 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2254}
2255
2256struct clear_range {
2257 struct i915_address_space *vm;
2258 u64 start;
2259 u64 length;
2260};
2261
2262static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2263{
2264 struct clear_range *arg = _arg;
2265
2266 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2267 bxt_vtd_ggtt_wa(arg->vm);
2268
2269 return 0;
2270}
2271
2272static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2273 u64 start,
2274 u64 length)
2275{
2276 struct clear_range arg = { vm, start, length };
2277
2278 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2279}
2280
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002281static void gen6_ggtt_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002282 u64 start, u64 length)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002283{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002284 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Ben Widawsky782f1492014-02-20 11:50:33 -08002285 unsigned first_entry = start >> PAGE_SHIFT;
2286 unsigned num_entries = length >> PAGE_SHIFT;
Michel Thierry07749ef2015-03-16 16:00:54 +00002287 gen6_pte_t scratch_pte, __iomem *gtt_base =
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002288 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2289 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002290 int i;
2291
2292 if (WARN(num_entries > max_entries,
2293 "First entry = %d; Num entries = %d (max=%d)\n",
2294 first_entry, num_entries, max_entries))
2295 num_entries = max_entries;
2296
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002297 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002298 I915_CACHE_LLC, 0);
Ben Widawsky828c7902013-10-16 09:21:30 -07002299
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002300 for (i = 0; i < num_entries; i++)
2301 iowrite32(scratch_pte, &gtt_base[i]);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002302}
2303
Chris Wilsond6473f52016-06-10 14:22:59 +05302304static void i915_ggtt_insert_page(struct i915_address_space *vm,
2305 dma_addr_t addr,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002306 u64 offset,
Chris Wilsond6473f52016-06-10 14:22:59 +05302307 enum i915_cache_level cache_level,
2308 u32 unused)
2309{
Chris Wilsond6473f52016-06-10 14:22:59 +05302310 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2311 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
Chris Wilsond6473f52016-06-10 14:22:59 +05302312
2313 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
Chris Wilsond6473f52016-06-10 14:22:59 +05302314}
2315
Daniel Vetterd369d2d2015-04-14 17:35:25 +02002316static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2317 struct sg_table *pages,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002318 u64 start,
2319 enum i915_cache_level cache_level,
2320 u32 unused)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002321{
2322 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2323 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2324
Daniel Vetterd369d2d2015-04-14 17:35:25 +02002325 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002326}
2327
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002328static void i915_ggtt_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002329 u64 start, u64 length)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002330{
Chris Wilson2eedfc72016-10-24 13:42:17 +01002331 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002332}
2333
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02002334static int ggtt_bind_vma(struct i915_vma *vma,
2335 enum i915_cache_level cache_level,
2336 u32 flags)
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002337{
Chris Wilson49d73912016-11-29 09:50:08 +00002338 struct drm_i915_private *i915 = vma->vm->i915;
Daniel Vetter0a878712015-10-15 14:23:01 +02002339 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsonba7a5742017-02-15 08:43:35 +00002340 u32 pte_flags;
Daniel Vetter0a878712015-10-15 14:23:01 +02002341
Chris Wilsonba7a5742017-02-15 08:43:35 +00002342 if (unlikely(!vma->pages)) {
2343 int ret = i915_get_ggtt_vma_pages(vma);
2344 if (ret)
2345 return ret;
2346 }
Daniel Vetter0a878712015-10-15 14:23:01 +02002347
2348 /* Currently applicable only to VLV */
Chris Wilsonba7a5742017-02-15 08:43:35 +00002349 pte_flags = 0;
Daniel Vetter0a878712015-10-15 14:23:01 +02002350 if (obj->gt_ro)
2351 pte_flags |= PTE_READ_ONLY;
2352
Chris Wilson9c870d02016-10-24 13:42:15 +01002353 intel_runtime_pm_get(i915);
Chris Wilson247177d2016-08-15 10:48:47 +01002354 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
Daniel Vetter0a878712015-10-15 14:23:01 +02002355 cache_level, pte_flags);
Chris Wilson9c870d02016-10-24 13:42:15 +01002356 intel_runtime_pm_put(i915);
Daniel Vetter0a878712015-10-15 14:23:01 +02002357
2358 /*
2359 * Without aliasing PPGTT there's no difference between
2360	 * GLOBAL/LOCAL_BIND; it's all the same PTEs. Hence unconditionally
2361	 * upgrade to both bound if we bind either, to avoid double-binding.
2362 */
Chris Wilson3272db52016-08-04 16:32:32 +01002363 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
Daniel Vetter0a878712015-10-15 14:23:01 +02002364
2365 return 0;
2366}
2367
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002368static void ggtt_unbind_vma(struct i915_vma *vma)
2369{
2370 struct drm_i915_private *i915 = vma->vm->i915;
2371
2372 intel_runtime_pm_get(i915);
2373 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2374 intel_runtime_pm_put(i915);
2375}
2376
Daniel Vetter0a878712015-10-15 14:23:01 +02002377static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2378 enum i915_cache_level cache_level,
2379 u32 flags)
2380{
Chris Wilson49d73912016-11-29 09:50:08 +00002381 struct drm_i915_private *i915 = vma->vm->i915;
Chris Wilson321d1782015-11-20 10:27:18 +00002382 u32 pte_flags;
Chris Wilsonff685972017-02-15 08:43:42 +00002383 int ret;
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02002384
Chris Wilsonba7a5742017-02-15 08:43:35 +00002385 if (unlikely(!vma->pages)) {
Chris Wilsonff685972017-02-15 08:43:42 +00002386 ret = i915_get_ggtt_vma_pages(vma);
Chris Wilsonba7a5742017-02-15 08:43:35 +00002387 if (ret)
2388 return ret;
2389 }
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002390
Akash Goel24f3a8c2014-06-17 10:59:42 +05302391 /* Currently applicable only to VLV */
Chris Wilson321d1782015-11-20 10:27:18 +00002392 pte_flags = 0;
2393 if (vma->obj->gt_ro)
Daniel Vetterf329f5f2015-04-14 17:35:15 +02002394 pte_flags |= PTE_READ_ONLY;
Akash Goel24f3a8c2014-06-17 10:59:42 +05302395
Chris Wilsonff685972017-02-15 08:43:42 +00002396 if (flags & I915_VMA_LOCAL_BIND) {
2397 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2398
Matthew Auld1f234752017-05-12 10:14:23 +01002399 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2400 appgtt->base.allocate_va_range) {
Chris Wilsonff685972017-02-15 08:43:42 +00002401 ret = appgtt->base.allocate_va_range(&appgtt->base,
2402 vma->node.start,
Matthew Auldd5672322017-05-16 09:55:14 +01002403 vma->size);
Chris Wilsonff685972017-02-15 08:43:42 +00002404 if (ret)
Chris Wilson2f7399a2017-02-27 12:26:53 +00002405 goto err_pages;
Chris Wilsonff685972017-02-15 08:43:42 +00002406 }
2407
2408 appgtt->base.insert_entries(&appgtt->base,
2409 vma->pages, vma->node.start,
2410 cache_level, pte_flags);
2411 }
2412
Chris Wilson3272db52016-08-04 16:32:32 +01002413 if (flags & I915_VMA_GLOBAL_BIND) {
Chris Wilson9c870d02016-10-24 13:42:15 +01002414 intel_runtime_pm_get(i915);
Chris Wilson321d1782015-11-20 10:27:18 +00002415 vma->vm->insert_entries(vma->vm,
Chris Wilson247177d2016-08-15 10:48:47 +01002416 vma->pages, vma->node.start,
Daniel Vetter08755462015-04-20 09:04:05 -07002417 cache_level, pte_flags);
Chris Wilson9c870d02016-10-24 13:42:15 +01002418 intel_runtime_pm_put(i915);
Ben Widawsky6f65e292013-12-06 14:10:56 -08002419 }
Daniel Vetter74898d72012-02-15 23:50:22 +01002420
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02002421 return 0;
Chris Wilson2f7399a2017-02-27 12:26:53 +00002422
2423err_pages:
2424 if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
2425 if (vma->pages != vma->obj->mm.pages) {
2426 GEM_BUG_ON(!vma->pages);
2427 sg_free_table(vma->pages);
2428 kfree(vma->pages);
2429 }
2430 vma->pages = NULL;
2431 }
2432 return ret;
Ben Widawsky6f65e292013-12-06 14:10:56 -08002433}
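/*
 * A LOCAL_BIND into the aliasing PPGTT is a two-step affair: the va range
 * is allocated first (only on the initial local bind, hence the
 * I915_VMA_LOCAL_BIND check above), then the PTEs are written. On failure
 * we free vma->pages only if no binding exists at all, as a pre-existing
 * GLOBAL_BIND still owns them.
 */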
2434
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002435static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
Ben Widawsky6f65e292013-12-06 14:10:56 -08002436{
Chris Wilson49d73912016-11-29 09:50:08 +00002437 struct drm_i915_private *i915 = vma->vm->i915;
Ben Widawsky6f65e292013-12-06 14:10:56 -08002438
Chris Wilson9c870d02016-10-24 13:42:15 +01002439 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2440 intel_runtime_pm_get(i915);
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002441 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
Chris Wilson9c870d02016-10-24 13:42:15 +01002442 intel_runtime_pm_put(i915);
2443 }
Ben Widawsky6f65e292013-12-06 14:10:56 -08002444
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002445 if (vma->flags & I915_VMA_LOCAL_BIND) {
2446 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2447
2448 vm->clear_range(vm, vma->node.start, vma->size);
2449 }
Daniel Vetter74163902012-02-15 23:50:21 +01002450}
2451
Chris Wilson03ac84f2016-10-28 13:58:36 +01002452void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2453 struct sg_table *pages)
Daniel Vetter74163902012-02-15 23:50:21 +01002454{
David Weinehall52a05c32016-08-22 13:32:44 +03002455 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2456 struct device *kdev = &dev_priv->drm.pdev->dev;
Chris Wilson307dc252016-08-05 10:14:12 +01002457 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawsky5c042282011-10-17 15:51:55 -07002458
Chris Wilson307dc252016-08-05 10:14:12 +01002459 if (unlikely(ggtt->do_idle_maps)) {
Chris Wilson228ec872017-03-30 09:53:41 +01002460 if (i915_gem_wait_for_idle(dev_priv, 0)) {
Chris Wilson307dc252016-08-05 10:14:12 +01002461			DRM_ERROR("Failed to wait for idle; VT-d may hang.\n");
2462 /* Wait a bit, in hopes it avoids the hang */
2463 udelay(10);
2464 }
2465 }
Ben Widawsky5c042282011-10-17 15:51:55 -07002466
Chris Wilson03ac84f2016-10-28 13:58:36 +01002467 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002468}
Daniel Vetter644ec022012-03-26 09:45:40 +02002469
Chris Wilson45b186f2016-12-16 07:46:42 +00002470static void i915_gtt_color_adjust(const struct drm_mm_node *node,
Chris Wilson42d6ab42012-07-26 11:49:32 +01002471 unsigned long color,
Thierry Reding440fd522015-01-23 09:05:06 +01002472 u64 *start,
2473 u64 *end)
Chris Wilson42d6ab42012-07-26 11:49:32 +01002474{
Chris Wilsona6508de2017-02-06 08:45:47 +00002475 if (node->allocated && node->color != color)
Chris Wilsonf51455d2017-01-10 14:47:34 +00002476 *start += I915_GTT_PAGE_SIZE;
Chris Wilson42d6ab42012-07-26 11:49:32 +01002477
Chris Wilsona6508de2017-02-06 08:45:47 +00002478 /* Also leave a space between the unallocated reserved node after the
2479 * GTT and any objects within the GTT, i.e. we use the color adjustment
2480 * to insert a guard page to prevent prefetches crossing over the
2481 * GTT boundary.
2482 */
Chris Wilsonb44f97f2016-12-16 07:46:40 +00002483 node = list_next_entry(node, node_list);
Chris Wilsona6508de2017-02-06 08:45:47 +00002484 if (node->color != color)
Chris Wilsonf51455d2017-01-10 14:47:34 +00002485 *end -= I915_GTT_PAGE_SIZE;
Chris Wilson42d6ab42012-07-26 11:49:32 +01002486}
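/*
 * Example: inserting a cacheable node between two uncacheable neighbours
 * shrinks the candidate hole by I915_GTT_PAGE_SIZE on each side, so the
 * allocator naturally leaves a one-page gap - the guard page - wherever
 * the cache colour changes.
 */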
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002487
Chris Wilson6cde9a02017-02-13 17:15:50 +00002488int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2489{
2490 struct i915_ggtt *ggtt = &i915->ggtt;
2491 struct i915_hw_ppgtt *ppgtt;
2492 int err;
2493
Chris Wilson57202f42017-02-15 08:43:56 +00002494 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
Chris Wilson1188bc62017-02-15 08:43:38 +00002495 if (IS_ERR(ppgtt))
2496 return PTR_ERR(ppgtt);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002497
Chris Wilsone565ceb2017-02-15 08:43:55 +00002498 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2499 err = -ENODEV;
2500 goto err_ppgtt;
2501 }
2502
Chris Wilson6cde9a02017-02-13 17:15:50 +00002503 if (ppgtt->base.allocate_va_range) {
Chris Wilsone565ceb2017-02-15 08:43:55 +00002504 /* Note we only pre-allocate as far as the end of the global
2505 * GTT. On 48b / 4-level page-tables, the difference is very,
2506 * very significant! We have to preallocate as GVT/vgpu does
2507 * not like the page directory disappearing.
2508 */
Chris Wilson6cde9a02017-02-13 17:15:50 +00002509 err = ppgtt->base.allocate_va_range(&ppgtt->base,
Chris Wilsone565ceb2017-02-15 08:43:55 +00002510 0, ggtt->base.total);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002511 if (err)
Chris Wilson1188bc62017-02-15 08:43:38 +00002512 goto err_ppgtt;
Chris Wilson6cde9a02017-02-13 17:15:50 +00002513 }
2514
Chris Wilson6cde9a02017-02-13 17:15:50 +00002515 i915->mm.aliasing_ppgtt = ppgtt;
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002516
Chris Wilson6cde9a02017-02-13 17:15:50 +00002517 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2518 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2519
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002520 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2521 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2522
Chris Wilson6cde9a02017-02-13 17:15:50 +00002523 return 0;
2524
Chris Wilson6cde9a02017-02-13 17:15:50 +00002525err_ppgtt:
Chris Wilson1188bc62017-02-15 08:43:38 +00002526 i915_ppgtt_put(ppgtt);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002527 return err;
2528}
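/*
 * From here on every GGTT bind/unbind goes through the aliasing_gtt_*
 * vfuncs installed above, which route LOCAL_BIND requests into the
 * aliasing PPGTT; i915_gem_fini_aliasing_ppgtt() swaps the plain hooks
 * back in.
 */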
2529
2530void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2531{
2532 struct i915_ggtt *ggtt = &i915->ggtt;
2533 struct i915_hw_ppgtt *ppgtt;
2534
2535 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2536 if (!ppgtt)
2537 return;
2538
Chris Wilson1188bc62017-02-15 08:43:38 +00002539 i915_ppgtt_put(ppgtt);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002540
2541 ggtt->base.bind_vma = ggtt_bind_vma;
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002542 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilson6cde9a02017-02-13 17:15:50 +00002543}
2544
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002545int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
Daniel Vetter644ec022012-03-26 09:45:40 +02002546{
Ben Widawskye78891c2013-01-25 16:41:04 -08002547	/* Let GEM manage all of the aperture.
2548 *
2549 * However, leave one page at the end still bound to the scratch page.
2550 * There are a number of places where the hardware apparently prefetches
2551 * past the end of the object, and we've seen multiple hangs with the
2552 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2553 * aperture. One page should be enough to keep any prefetching inside
2554 * of the aperture.
2555 */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002556 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsoned2f3452012-11-15 11:32:19 +00002557 unsigned long hole_start, hole_end;
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002558 struct drm_mm_node *entry;
Daniel Vetterfa76da32014-08-06 20:19:54 +02002559 int ret;
Daniel Vetter644ec022012-03-26 09:45:40 +02002560
Zhi Wangb02d22a2016-06-16 08:06:59 -04002561 ret = intel_vgt_balloon(dev_priv);
2562 if (ret)
2563 return ret;
Yu Zhang5dda8fa2015-02-10 19:05:48 +08002564
Chris Wilson95374d72016-10-12 10:05:20 +01002565 /* Reserve a mappable slot for our lockless error capture */
Chris Wilson4e64e552017-02-02 21:04:38 +00002566 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2567 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2568 0, ggtt->mappable_end,
2569 DRM_MM_INSERT_LOW);
Chris Wilson95374d72016-10-12 10:05:20 +01002570 if (ret)
2571 return ret;
2572
Chris Wilsoned2f3452012-11-15 11:32:19 +00002573 /* Clear any non-preallocated blocks */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002574 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
Chris Wilsoned2f3452012-11-15 11:32:19 +00002575 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2576 hole_start, hole_end);
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002577 ggtt->base.clear_range(&ggtt->base, hole_start,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002578 hole_end - hole_start);
Chris Wilsoned2f3452012-11-15 11:32:19 +00002579 }
2580
2581 /* And finally clear the reserved guard page */
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002582 ggtt->base.clear_range(&ggtt->base,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002583 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
Daniel Vetter6c5566a2014-08-06 15:04:50 +02002584
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002585 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
Chris Wilson6cde9a02017-02-13 17:15:50 +00002586 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
Chris Wilson95374d72016-10-12 10:05:20 +01002587 if (ret)
Chris Wilson6cde9a02017-02-13 17:15:50 +00002588 goto err;
Daniel Vetterfa76da32014-08-06 20:19:54 +02002589 }
2590
Daniel Vetter6c5566a2014-08-06 15:04:50 +02002591 return 0;
Chris Wilson95374d72016-10-12 10:05:20 +01002592
Chris Wilson95374d72016-10-12 10:05:20 +01002593err:
2594 drm_mm_remove_node(&ggtt->error_capture);
2595 return ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002596}
2597
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002598/**
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002599 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002600 * @dev_priv: i915 device
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002601 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002602void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002603{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002604 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilson94d4a2a2017-02-10 16:35:22 +00002605 struct i915_vma *vma, *vn;
2606
2607 ggtt->base.closed = true;
2608
2609 mutex_lock(&dev_priv->drm.struct_mutex);
2610 WARN_ON(!list_empty(&ggtt->base.active_list));
2611 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2612 WARN_ON(i915_vma_unbind(vma));
2613 mutex_unlock(&dev_priv->drm.struct_mutex);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002614
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002615 i915_gem_cleanup_stolen(&dev_priv->drm);
Imre Deaka4eba472016-01-19 15:26:32 +02002616
Chris Wilson1188bc62017-02-15 08:43:38 +00002617 mutex_lock(&dev_priv->drm.struct_mutex);
2618 i915_gem_fini_aliasing_ppgtt(dev_priv);
2619
Chris Wilson95374d72016-10-12 10:05:20 +01002620 if (drm_mm_node_allocated(&ggtt->error_capture))
2621 drm_mm_remove_node(&ggtt->error_capture);
2622
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002623 if (drm_mm_initialized(&ggtt->base.mm)) {
Zhi Wangb02d22a2016-06-16 08:06:59 -04002624 intel_vgt_deballoon(dev_priv);
Matthew Aulded9724d2016-11-17 21:04:10 +00002625 i915_address_space_fini(&ggtt->base);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002626 }
2627
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002628 ggtt->base.cleanup(&ggtt->base);
Chris Wilson1188bc62017-02-15 08:43:38 +00002629 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002630
2631 arch_phys_wc_del(ggtt->mtrr);
Chris Wilsonf7bbe782016-08-19 16:54:27 +01002632 io_mapping_fini(&ggtt->mappable);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002633}
Daniel Vetter70e32542014-08-06 15:04:57 +02002634
Daniel Vetter2c642b02015-04-14 17:35:26 +02002635static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002636{
2637 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2638 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2639 return snb_gmch_ctl << 20;
2640}
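/*
 * Worked example: a GGMS field of 2 decodes to 2 << 20 = 2MB of PTE
 * space. With 4-byte gen6 PTEs that is 512K entries, which
 * gen6_gmch_probe() later turns into 512K << PAGE_SHIFT = 2GB of GGTT
 * address space.
 */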
2641
Daniel Vetter2c642b02015-04-14 17:35:26 +02002642static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
Ben Widawsky9459d252013-11-03 16:53:55 -08002643{
2644 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2645 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2646 if (bdw_gmch_ctl)
2647 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
Ben Widawsky562d55d2014-05-27 16:53:08 -07002648
2649#ifdef CONFIG_X86_32
2650 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2651 if (bdw_gmch_ctl > 4)
2652 bdw_gmch_ctl = 4;
2653#endif
2654
Ben Widawsky9459d252013-11-03 16:53:55 -08002655 return bdw_gmch_ctl << 20;
2656}
2657
Daniel Vetter2c642b02015-04-14 17:35:26 +02002658static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002659{
2660 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2661 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2662
2663 if (gmch_ctrl)
2664 return 1 << (20 + gmch_ctrl);
2665
2666 return 0;
2667}
2668
Daniel Vetter2c642b02015-04-14 17:35:26 +02002669static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002670{
2671 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2672 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
Imre Deaka92d1a92017-05-10 12:21:52 +03002673 return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002674}
2675
Daniel Vetter2c642b02015-04-14 17:35:26 +02002676static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
Ben Widawsky9459d252013-11-03 16:53:55 -08002677{
2678 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2679 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
Imre Deaka92d1a92017-05-10 12:21:52 +03002680 return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
Ben Widawsky9459d252013-11-03 16:53:55 -08002681}
2682
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002683static size_t chv_get_stolen_size(u16 gmch_ctrl)
2684{
2685 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2686 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2687
2688 /*
2689 * 0x0 to 0x10: 32MB increments starting at 0MB
2690 * 0x11 to 0x16: 4MB increments starting at 8MB
2691	 * 0x17 to 0x1d: 4MB increments starting at 36MB
2692 */
2693 if (gmch_ctrl < 0x11)
Imre Deaka92d1a92017-05-10 12:21:52 +03002694 return (size_t)gmch_ctrl << 25;
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002695 else if (gmch_ctrl < 0x17)
Imre Deaka92d1a92017-05-10 12:21:52 +03002696 return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002697 else
Imre Deaka92d1a92017-05-10 12:21:52 +03002698 return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002699}
2700
Damien Lespiau66375012014-01-09 18:02:46 +00002701static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2702{
2703 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2704 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2705
2706 if (gen9_gmch_ctl < 0xf0)
Imre Deaka92d1a92017-05-10 12:21:52 +03002707 return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
Damien Lespiau66375012014-01-09 18:02:46 +00002708 else
2709		/* values 0xf0 and up encode 4MB increments starting at 4MB */
Imre Deaka92d1a92017-05-10 12:21:52 +03002710 return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
Damien Lespiau66375012014-01-09 18:02:46 +00002711}
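/*
 * Worked example: a GMS value of 0x02 decodes as 2 << 25 = 64MB of
 * stolen memory, while 0xf1 takes the 4MB branch:
 * (0xf1 - 0xf0 + 1) << 22 = 8MB.
 */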
2712
Chris Wilson34c998b2016-08-04 07:52:24 +01002713static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
Ben Widawsky63340132013-11-04 19:32:22 -08002714{
Chris Wilson49d73912016-11-29 09:50:08 +00002715 struct drm_i915_private *dev_priv = ggtt->base.i915;
2716 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson34c998b2016-08-04 07:52:24 +01002717 phys_addr_t phys_addr;
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002718 int ret;
Ben Widawsky63340132013-11-04 19:32:22 -08002719
2720 /* For Modern GENs the PTEs and register space are split in the BAR */
Chris Wilson34c998b2016-08-04 07:52:24 +01002721 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
Ben Widawsky63340132013-11-04 19:32:22 -08002722
Imre Deak2a073f892015-03-27 13:07:33 +02002723 /*
2724 * On BXT writes larger than 64 bit to the GTT pagetable range will be
2725 * dropped. For WC mappings in general we have 64 byte burst writes
2726 * when the WC buffer is flushed, so we can't use it, but have to
2727 * resort to an uncached mapping. The WC issue is easily caught by the
2728 * readback check when writing GTT PTE entries.
2729 */
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002730 if (IS_GEN9_LP(dev_priv))
Chris Wilson34c998b2016-08-04 07:52:24 +01002731 ggtt->gsm = ioremap_nocache(phys_addr, size);
Imre Deak2a073f892015-03-27 13:07:33 +02002732 else
Chris Wilson34c998b2016-08-04 07:52:24 +01002733 ggtt->gsm = ioremap_wc(phys_addr, size);
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002734 if (!ggtt->gsm) {
Chris Wilson34c998b2016-08-04 07:52:24 +01002735 DRM_ERROR("Failed to map the ggtt page table\n");
Ben Widawsky63340132013-11-04 19:32:22 -08002736 return -ENOMEM;
2737 }
2738
Chris Wilson84486612017-02-15 08:43:40 +00002739 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002740 if (ret) {
Ben Widawsky63340132013-11-04 19:32:22 -08002741 DRM_ERROR("Scratch setup failed\n");
2742 /* iounmap will also get called at remove, but meh */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002743 iounmap(ggtt->gsm);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002744 return ret;
Ben Widawsky63340132013-11-04 19:32:22 -08002745 }
2746
Mika Kuoppala4ad2af12015-06-30 18:16:39 +03002747 return 0;
Ben Widawsky63340132013-11-04 19:32:22 -08002748}
2749
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002750/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2751 * bits. When using advanced contexts, each context stores its own PAT, but
2752 * writing this data shouldn't be harmful even in those cases. */
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002753static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002754{
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002755 u64 pat;
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002756
2757 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
2758 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2759 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2760 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2761 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2762 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2763 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2764 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2765
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002766 if (!USES_PPGTT(dev_priv))
Rodrigo Vivid6a8b722014-11-05 16:56:36 -08002767 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2768 * so RTL will always use the value corresponding to
2769 * pat_sel = 000".
2770 * So let's disable cache for GGTT to avoid screen corruptions.
2771 * MOCS still can be used though.
2772 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2773 * before this patch, i.e. the same uncached + snooping access
2774 * like on gen6/7 seems to be in effect.
2775 * - So this just fixes blitter/render access. Again it looks
2776 * like it's not just uncached access, but uncached + snooping.
2777 * So we can still hold onto all our assumptions wrt cpu
2778 * clflushing on LLC machines.
2779 */
2780 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2781
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002782 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2783 * write would work. */
Ville Syrjälä7e435ad2015-09-18 20:03:25 +03002784 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2785 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002786}
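/*
 * The eight PAT entries are packed one per byte into a single 64-bit
 * value (GEN8_PPAT(index, value) shifts each 8-bit entry into place),
 * which is then written out as the two 32-bit register halves above.
 */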
2787
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002788static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2789{
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002790 u64 pat;
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002791
2792 /*
2793 * Map WB on BDW to snooped on CHV.
2794 *
2795 * Only the snoop bit has meaning for CHV, the rest is
2796 * ignored.
2797 *
Ville Syrjäläcf3d2622014-11-14 21:02:44 +02002798 * The hardware will never snoop for certain types of accesses:
2799 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2800 * - PPGTT page tables
2801 * - some other special cycles
2802 *
2803 * As with BDW, we also need to consider the following for GT accesses:
2804 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2805 * so RTL will always use the value corresponding to
2806 * pat_sel = 000".
2807 * Which means we must set the snoop bit in PAT entry 0
2808 * in order to keep the global status page working.
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002809 */
2810 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2811 GEN8_PPAT(1, 0) |
2812 GEN8_PPAT(2, 0) |
2813 GEN8_PPAT(3, 0) |
2814 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2815 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2816 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2817 GEN8_PPAT(7, CHV_PPAT_SNOOP);
2818
Ville Syrjälä7e435ad2015-09-18 20:03:25 +03002819 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2820 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002821}
2822
Chris Wilson34c998b2016-08-04 07:52:24 +01002823static void gen6_gmch_remove(struct i915_address_space *vm)
2824{
2825 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2826
2827 iounmap(ggtt->gsm);
Chris Wilson84486612017-02-15 08:43:40 +00002828 cleanup_scratch_page(vm);
Chris Wilson34c998b2016-08-04 07:52:24 +01002829}
2830
Joonas Lahtinend507d732016-03-18 10:42:58 +02002831static int gen8_gmch_probe(struct i915_ggtt *ggtt)
Ben Widawsky63340132013-11-04 19:32:22 -08002832{
Chris Wilson49d73912016-11-29 09:50:08 +00002833 struct drm_i915_private *dev_priv = ggtt->base.i915;
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002834 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson34c998b2016-08-04 07:52:24 +01002835 unsigned int size;
Ben Widawsky63340132013-11-04 19:32:22 -08002836 u16 snb_gmch_ctl;
Imre Deak45192902017-05-10 12:21:50 +03002837 int err;
Ben Widawsky63340132013-11-04 19:32:22 -08002838
2839 /* TODO: We're not aware of mappable constraints on gen8 yet */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002840 ggtt->mappable_base = pci_resource_start(pdev, 2);
2841 ggtt->mappable_end = pci_resource_len(pdev, 2);
Ben Widawsky63340132013-11-04 19:32:22 -08002842
Imre Deak45192902017-05-10 12:21:50 +03002843 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
2844 if (!err)
2845 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
2846 if (err)
2847 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
Ben Widawsky63340132013-11-04 19:32:22 -08002848
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002849 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
Ben Widawsky63340132013-11-04 19:32:22 -08002850
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002851 if (INTEL_GEN(dev_priv) >= 9) {
Joonas Lahtinend507d732016-03-18 10:42:58 +02002852 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
Chris Wilson34c998b2016-08-04 07:52:24 +01002853 size = gen8_get_total_gtt_size(snb_gmch_ctl);
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002854 } else if (IS_CHERRYVIEW(dev_priv)) {
Joonas Lahtinend507d732016-03-18 10:42:58 +02002855 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
Chris Wilson34c998b2016-08-04 07:52:24 +01002856 size = chv_get_total_gtt_size(snb_gmch_ctl);
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002857 } else {
Joonas Lahtinend507d732016-03-18 10:42:58 +02002858 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
Chris Wilson34c998b2016-08-04 07:52:24 +01002859 size = gen8_get_total_gtt_size(snb_gmch_ctl);
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002860 }
Ben Widawsky63340132013-11-04 19:32:22 -08002861
Chris Wilson34c998b2016-08-04 07:52:24 +01002862 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
Ben Widawsky63340132013-11-04 19:32:22 -08002863
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002864 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002865 chv_setup_private_ppat(dev_priv);
2866 else
2867 bdw_setup_private_ppat(dev_priv);
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002868
Chris Wilson34c998b2016-08-04 07:52:24 +01002869 ggtt->base.cleanup = gen6_gmch_remove;
Joonas Lahtinend507d732016-03-18 10:42:58 +02002870 ggtt->base.bind_vma = ggtt_bind_vma;
2871 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilsond6473f52016-06-10 14:22:59 +05302872 ggtt->base.insert_page = gen8_ggtt_insert_page;
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002873 ggtt->base.clear_range = nop_clear_range;
Chris Wilson48f112f2016-06-24 14:07:14 +01002874 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002875 ggtt->base.clear_range = gen8_ggtt_clear_range;
2876
2877 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002878
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07002879 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
2880 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
2881 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
2882 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
2883 if (ggtt->base.clear_range != nop_clear_range)
2884 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
2885 }
2886
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002887 ggtt->invalidate = gen6_ggtt_invalidate;
2888
Chris Wilson34c998b2016-08-04 07:52:24 +01002889 return ggtt_probe_common(ggtt, size);
Ben Widawsky63340132013-11-04 19:32:22 -08002890}
2891
Joonas Lahtinend507d732016-03-18 10:42:58 +02002892static int gen6_gmch_probe(struct i915_ggtt *ggtt)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002893{
Chris Wilson49d73912016-11-29 09:50:08 +00002894 struct drm_i915_private *dev_priv = ggtt->base.i915;
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002895 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson34c998b2016-08-04 07:52:24 +01002896 unsigned int size;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002897 u16 snb_gmch_ctl;
Imre Deak45192902017-05-10 12:21:50 +03002898 int err;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002899
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002900 ggtt->mappable_base = pci_resource_start(pdev, 2);
2901 ggtt->mappable_end = pci_resource_len(pdev, 2);
Ben Widawsky41907dd2013-02-08 11:32:47 -08002902
Ben Widawskybaa09f52013-01-24 13:49:57 -08002903 /* 64/512MB is the current min/max we actually know of, but this is just
2904 * a coarse sanity check.
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002905 */
Chris Wilson34c998b2016-08-04 07:52:24 +01002906 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
Joonas Lahtinend507d732016-03-18 10:42:58 +02002907 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002908 return -ENXIO;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002909 }
2910
Imre Deak45192902017-05-10 12:21:50 +03002911 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2912 if (!err)
2913 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2914 if (err)
2915 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002916 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002917
Joonas Lahtinend507d732016-03-18 10:42:58 +02002918 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002919
Chris Wilson34c998b2016-08-04 07:52:24 +01002920 size = gen6_get_total_gtt_size(snb_gmch_ctl);
2921 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002922
Joonas Lahtinend507d732016-03-18 10:42:58 +02002923 ggtt->base.clear_range = gen6_ggtt_clear_range;
Chris Wilsond6473f52016-06-10 14:22:59 +05302924 ggtt->base.insert_page = gen6_ggtt_insert_page;
Joonas Lahtinend507d732016-03-18 10:42:58 +02002925 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
2926 ggtt->base.bind_vma = ggtt_bind_vma;
2927 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilson34c998b2016-08-04 07:52:24 +01002928 ggtt->base.cleanup = gen6_gmch_remove;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002929
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002930 ggtt->invalidate = gen6_ggtt_invalidate;
2931
Chris Wilson34c998b2016-08-04 07:52:24 +01002932 if (HAS_EDRAM(dev_priv))
2933 ggtt->base.pte_encode = iris_pte_encode;
2934 else if (IS_HASWELL(dev_priv))
2935 ggtt->base.pte_encode = hsw_pte_encode;
2936 else if (IS_VALLEYVIEW(dev_priv))
2937 ggtt->base.pte_encode = byt_pte_encode;
2938 else if (INTEL_GEN(dev_priv) >= 7)
2939 ggtt->base.pte_encode = ivb_pte_encode;
2940 else
2941 ggtt->base.pte_encode = snb_pte_encode;
2942
2943 return ggtt_probe_common(ggtt, size);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002944}
2945
Chris Wilson34c998b2016-08-04 07:52:24 +01002946static void i915_gmch_remove(struct i915_address_space *vm)
Ben Widawskybaa09f52013-01-24 13:49:57 -08002947{
Chris Wilson34c998b2016-08-04 07:52:24 +01002948 intel_gmch_remove();
Ben Widawskybaa09f52013-01-24 13:49:57 -08002949}
2950
Joonas Lahtinend507d732016-03-18 10:42:58 +02002951static int i915_gmch_probe(struct i915_ggtt *ggtt)
Ben Widawskybaa09f52013-01-24 13:49:57 -08002952{
Chris Wilson49d73912016-11-29 09:50:08 +00002953 struct drm_i915_private *dev_priv = ggtt->base.i915;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002954 int ret;
2955
Chris Wilson91c8a322016-07-05 10:40:23 +01002956 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002957 if (!ret) {
2958 DRM_ERROR("failed to set up gmch\n");
2959 return -EIO;
2960 }
2961
Chris Wilsonedd1f2f2017-01-06 15:20:11 +00002962 intel_gtt_get(&ggtt->base.total,
2963 &ggtt->stolen_size,
2964 &ggtt->mappable_base,
2965 &ggtt->mappable_end);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002966
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002967 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
Chris Wilsond6473f52016-06-10 14:22:59 +05302968 ggtt->base.insert_page = i915_ggtt_insert_page;
Joonas Lahtinend507d732016-03-18 10:42:58 +02002969 ggtt->base.insert_entries = i915_ggtt_insert_entries;
2970 ggtt->base.clear_range = i915_ggtt_clear_range;
2971 ggtt->base.bind_vma = ggtt_bind_vma;
2972 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilson34c998b2016-08-04 07:52:24 +01002973 ggtt->base.cleanup = i915_gmch_remove;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002974
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002975 ggtt->invalidate = gmch_ggtt_invalidate;
2976
Joonas Lahtinend507d732016-03-18 10:42:58 +02002977 if (unlikely(ggtt->do_idle_maps))
Chris Wilsonc0a7f812013-12-30 12:16:15 +00002978 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
2979
Ben Widawskybaa09f52013-01-24 13:49:57 -08002980 return 0;
2981}
2982
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002983/**
Chris Wilson0088e522016-08-04 07:52:21 +01002984 * i915_ggtt_probe_hw - Probe GGTT hardware location
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002985 * @dev_priv: i915 device
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002986 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002987int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
Ben Widawskybaa09f52013-01-24 13:49:57 -08002988{
Joonas Lahtinen62106b42016-03-18 10:42:57 +02002989 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002990 int ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002991
Chris Wilson49d73912016-11-29 09:50:08 +00002992 ggtt->base.i915 = dev_priv;
Chris Wilson84486612017-02-15 08:43:40 +00002993 ggtt->base.dma = &dev_priv->drm.pdev->dev;
Mika Kuoppalac114f762015-06-25 18:35:13 +03002994
Chris Wilson34c998b2016-08-04 07:52:24 +01002995 if (INTEL_GEN(dev_priv) <= 5)
2996 ret = i915_gmch_probe(ggtt);
2997 else if (INTEL_GEN(dev_priv) < 8)
2998 ret = gen6_gmch_probe(ggtt);
2999 else
3000 ret = gen8_gmch_probe(ggtt);
Ben Widawskya54c0c22013-01-24 14:45:00 -08003001 if (ret)
Ben Widawskybaa09f52013-01-24 13:49:57 -08003002 return ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003003
Chris Wilsondb9309a2017-01-05 15:30:23 +00003004 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3005 * This is easier than doing range restriction on the fly, as we
3006 * currently don't have any bits spare to pass in this upper
3007 * restriction!
3008 */
3009 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
3010 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3011 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3012 }
3013
Chris Wilsonc890e2d2016-03-18 10:42:59 +02003014 if ((ggtt->base.total - 1) >> 32) {
3015 DRM_ERROR("We never expected a Global GTT with more than 32bits"
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003016 " of address space! Found %lldM!\n",
Chris Wilsonc890e2d2016-03-18 10:42:59 +02003017 ggtt->base.total >> 20);
3018 ggtt->base.total = 1ULL << 32;
3019 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3020 }
3021
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003022 if (ggtt->mappable_end > ggtt->base.total) {
3023 DRM_ERROR("mappable aperture extends past end of GGTT,"
3024 " aperture=%llx, total=%llx\n",
3025 ggtt->mappable_end, ggtt->base.total);
3026 ggtt->mappable_end = ggtt->base.total;
3027 }
3028
Ben Widawskybaa09f52013-01-24 13:49:57 -08003029 /* GMADR is the PCI mmio aperture into the global GTT. */
Mika Kuoppalac44ef602015-06-25 18:35:05 +03003030 DRM_INFO("Memory usable by graphics device = %lluM\n",
Joonas Lahtinen62106b42016-03-18 10:42:57 +02003031 ggtt->base.total >> 20);
3032 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
Chris Wilsonedd1f2f2017-01-06 15:20:11 +00003033 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
Chris Wilson80debff2017-05-25 13:16:12 +01003034 if (intel_vtd_active())
Daniel Vetter5db6c732014-03-31 16:23:04 +02003035 DRM_INFO("VT-d active for gfx access\n");
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08003036
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003037 return 0;
Chris Wilson0088e522016-08-04 07:52:21 +01003038}
3039
3040/**
3041 * i915_ggtt_init_hw - Initialize GGTT hardware
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003042 * @dev_priv: i915 device
Chris Wilson0088e522016-08-04 07:52:21 +01003043 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003044int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
Chris Wilson0088e522016-08-04 07:52:21 +01003045{
Chris Wilson0088e522016-08-04 07:52:21 +01003046 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3047 int ret;
3048
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003049 INIT_LIST_HEAD(&dev_priv->vm_list);
3050
Chris Wilsona6508de2017-02-06 08:45:47 +00003051 /* Note that we use page colouring to enforce a guard page at the
3052 * end of the address space. This is required as the CS may prefetch
3053 * beyond the end of the batch buffer, across the page boundary,
3054 * and beyond the end of the GTT if we do not provide a guard.
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003055 */
Chris Wilson80b204b2016-10-28 13:58:58 +01003056 mutex_lock(&dev_priv->drm.struct_mutex);
Chris Wilson80b204b2016-10-28 13:58:58 +01003057 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
Chris Wilsona6508de2017-02-06 08:45:47 +00003058 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003059 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
Chris Wilson80b204b2016-10-28 13:58:58 +01003060 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003061
Chris Wilsonf7bbe782016-08-19 16:54:27 +01003062 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3063 dev_priv->ggtt.mappable_base,
3064 dev_priv->ggtt.mappable_end)) {
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003065 ret = -EIO;
3066 goto out_gtt_cleanup;
3067 }
3068
3069 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3070
Chris Wilson0088e522016-08-04 07:52:21 +01003071 /*
3072 * Initialise stolen early so that we may reserve preallocated
3073 * objects for the BIOS to KMS transition.
3074 */
Tvrtko Ursulin7ace3d32016-11-16 08:55:35 +00003075 ret = i915_gem_init_stolen(dev_priv);
Chris Wilson0088e522016-08-04 07:52:21 +01003076 if (ret)
3077 goto out_gtt_cleanup;
3078
3079 return 0;
Imre Deaka4eba472016-01-19 15:26:32 +02003080
3081out_gtt_cleanup:
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003082 ggtt->base.cleanup(&ggtt->base);
Imre Deaka4eba472016-01-19 15:26:32 +02003083 return ret;
Daniel Vetter644ec022012-03-26 09:45:40 +02003084}
Ben Widawsky6f65e292013-12-06 14:10:56 -08003085
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003086int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
Ville Syrjäläac840ae2016-05-06 21:35:55 +03003087{
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003088 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
Ville Syrjäläac840ae2016-05-06 21:35:55 +03003089 return -EIO;
3090
3091 return 0;
3092}
3093
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003094void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3095{
3096 i915->ggtt.invalidate = guc_ggtt_invalidate;
3097}
3098
3099void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3100{
3101 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3102}
3103
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00003104void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
Daniel Vetterfa423312015-04-14 17:35:23 +02003105{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003106 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003107 struct drm_i915_gem_object *obj, *on;
Daniel Vetterfa423312015-04-14 17:35:23 +02003108
Chris Wilsondc979972016-05-10 14:10:04 +01003109 i915_check_and_clear_faults(dev_priv);
Daniel Vetterfa423312015-04-14 17:35:23 +02003110
3111 /* First fill our portion of the GTT with scratch pages */
Chris Wilson381b9432017-02-15 08:43:54 +00003112 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
Daniel Vetterfa423312015-04-14 17:35:23 +02003113
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003114 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3115
3116 /* clflush objects bound into the GGTT and rebind them. */
3117 list_for_each_entry_safe(obj, on,
Joonas Lahtinen56cea322016-11-02 12:16:04 +02003118 &dev_priv->mm.bound_list, global_link) {
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003119 bool ggtt_bound = false;
3120 struct i915_vma *vma;
3121
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003122 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003123 if (vma->vm != &ggtt->base)
Tvrtko Ursulin2c3d9982015-07-06 15:15:01 +01003124 continue;
Daniel Vetterfa423312015-04-14 17:35:23 +02003125
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003126 if (!i915_vma_unbind(vma))
3127 continue;
3128
Tvrtko Ursulin2c3d9982015-07-06 15:15:01 +01003129 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3130 PIN_UPDATE));
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003131 ggtt_bound = true;
Tvrtko Ursulin2c3d9982015-07-06 15:15:01 +01003132 }
3133
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003134 if (ggtt_bound)
Chris Wilson975f7ff2016-05-14 07:26:34 +01003135 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
Daniel Vetterfa423312015-04-14 17:35:23 +02003136 }
3137
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003138 ggtt->base.closed = false;
3139
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00003140 if (INTEL_GEN(dev_priv) >= 8) {
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02003141 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
Daniel Vetterfa423312015-04-14 17:35:23 +02003142 chv_setup_private_ppat(dev_priv);
3143 else
3144 bdw_setup_private_ppat(dev_priv);
3145
3146 return;
3147 }
3148
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00003149 if (USES_PPGTT(dev_priv)) {
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003150 struct i915_address_space *vm;
3151
Daniel Vetterfa423312015-04-14 17:35:23 +02003152 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
Joonas Lahtinene5716f52016-04-07 11:08:03 +03003153 struct i915_hw_ppgtt *ppgtt;
Daniel Vetterfa423312015-04-14 17:35:23 +02003154
Chris Wilson2bfa9962016-08-04 07:52:25 +01003155 if (i915_is_ggtt(vm))
Daniel Vetterfa423312015-04-14 17:35:23 +02003156 ppgtt = dev_priv->mm.aliasing_ppgtt;
Joonas Lahtinene5716f52016-04-07 11:08:03 +03003157 else
3158 ppgtt = i915_vm_to_ppgtt(vm);
Daniel Vetterfa423312015-04-14 17:35:23 +02003159
Chris Wilson16a011c2017-02-15 08:43:45 +00003160 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
Daniel Vetterfa423312015-04-14 17:35:23 +02003161 }
3162 }
3163
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003164 i915_ggtt_invalidate(dev_priv);
Daniel Vetterfa423312015-04-14 17:35:23 +02003165}
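/*
 * Summary of the resume path above: the whole GGTT is first reset to
 * scratch, every object on the bound list is unbound and rebound
 * (rewriting its PTEs), and then either the PPAT is reprogrammed (gen8+,
 * which returns early) or the PPGTT page directories are rewritten and
 * the GGTT TLBs invalidated (gen6/7).
 */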
3166
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003167static struct scatterlist *
Ville Syrjälä2d7f3bd2016-01-14 15:22:11 +02003168rotate_pages(const dma_addr_t *in, unsigned int offset,
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003169 unsigned int width, unsigned int height,
Ville Syrjälä87130252016-01-20 21:05:23 +02003170 unsigned int stride,
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003171 struct sg_table *st, struct scatterlist *sg)
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003172{
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003173 unsigned int column, row;
3174 unsigned int src_idx;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003175
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003176 for (column = 0; column < width; column++) {
Ville Syrjälä87130252016-01-20 21:05:23 +02003177 src_idx = stride * (height - 1) + column;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003178 for (row = 0; row < height; row++) {
3179 st->nents++;
3180 /* We don't need the pages, but need to initialize
3181 * the entries so the sg list can be happily traversed.
3182			 * All we need are the DMA addresses.
3183 */
3184 sg_set_page(sg, NULL, PAGE_SIZE, 0);
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003185 sg_dma_address(sg) = in[offset + src_idx];
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003186 sg_dma_len(sg) = PAGE_SIZE;
3187 sg = sg_next(sg);
Ville Syrjälä87130252016-01-20 21:05:23 +02003188 src_idx -= stride;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003189 }
3190 }
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003191
3192 return sg;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003193}
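/*
 * Worked example: for a 2x2 tile grid with stride 2, column 0 emits
 * source pages 2 then 0, and column 1 emits 3 then 1 - each source
 * column is walked bottom-to-top to build the rotated view.
 */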
3194
Chris Wilsonba7a5742017-02-15 08:43:35 +00003195static noinline struct sg_table *
3196intel_rotate_pages(struct intel_rotation_info *rot_info,
3197 struct drm_i915_gem_object *obj)
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003198{
Chris Wilson75c7b0b2017-02-15 08:43:57 +00003199 const unsigned long n_pages = obj->base.size / PAGE_SIZE;
Ville Syrjälä6687c902015-09-15 13:16:41 +03003200 unsigned int size = intel_rotation_info_size(rot_info);
Dave Gordon85d12252016-05-20 11:54:06 +01003201 struct sgt_iter sgt_iter;
3202 dma_addr_t dma_addr;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003203 unsigned long i;
3204 dma_addr_t *page_addr_list;
3205 struct sg_table *st;
Tvrtko Ursulin89e3e142015-09-21 10:45:34 +01003206 struct scatterlist *sg;
Tvrtko Ursulin1d00dad2015-03-25 10:15:26 +00003207 int ret = -ENOMEM;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003208
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003209 /* Allocate a temporary list of source pages for random access. */
Dave Gordon85d12252016-05-20 11:54:06 +01003210 page_addr_list = drm_malloc_gfp(n_pages,
Chris Wilsonf2a85e12016-04-08 12:11:13 +01003211 sizeof(dma_addr_t),
3212 GFP_TEMPORARY);
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003213 if (!page_addr_list)
3214 return ERR_PTR(ret);
3215
3216 /* Allocate target SG list. */
3217 st = kmalloc(sizeof(*st), GFP_KERNEL);
3218 if (!st)
3219 goto err_st_alloc;
3220
Ville Syrjälä6687c902015-09-15 13:16:41 +03003221 ret = sg_alloc_table(st, size, GFP_KERNEL);
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003222 if (ret)
3223 goto err_sg_alloc;
3224
3225 /* Populate source page list from the object. */
3226 i = 0;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003227 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
Dave Gordon85d12252016-05-20 11:54:06 +01003228 page_addr_list[i++] = dma_addr;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003229
Dave Gordon85d12252016-05-20 11:54:06 +01003230 GEM_BUG_ON(i != n_pages);
Ville Syrjälä11f20322016-02-15 22:54:46 +02003231 st->nents = 0;
3232 sg = st->sgl;
3233
Ville Syrjälä6687c902015-09-15 13:16:41 +03003234 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3235 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3236 rot_info->plane[i].width, rot_info->plane[i].height,
3237 rot_info->plane[i].stride, st, sg);
Tvrtko Ursulin89e3e142015-09-21 10:45:34 +01003238 }
3239
Ville Syrjälä6687c902015-09-15 13:16:41 +03003240 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3241 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003242
3243 drm_free_large(page_addr_list);
3244
3245 return st;
3246
3247err_sg_alloc:
3248 kfree(st);
3249err_st_alloc:
3250 drm_free_large(page_addr_list);
3251
Ville Syrjälä6687c902015-09-15 13:16:41 +03003252 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3253 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3254
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003255 return ERR_PTR(ret);
3256}
3257
Chris Wilsonba7a5742017-02-15 08:43:35 +00003258static noinline struct sg_table *
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003259intel_partial_pages(const struct i915_ggtt_view *view,
3260 struct drm_i915_gem_object *obj)
3261{
3262 struct sg_table *st;
Chris Wilsond2a84a72016-10-28 13:58:34 +01003263 struct scatterlist *sg, *iter;
Chris Wilson8bab11932017-01-14 00:28:25 +00003264 unsigned int count = view->partial.size;
Chris Wilsond2a84a72016-10-28 13:58:34 +01003265 unsigned int offset;
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003266 int ret = -ENOMEM;
3267
3268 st = kmalloc(sizeof(*st), GFP_KERNEL);
3269 if (!st)
3270 goto err_st_alloc;
3271
Chris Wilsond2a84a72016-10-28 13:58:34 +01003272 ret = sg_alloc_table(st, count, GFP_KERNEL);
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003273 if (ret)
3274 goto err_sg_alloc;
3275
Chris Wilson8bab11932017-01-14 00:28:25 +00003276 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
Chris Wilsond2a84a72016-10-28 13:58:34 +01003277 GEM_BUG_ON(!iter);
3278
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003279 sg = st->sgl;
3280 st->nents = 0;
Chris Wilsond2a84a72016-10-28 13:58:34 +01003281 do {
3282 unsigned int len;
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003283
Chris Wilsond2a84a72016-10-28 13:58:34 +01003284 len = min(iter->length - (offset << PAGE_SHIFT),
3285 count << PAGE_SHIFT);
3286 sg_set_page(sg, NULL, len, 0);
3287 sg_dma_address(sg) =
3288 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3289 sg_dma_len(sg) = len;
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003290
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003291 st->nents++;
Chris Wilsond2a84a72016-10-28 13:58:34 +01003292 count -= len >> PAGE_SHIFT;
3293 if (count == 0) {
3294 sg_mark_end(sg);
3295 return st;
3296 }
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003297
Chris Wilsond2a84a72016-10-28 13:58:34 +01003298 sg = __sg_next(sg);
3299 iter = __sg_next(iter);
3300 offset = 0;
3301 } while (1);
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003302
3303err_sg_alloc:
3304 kfree(st);
3305err_st_alloc:
3306 return ERR_PTR(ret);
3307}
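/*
 * Note that the partial view borrows the object's existing DMA
 * addresses: each output scatterlist entry aliases a slice of a source
 * entry, starting view->partial.offset pages in, until count pages have
 * been covered.
 */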
3308
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02003309static int
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003310i915_get_ggtt_vma_pages(struct i915_vma *vma)
3311{
Chris Wilsonba7a5742017-02-15 08:43:35 +00003312 int ret;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003313
Chris Wilson2c3a3f42016-11-04 10:30:01 +00003314 /* The vma->pages are only valid within the lifespan of the borrowed
3315	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, the
3316	 * vma->pages must be too. A simple rule is that vma->pages must only
3317 * be accessed when the obj->mm.pages are pinned.
3318 */
3319 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3320
Chris Wilsonba7a5742017-02-15 08:43:35 +00003321 switch (vma->ggtt_view.type) {
3322 case I915_GGTT_VIEW_NORMAL:
3323 vma->pages = vma->obj->mm.pages;
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003324 return 0;
3325
Chris Wilsonba7a5742017-02-15 08:43:35 +00003326 case I915_GGTT_VIEW_ROTATED:
Chris Wilson247177d2016-08-15 10:48:47 +01003327 vma->pages =
Chris Wilsonba7a5742017-02-15 08:43:35 +00003328 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3329 break;
3330
3331 case I915_GGTT_VIEW_PARTIAL:
Chris Wilson247177d2016-08-15 10:48:47 +01003332 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
Chris Wilsonba7a5742017-02-15 08:43:35 +00003333 break;
3334
3335 default:
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003336 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3337 vma->ggtt_view.type);
Chris Wilsonba7a5742017-02-15 08:43:35 +00003338 return -EINVAL;
3339 }
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003340
Chris Wilsonba7a5742017-02-15 08:43:35 +00003341 ret = 0;
3342 if (unlikely(IS_ERR(vma->pages))) {
Chris Wilson247177d2016-08-15 10:48:47 +01003343 ret = PTR_ERR(vma->pages);
3344 vma->pages = NULL;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003345 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3346 vma->ggtt_view.type, ret);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003347 }
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003348 return ret;
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003349}
3350
Chris Wilsone007b192017-01-11 11:23:10 +00003351/**
Chris Wilson625d9882017-01-11 11:23:11 +00003352 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
Chris Wilsona4dbf7c2017-01-12 16:45:59 +00003353 * @vm: the &struct i915_address_space
3354 * @node: the &struct drm_mm_node (typically i915_vma.node)
3355 * @size: how much space to allocate inside the GTT,
3356 * must be #I915_GTT_PAGE_SIZE aligned
3357 * @offset: where to insert inside the GTT,
3358 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3359 * (@offset + @size) must fit within the address space
3360 * @color: color to apply to node, if this node is not from a VMA,
3361 * color must be #I915_COLOR_UNEVICTABLE
3362 * @flags: control search and eviction behaviour
Chris Wilson625d9882017-01-11 11:23:11 +00003363 *
3364 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3365 * the address space (using @size and @color). If the @node does not fit, it
3366 * tries to evict any overlapping nodes from the GTT, including any
3367 * neighbouring nodes if the colors do not match (to ensure guard pages between
3368 * differing domains). See i915_gem_evict_for_node() for the gory details
3369 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3370 * evicting active overlapping objects, and any overlapping node that is pinned
3371 * or marked as unevictable will also result in failure.
3372 *
3373 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3374 * asked to wait for eviction and interrupted.
3375 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}

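/*
 * Usage sketch for i915_gem_gtt_reserve() (hypothetical caller and
 * values, not taken from the driver; assumes a struct i915_ggtt *ggtt
 * in scope): pin one page at a fixed GGTT offset, without blocking on
 * active objects, then release it.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&ggtt->base, &node,
 *				   I915_GTT_PAGE_SIZE, 0,
 *				   I915_COLOR_UNEVICTABLE,
 *				   PIN_NONBLOCK);
 *	if (err)
 *		return err;
 *
 *	(program PTEs for node.start here)
 *
 *	drm_mm_remove_node(&node);
 *
 * On success the node owns [0, I915_GTT_PAGE_SIZE) in the GTT until
 * drm_mm_remove_node() is called.
 */
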
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}

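/*
 * Worked example for random_offset() (illustrative numbers): with
 * start=0, end=1 MiB, len=64 KiB and align=4 KiB, the usable span is
 * round_down(1M - 64K, 4K) - round_up(0, 4K) = 983040 bytes. A random
 * remainder in [0, 983040) is added to start and rounded up to the
 * next 4 KiB boundary, so the returned offset plus len never exceeds
 * end.
 */
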
/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction; failing that, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)            (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between the different GTTs, try the
	 * single random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
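
/*
 * Usage sketch for i915_gem_gtt_insert() (hypothetical caller and
 * values, not taken from the driver; assumes a struct i915_ggtt *ggtt
 * in scope): find any 64 KiB hole in the low 4 GiB of the GGTT,
 * preferring the mappable aperture and skipping active objects during
 * eviction.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(&ggtt->base, &node,
 *				  SZ_64K, 0, I915_COLOR_UNEVICTABLE,
 *				  0, BIT_ULL(32),
 *				  PIN_MAPPABLE | PIN_NONBLOCK);
 *	if (err)
 *		return err;
 *
 *	(node.start now holds the chosen GTT offset)
 *
 * SZ_64K comes from <linux/sizes.h> and BIT_ULL() from
 * <linux/bitops.h>; the caller shown here is an assumption for
 * illustration only.
 */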

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif