/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing
 * pages in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * The above would represent a normal GGTT view as normally mapped for GPU or
 * CPU rendering. In contrast, fed to the display engine would be an
 * alternative view which could look something like this:
 *
 *    1212
 *    3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate
 * on, or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_ggtt_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_ggtt_vma_pages function. This table is stored in the vma.ggtt_view
 * and exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that a passed
 * in struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

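/*
 * Illustrative sketch only (not called by the driver): how a caller might
 * pin an alternative (rotated) view of an object into the GGTT. The function
 * name and the plane parameters below are made up for the example; real
 * callers derive them from the framebuffer. Because the core API has copy
 * semantics, a stack-allocated view is fine.
 */
static inline struct i915_vma * __maybe_unused
example_pin_rotated_view(struct drm_i915_gem_object *obj)
{
	const struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_ROTATED,
		.rotated.plane[0] = {
			.width = 2,
			.height = 2,
			.stride = 2,
			.offset = 0,
		},
	};

	/* The caller is assumed to hold struct_mutex at this point. */
	return i915_gem_object_ggtt_pin(obj, &view, 0, 0, 0);
}
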
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

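/*
 * GGTT invalidation is platform specific: gen6+ uses an uncached MMIO write
 * to GFX_FLSH_CNTL, the GuC variant additionally invalidates the GuC TLB,
 * and older GMCH-based platforms go through the chipset flush. The ggtt
 * struct carries a function pointer selecting the right variant.
 */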
static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

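/*
 * Sanitize the i915.enable_ppgtt module parameter against what the hardware
 * and the submission mechanism actually support. Both the parameter and the
 * return value use the same encoding: 0 = disabled, 1 = aliasing ppgtt,
 * 2 = full ppgtt, 3 = full ppgtt with 48bit addressing; anything else
 * selects the best supported mode.
 */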
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
						 vma->size);
		if (ret)
			return ret;
	}

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

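/*
 * Worked example of the encoding below: a page at dma address 0x1000 mapped
 * with I915_CACHE_LLC encodes to
 * _PAGE_PRESENT | _PAGE_RW | 0x1000 | PPAT_CACHED_INDEX.
 */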
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

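/*
 * Page-table pages come from a small per-vm cache: pages freed back to the
 * address space are stashed in vm->free_pages (a pagevec) and reused before
 * we fall back to alloc_page(). When the selftest fault-injection point
 * fires, everything is shrunk first to apply memory pressure and exercise
 * the reclaim paths.
 */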
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	if (vm->free_pages.nr)
		return vm->free_pages.pages[--vm->free_pages.nr];

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	if (vm->pt_kmap_wc)
		set_pages_array_wc(&page, 1);

	return page;
}

static void vm_free_pages_release(struct i915_address_space *vm)
{
	GEM_BUG_ON(!pagevec_count(&vm->free_pages));

	if (vm->pt_kmap_wc)
		set_pages_array_wb(vm->free_pages.pages,
				   pagevec_count(&vm->free_pages));

	__pagevec_release(&vm->free_pages);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	if (!pagevec_add(&vm->free_pages, page))
		vm_free_pages_release(vm);
}

static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))

static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);
	int i;

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
	cleanup_page_dma(vm, &vm->scratch_page);
}

static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill32_px(vm, pt,
		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	unsigned int i;

	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	for (i = 0; i < I915_PDES; i++)
		pd->page_table[i] = vm->scratch_pt;
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	unsigned int i;

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	for (i = 0; i < pdpes; i++)
		pdp->page_directory[i] = vm->scratch_pd;

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	unsigned int i;

	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
		pml4->pdps[i] = vm->scratch_pdp;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				u64 start, u64 length)
{
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t *vaddr;

	GEM_BUG_ON(num_entries > pt->used_ptes);

	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;

	vaddr = kmap_atomic_px(pt);
	while (pte < pte_end)
		vaddr[pte++] = scratch_pte;
	kunmap_atomic(vaddr);

	return false;
}

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				u64 start, u64 length)
{
	struct i915_page_table *pt;
	u32 pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		GEM_BUG_ON(pt == vm->scratch_pt);

		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;

		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
		GEM_BUG_ON(!pd->used_pdes);
		pd->used_pdes--;

		free_pt(vm, pt);
	}

	return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
	if (!use_4lvl(vm))
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 u64 start, u64 length)
{
	struct i915_page_directory *pd;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		GEM_BUG_ON(pd == vm->scratch_pd);

		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;

		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;

		free_pd(vm, pd);
	}

	return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always the scratch pml4e.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	unsigned int pml4e;

	GEM_BUG_ON(!use_4lvl(vm));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		GEM_BUG_ON(pdp == vm->scratch_pdp);

		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;

		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
	}
}

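/*
 * Iterator state for walking an object's backing store: the current
 * scatterlist element plus the dma address range it spans. It is advanced a
 * page at a time by gen8_ppgtt_insert_pte_entries() below.
 */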
struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
};

struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		 gen8_pml4e_index(start),
		 gen8_pdpe_index(start),
		 gen8_pde_index(start),
		 gen8_pte_index(start),
	};
}

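/*
 * A worked example of the decomposition above: with 4KiB pages and 512
 * entries per level, a pte covers 4KiB, a pde 2MiB, a pdpe 1GiB and a
 * pml4e 512GiB. So start = 3GiB + 2MiB + 4KiB decomposes to
 * { .pml4e = 0, .pdpe = 3, .pde = 1, .pte = 1 }.
 */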
static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
			      enum i915_cache_level cache_level)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
	gen8_pte_t *vaddr;
	bool ret;

	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
	do {
		vaddr[idx->pte] = pte_encode | iter->dma;

		iter->dma += PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

				/* Limited by sg length for 3lvl */
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
					ret = true;
					break;
				}

				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
				pd = pdp->page_directory[idx->pdpe];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
		}
	} while (1);
	kunmap_atomic(vaddr);

	return ret;
}

static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
				      cache_level);
}

static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
					     &idx, cache_level))
		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
}

static void gen8_free_page_tables(struct i915_address_space *vm,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for (i = 0; i < I915_PDES; i++) {
		if (pd->page_table[i] != vm->scratch_pt)
			free_pt(vm, pd->page_table[i]);
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(vm);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (use_4lvl(vm)) {
		vm->scratch_pdp = alloc_pdp(vm);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (use_4lvl(vm))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(vm, vm->scratch_pd);
free_pt:
	free_pt(vm, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(vm);

	return ret;
}

static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	enum vgt_g2v_type msg;
	int i;

	if (use_4lvl(vm)) {
		const u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	if (use_4lvl(vm))
		free_pdp(vm, vm->scratch_pdp);
	free_pd(vm, vm->scratch_pd);
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	int i;

	for (i = 0; i < pdpes; i++) {
		if (pdp->page_directory[i] == vm->scratch_pd)
			continue;

		gen8_free_page_tables(vm, pdp->page_directory[i]);
		free_pd(vm, pdp->page_directory[i]);
	}

	free_pdp(vm, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
			continue;

		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(&ppgtt->base, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (use_4lvl(vm))
		gen8_ppgtt_cleanup_4lvl(ppgtt);
	else
		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);

	gen8_free_scratch(vm);
}

static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       u64 start, u64 length)
{
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind;

			gen8_initialize_pt(vm, pt);

			gen8_ppgtt_set_pde(vm, pd, pt, pde);
			pd->used_pdes++;
			GEM_BUG_ON(pd->used_pdes > I915_PDES);
		}

		pt->used_ptes += gen8_pte_count(start, length);
	}
	return 0;

unwind:
	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
{
	struct i915_page_directory *pd;
	u64 from = start;
	unsigned int pdpe;
	int ret;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (pd == vm->scratch_pd) {
			pd = alloc_pd(vm);
			if (IS_ERR(pd))
				goto unwind;

			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			pdp->used_pdpes++;
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
		if (unlikely(ret))
			goto unwind_pd;
	}

	return 0;

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;

			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}

		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
		if (unlikely(ret))
			goto unwind_pdp;
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
}

static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  u64 start, u64 length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory *pd;
	u32 pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		u64 pd_len = length;
		u64 pd_start = start;
		u32 pde;

		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			u32 pte;
			gen8_pte_t *pt_vaddr;

			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
				continue;

			pt_vaddr = kmap_atomic_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				u64 va = (pdpe << GEN8_PDPE_SHIFT |
					  pde << GEN8_PDE_SHIFT |
					  pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	u64 start = 0, length = ppgtt->base.total;

	if (use_4lvl(vm)) {
		u64 pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
				continue;

			seq_printf(m, " PML4E #%llu\n", pml4e);
			gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
		}
	} else {
		gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
	}
}

static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
	struct i915_page_directory *pd;
	u64 start = 0, length = ppgtt->base.total;
	u64 from = start;
1296 unsigned int pdpe;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001297
Chris Wilsone2b763c2017-02-15 08:43:48 +00001298 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1299 pd = alloc_pd(vm);
1300 if (IS_ERR(pd))
1301 goto unwind;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001302
Chris Wilsone2b763c2017-02-15 08:43:48 +00001303 gen8_initialize_pd(vm, pd);
1304 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1305 pdp->used_pdpes++;
1306 }
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001307
Chris Wilsone2b763c2017-02-15 08:43:48 +00001308 pdp->used_pdpes++; /* never remove */
1309 return 0;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001310
Chris Wilsone2b763c2017-02-15 08:43:48 +00001311unwind:
1312 start -= from;
1313 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1314 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1315 free_pd(vm, pd);
1316 }
1317 pdp->used_pdpes = 0;
1318 return -ENOMEM;
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001319}
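/*
 * Under a vGPU the host shadows the guest page tables, so the top-level
 * PDPs are populated once up front; the extra used_pdpes reference taken
 * above (the "never remove" bump) keeps the unwind/free paths from ever
 * releasing them while the vm lives.
 */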
1320
Daniel Vettereb0b44a2015-03-18 14:47:59 +01001321/*
Ben Widawskyf3a964b2014-02-19 22:05:42 -08001322 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
1323 * registers with a net effect resembling a 2-level page table in normal x86
1324 * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
1325 * legacy 32b address space.
Ben Widawsky37aca442013-11-04 20:47:32 -08001326 *
Ben Widawskyf3a964b2014-02-19 22:05:42 -08001327 */
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001328static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
Ben Widawsky37aca442013-11-04 20:47:32 -08001329{
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001330 struct i915_address_space *vm = &ppgtt->base;
1331 struct drm_i915_private *dev_priv = vm->i915;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001332 int ret;
Michel Thierry69876be2015-04-08 12:13:27 +01001333
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001334 ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1335 1ULL << 48 :
1336 1ULL << 32;
1337
Mika Kuoppala8776f022015-06-30 18:16:40 +03001338 ret = gen8_init_scratch(&ppgtt->base);
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001339 if (ret) {
1340 ppgtt->base.total = 0;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001341 return ret;
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001342 }
Michel Thierry69876be2015-04-08 12:13:27 +01001343
Chris Wilson84486612017-02-15 08:43:40 +00001344	/* There are only a few exceptions for gen >= 6: chv and bxt.
1345	 * And we are not sure about the latter, so play safe for now.
1346 */
1347 if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
1348 ppgtt->base.pt_kmap_wc = true;
1349
Mika Kuoppala1e6437b2017-02-28 17:28:09 +02001350 if (use_4lvl(vm)) {
Chris Wilson84486612017-02-15 08:43:40 +00001351 ret = setup_px(&ppgtt->base, &ppgtt->pml4);
Michel Thierry762d9932015-07-30 11:05:29 +01001352 if (ret)
1353 goto free_scratch;
Michel Thierry6ac18502015-07-29 17:23:46 +01001354
Michel Thierry69ab76f2015-07-29 17:23:55 +01001355 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1356
Mika Kuoppalae7167762017-02-28 17:28:10 +02001357 ppgtt->switch_mm = gen8_mm_switch_4lvl;
Chris Wilsonc5d092a2017-02-15 08:43:49 +00001358 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
Chris Wilson894cceb2017-02-15 08:43:37 +00001359 ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
Chris Wilsonfe52e372017-02-15 08:43:47 +00001360 ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
Michel Thierry762d9932015-07-30 11:05:29 +01001361 } else {
Chris Wilsonfe52e372017-02-15 08:43:47 +00001362 ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
Michel Thierry81ba8aef2015-08-03 09:52:01 +01001363 if (ret)
1364 goto free_scratch;
1365
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001366 if (intel_vgpu_active(dev_priv)) {
Chris Wilsone2b763c2017-02-15 08:43:48 +00001367 ret = gen8_preallocate_top_level_pdp(ppgtt);
1368 if (ret) {
1369 __pdp_fini(&ppgtt->pdp);
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001370 goto free_scratch;
Chris Wilsone2b763c2017-02-15 08:43:48 +00001371 }
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001372 }
Chris Wilson894cceb2017-02-15 08:43:37 +00001373
Mika Kuoppalae7167762017-02-28 17:28:10 +02001374 ppgtt->switch_mm = gen8_mm_switch_3lvl;
Chris Wilsonc5d092a2017-02-15 08:43:49 +00001375 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
Chris Wilson894cceb2017-02-15 08:43:37 +00001376 ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
Chris Wilsonfe52e372017-02-15 08:43:47 +00001377 ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
Michel Thierry81ba8aef2015-08-03 09:52:01 +01001378 }
Michel Thierry6ac18502015-07-29 17:23:46 +01001379
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001380 if (intel_vgpu_active(dev_priv))
Zhiyuan Lv650da342015-08-28 15:41:18 +08001381 gen8_ppgtt_notify_vgt(ppgtt, true);
1382
Mika Kuoppala054b9ac2017-02-28 17:28:11 +02001383 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1384 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1385 ppgtt->base.bind_vma = ppgtt_bind_vma;
1386 ppgtt->debug_dump = gen8_dump_ppgtt;
1387
Michel Thierryd7b26332015-04-08 12:13:34 +01001388 return 0;
Michel Thierry6ac18502015-07-29 17:23:46 +01001389
1390free_scratch:
1391 gen8_free_scratch(&ppgtt->base);
1392 return ret;
Michel Thierryd7b26332015-04-08 12:13:34 +01001393}
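/*
 * Summing up the two configurations above: with full 48b PPGTT the vm
 * spans 1ULL << 48 = 256TB and is rooted in a PML4, otherwise it spans
 * 1ULL << 32 = 4GB and is rooted directly in a 4-entry PDP, matching
 * the legacy layout described in the comment above gen8_ppgtt_init().
 */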
1394
Ben Widawsky87d60b62013-12-06 14:11:29 -08001395static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1396{
Ben Widawsky87d60b62013-12-06 14:11:29 -08001397 struct i915_address_space *vm = &ppgtt->base;
Michel Thierry09942c62015-04-08 12:13:30 +01001398 struct i915_page_table *unused;
Michel Thierry07749ef2015-03-16 16:00:54 +00001399 gen6_pte_t scratch_pte;
Chris Wilson381b9432017-02-15 08:43:54 +00001400 u32 pd_entry, pte, pde;
1401 u32 start = 0, length = ppgtt->base.total;
Ben Widawsky87d60b62013-12-06 14:11:29 -08001402
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001403 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02001404 I915_CACHE_LLC, 0);
Ben Widawsky87d60b62013-12-06 14:11:29 -08001405
Dave Gordon731f74c2016-06-24 19:37:46 +01001406 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
Ben Widawsky87d60b62013-12-06 14:11:29 -08001407 u32 expected;
Michel Thierry07749ef2015-03-16 16:00:54 +00001408 gen6_pte_t *pt_vaddr;
Mika Kuoppala567047b2015-06-25 18:35:12 +03001409 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
Michel Thierry09942c62015-04-08 12:13:30 +01001410 pd_entry = readl(ppgtt->pd_addr + pde);
Ben Widawsky87d60b62013-12-06 14:11:29 -08001411 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1412
1413 if (pd_entry != expected)
1414 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1415 pde,
1416 pd_entry,
1417 expected);
1418 seq_printf(m, "\tPDE: %x\n", pd_entry);
1419
Chris Wilson9231da72017-02-15 08:43:41 +00001420 pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
Mika Kuoppalad1c54ac2015-06-25 18:35:11 +03001421
Michel Thierry07749ef2015-03-16 16:00:54 +00001422	for (pte = 0; pte < GEN6_PTES; pte += 4) {
Ben Widawsky87d60b62013-12-06 14:11:29 -08001423 unsigned long va =
Michel Thierry07749ef2015-03-16 16:00:54 +00001424 (pde * PAGE_SIZE * GEN6_PTES) +
Ben Widawsky87d60b62013-12-06 14:11:29 -08001425 (pte * PAGE_SIZE);
1426 int i;
1427 bool found = false;
1428 for (i = 0; i < 4; i++)
1429 if (pt_vaddr[pte + i] != scratch_pte)
1430 found = true;
1431 if (!found)
1432 continue;
1433
1434 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1435 for (i = 0; i < 4; i++) {
1436 if (pt_vaddr[pte + i] != scratch_pte)
1437 seq_printf(m, " %08x", pt_vaddr[pte + i]);
1438 else
1439 seq_puts(m, " SCRATCH ");
1440 }
1441 seq_puts(m, "\n");
1442 }
Chris Wilson9231da72017-02-15 08:43:41 +00001443 kunmap_atomic(pt_vaddr);
Ben Widawsky87d60b62013-12-06 14:11:29 -08001444 }
1445}
1446
Ben Widawsky678d96f2015-03-16 16:00:56 +00001447/* Write the page directory entry at index @pde of @ppgtt to point at the page table @pt */
Chris Wilson16a011c2017-02-15 08:43:45 +00001448static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
1449 const unsigned int pde,
1450 const struct i915_page_table *pt)
Ben Widawsky61973492013-04-08 18:43:54 -07001451{
Ben Widawsky678d96f2015-03-16 16:00:56 +00001452 /* Caller needs to make sure the write completes if necessary */
Chris Wilson16a011c2017-02-15 08:43:45 +00001453 writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1454 ppgtt->pd_addr + pde);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001455}
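/*
 * The relaxed write is deliberate: gen6_write_page_range() below may
 * rewrite hundreds of PDEs back to back, so the stores are left
 * unordered and the caller pays for a single wmb() at the end instead.
 */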
Ben Widawsky61973492013-04-08 18:43:54 -07001456
Ben Widawsky678d96f2015-03-16 16:00:56 +00001457/* Write the PDEs for all the page tables found in the ppgtt structure into
1458 * consecutive entries of the page directory. */
Chris Wilson16a011c2017-02-15 08:43:45 +00001459static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001460 u32 start, u32 length)
Ben Widawsky678d96f2015-03-16 16:00:56 +00001461{
Michel Thierryec565b32015-04-08 12:13:23 +01001462 struct i915_page_table *pt;
Chris Wilson16a011c2017-02-15 08:43:45 +00001463 unsigned int pde;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001464
Chris Wilson16a011c2017-02-15 08:43:45 +00001465 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
1466 gen6_write_pde(ppgtt, pde, pt);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001467
Chris Wilson16a011c2017-02-15 08:43:45 +00001468 mark_tlbs_dirty(ppgtt);
Chris Wilsondd196742017-02-15 08:43:46 +00001469 wmb();
Ben Widawsky3e302542013-04-23 23:15:32 -07001470}
1471
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001472static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
Ben Widawsky3e302542013-04-23 23:15:32 -07001473{
Chris Wilsondd196742017-02-15 08:43:46 +00001474 GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1475 return ppgtt->pd.base.ggtt_offset << 10;
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001476}
Ben Widawsky61973492013-04-08 18:43:54 -07001477
Ben Widawsky90252e52013-12-06 14:11:12 -08001478static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
John Harrisone85b26d2015-05-29 17:43:56 +01001479 struct drm_i915_gem_request *req)
Ben Widawsky90252e52013-12-06 14:11:12 -08001480{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001481 struct intel_engine_cs *engine = req->engine;
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001482 u32 *cs;
Ben Widawsky61973492013-04-08 18:43:54 -07001483
Ben Widawsky90252e52013-12-06 14:11:12 -08001484 /* NB: TLBs must be flushed and invalidated before a switch */
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001485 cs = intel_ring_begin(req, 6);
1486 if (IS_ERR(cs))
1487 return PTR_ERR(cs);
Ben Widawsky90252e52013-12-06 14:11:12 -08001488
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001489 *cs++ = MI_LOAD_REGISTER_IMM(2);
1490 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1491 *cs++ = PP_DIR_DCLV_2G;
1492 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1493 *cs++ = get_pd_offset(ppgtt);
1494 *cs++ = MI_NOOP;
1495 intel_ring_advance(req, cs);
Ben Widawsky90252e52013-12-06 14:11:12 -08001496
1497 return 0;
1498}
1499
Ben Widawsky48a10382013-12-06 14:11:11 -08001500static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
John Harrisone85b26d2015-05-29 17:43:56 +01001501 struct drm_i915_gem_request *req)
Ben Widawsky48a10382013-12-06 14:11:11 -08001502{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001503 struct intel_engine_cs *engine = req->engine;
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001504 u32 *cs;
Ben Widawsky48a10382013-12-06 14:11:11 -08001505
Ben Widawsky48a10382013-12-06 14:11:11 -08001506 /* NB: TLBs must be flushed and invalidated before a switch */
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001507 cs = intel_ring_begin(req, 6);
1508 if (IS_ERR(cs))
1509 return PTR_ERR(cs);
Ben Widawsky48a10382013-12-06 14:11:11 -08001510
Tvrtko Ursulin73dec952017-02-14 11:32:42 +00001511 *cs++ = MI_LOAD_REGISTER_IMM(2);
1512 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1513 *cs++ = PP_DIR_DCLV_2G;
1514 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1515 *cs++ = get_pd_offset(ppgtt);
1516 *cs++ = MI_NOOP;
1517 intel_ring_advance(req, cs);
Ben Widawsky48a10382013-12-06 14:11:11 -08001518
1519 return 0;
1520}
1521
Ben Widawskyeeb94882013-12-06 14:11:10 -08001522static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
John Harrisone85b26d2015-05-29 17:43:56 +01001523 struct drm_i915_gem_request *req)
Ben Widawskyeeb94882013-12-06 14:11:10 -08001524{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001525 struct intel_engine_cs *engine = req->engine;
Chris Wilson8eb95202016-07-04 08:48:31 +01001526 struct drm_i915_private *dev_priv = req->i915;
Ben Widawsky48a10382013-12-06 14:11:11 -08001527
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001528 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1529 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
Ben Widawskyeeb94882013-12-06 14:11:10 -08001530 return 0;
1531}
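/*
 * Of the three mm-switch flavours above, hsw_mm_switch() and
 * gen7_mm_switch() emit the PP_DIR_DCLV/PP_DIR_BASE updates as
 * MI_LOAD_REGISTER_IMM commands into the ring, so the page-directory
 * switch is ordered with the requests around it; gen6_mm_switch()
 * simply writes the registers directly through mmio.
 */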
1532
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001533static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
Ben Widawskyeeb94882013-12-06 14:11:10 -08001534{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001535 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05301536 enum intel_engine_id id;
Ben Widawskyeeb94882013-12-06 14:11:10 -08001537
Akash Goel3b3f1652016-10-13 22:44:48 +05301538 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001539 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1540 GEN8_GFX_PPGTT_48B : 0;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001541 I915_WRITE(RING_MODE_GEN7(engine),
Michel Thierry2dba3232015-07-30 11:06:23 +01001542 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
Ben Widawskyeeb94882013-12-06 14:11:10 -08001543 }
Ben Widawskyeeb94882013-12-06 14:11:10 -08001544}
1545
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001546static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001547{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001548 struct intel_engine_cs *engine;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001549 u32 ecochk, ecobits;
Akash Goel3b3f1652016-10-13 22:44:48 +05301550 enum intel_engine_id id;
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001551
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001552 ecobits = I915_READ(GAC_ECO_BITS);
1553 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1554
1555 ecochk = I915_READ(GAM_ECOCHK);
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01001556 if (IS_HASWELL(dev_priv)) {
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001557 ecochk |= ECOCHK_PPGTT_WB_HSW;
1558 } else {
1559 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1560 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1561 }
1562 I915_WRITE(GAM_ECOCHK, ecochk);
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001563
Akash Goel3b3f1652016-10-13 22:44:48 +05301564 for_each_engine(engine, dev_priv, id) {
Ben Widawskyeeb94882013-12-06 14:11:10 -08001565 /* GFX_MODE is per-ring on gen7+ */
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001566 I915_WRITE(RING_MODE_GEN7(engine),
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001567 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Ben Widawsky61973492013-04-08 18:43:54 -07001568 }
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001569}
1570
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001571static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
Ben Widawsky61973492013-04-08 18:43:54 -07001572{
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001573 u32 ecochk, gab_ctl, ecobits;
Ben Widawsky61973492013-04-08 18:43:54 -07001574
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001575 ecobits = I915_READ(GAC_ECO_BITS);
1576 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1577 ECOBITS_PPGTT_CACHE64B);
Ben Widawsky61973492013-04-08 18:43:54 -07001578
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001579 gab_ctl = I915_READ(GAB_CTL);
1580 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
Ben Widawsky61973492013-04-08 18:43:54 -07001581
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001582 ecochk = I915_READ(GAM_ECOCHK);
1583 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
Ben Widawsky61973492013-04-08 18:43:54 -07001584
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001585 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Ben Widawsky61973492013-04-08 18:43:54 -07001586}
1587
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001588/* PPGTT support for Sandybridge/Gen6 and later */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001589static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
Chris Wilsondd196742017-02-15 08:43:46 +00001590 u64 start, u64 length)
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001591{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001592 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Chris Wilsondd196742017-02-15 08:43:46 +00001593 unsigned int first_entry = start >> PAGE_SHIFT;
1594 unsigned int pde = first_entry / GEN6_PTES;
1595 unsigned int pte = first_entry % GEN6_PTES;
1596 unsigned int num_entries = length >> PAGE_SHIFT;
1597 gen6_pte_t scratch_pte =
1598 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001599
Daniel Vetter7bddb012012-02-09 17:15:47 +01001600 while (num_entries) {
Chris Wilsondd196742017-02-15 08:43:46 +00001601 struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1602 unsigned int end = min(pte + num_entries, GEN6_PTES);
1603 gen6_pte_t *vaddr;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001604
Chris Wilsondd196742017-02-15 08:43:46 +00001605 num_entries -= end - pte;
Daniel Vetter7bddb012012-02-09 17:15:47 +01001606
Chris Wilsondd196742017-02-15 08:43:46 +00001607 /* Note that the hw doesn't support removing PDE on the fly
1608 * (they are cached inside the context with no means to
1609 * invalidate the cache), so we can only reset the PTE
1610 * entries back to scratch.
1611 */
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001612
Chris Wilsondd196742017-02-15 08:43:46 +00001613 vaddr = kmap_atomic_px(pt);
1614 do {
1615 vaddr[pte++] = scratch_pte;
1616 } while (pte < end);
1617 kunmap_atomic(vaddr);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001618
Chris Wilsondd196742017-02-15 08:43:46 +00001619 pte = 0;
Daniel Vetter7bddb012012-02-09 17:15:47 +01001620 }
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001621}
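/*
 * A hypothetical example of the index arithmetic above, assuming
 * GEN6_PTES = 1024 (4-byte PTEs in a 4K page table): clearing 8M at
 * offset 6M gives first_entry = 0x600000 >> 12 = 1536, i.e. pde = 1 and
 * pte = 512, with num_entries = 2048; the loop then resets entries
 * 512..1023 of PT 1, all of PT 2 and entries 0..511 of PT 3 back to
 * scratch.
 */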
1622
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001623static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
Daniel Vetterdef886c2013-01-24 14:44:56 -08001624 struct sg_table *pages,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001625 u64 start,
1626 enum i915_cache_level cache_level,
1627 u32 flags)
Daniel Vetterdef886c2013-01-24 14:44:56 -08001628{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001629 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Ben Widawsky782f1492014-02-20 11:50:33 -08001630 unsigned first_entry = start >> PAGE_SHIFT;
Michel Thierry07749ef2015-03-16 16:00:54 +00001631 unsigned act_pt = first_entry / GEN6_PTES;
1632 unsigned act_pte = first_entry % GEN6_PTES;
Chris Wilsonb31144c2017-02-15 08:43:36 +00001633 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1634 struct sgt_dma iter;
1635 gen6_pte_t *vaddr;
Daniel Vetterdef886c2013-01-24 14:44:56 -08001636
Chris Wilson9231da72017-02-15 08:43:41 +00001637 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
Chris Wilsonb31144c2017-02-15 08:43:36 +00001638 iter.sg = pages->sgl;
1639 iter.dma = sg_dma_address(iter.sg);
1640 iter.max = iter.dma + iter.sg->length;
1641 do {
1642 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
Daniel Vetterdef886c2013-01-24 14:44:56 -08001643
Chris Wilsonb31144c2017-02-15 08:43:36 +00001644 iter.dma += PAGE_SIZE;
1645 if (iter.dma == iter.max) {
1646 iter.sg = __sg_next(iter.sg);
1647 if (!iter.sg)
1648 break;
1649
1650 iter.dma = sg_dma_address(iter.sg);
1651 iter.max = iter.dma + iter.sg->length;
1652 }
Akash Goel24f3a8c2014-06-17 10:59:42 +05301653
Michel Thierry07749ef2015-03-16 16:00:54 +00001654 if (++act_pte == GEN6_PTES) {
Chris Wilson9231da72017-02-15 08:43:41 +00001655 kunmap_atomic(vaddr);
1656 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
Imre Deak6e995e22013-02-18 19:28:04 +02001657 act_pte = 0;
Daniel Vetterdef886c2013-01-24 14:44:56 -08001658 }
Chris Wilsonb31144c2017-02-15 08:43:36 +00001659 } while (1);
Chris Wilson9231da72017-02-15 08:43:41 +00001660 kunmap_atomic(vaddr);
Daniel Vetterdef886c2013-01-24 14:44:56 -08001661}
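/*
 * The sgt_dma iterator above walks the dma-mapped scatterlist one 4K
 * page at a time: it advances to the next sg segment when the current
 * one is exhausted and remaps the kmap onto the next page table when
 * act_pte wraps, so a single call can populate an arbitrarily large,
 * physically scattered range.
 */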
1662
Ben Widawsky678d96f2015-03-16 16:00:56 +00001663static int gen6_alloc_va_range(struct i915_address_space *vm,
Chris Wilsondd196742017-02-15 08:43:46 +00001664 u64 start, u64 length)
Ben Widawsky678d96f2015-03-16 16:00:56 +00001665{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001666 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Michel Thierryec565b32015-04-08 12:13:23 +01001667 struct i915_page_table *pt;
Chris Wilsondd196742017-02-15 08:43:46 +00001668 u64 from = start;
1669 unsigned int pde;
1670 bool flush = false;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001671
Dave Gordon731f74c2016-06-24 19:37:46 +01001672 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
Chris Wilsondd196742017-02-15 08:43:46 +00001673 if (pt == vm->scratch_pt) {
1674 pt = alloc_pt(vm);
1675 if (IS_ERR(pt))
1676 goto unwind_out;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001677
Chris Wilsondd196742017-02-15 08:43:46 +00001678 gen6_initialize_pt(vm, pt);
1679 ppgtt->pd.page_table[pde] = pt;
Chris Wilson16a011c2017-02-15 08:43:45 +00001680 gen6_write_pde(ppgtt, pde, pt);
Chris Wilsondd196742017-02-15 08:43:46 +00001681 flush = true;
1682 }
Ben Widawsky678d96f2015-03-16 16:00:56 +00001683 }
1684
Chris Wilsondd196742017-02-15 08:43:46 +00001685 if (flush) {
1686 mark_tlbs_dirty(ppgtt);
1687 wmb();
1688 }
Michel Thierry4933d512015-03-24 15:46:22 +00001689
Ben Widawsky678d96f2015-03-16 16:00:56 +00001690 return 0;
Michel Thierry4933d512015-03-24 15:46:22 +00001691
1692unwind_out:
Chris Wilsondd196742017-02-15 08:43:46 +00001693 gen6_ppgtt_clear_range(vm, from, start);
1694 return -ENOMEM;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001695}
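/*
 * Page tables are allocated lazily: every PDE starts out pointing at
 * the shared scratch PT, real tables are popped in here the first time
 * a range is allocated over them, and only the PDEs that actually
 * changed are rewritten before the single wmb().
 */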
1696
Mika Kuoppala8776f022015-06-30 18:16:40 +03001697static int gen6_init_scratch(struct i915_address_space *vm)
1698{
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001699 int ret;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001700
Chris Wilson84486612017-02-15 08:43:40 +00001701 ret = setup_scratch_page(vm, I915_GFP_DMA);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01001702 if (ret)
1703 return ret;
Mika Kuoppala8776f022015-06-30 18:16:40 +03001704
Chris Wilson84486612017-02-15 08:43:40 +00001705 vm->scratch_pt = alloc_pt(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001706 if (IS_ERR(vm->scratch_pt)) {
Chris Wilson84486612017-02-15 08:43:40 +00001707 cleanup_scratch_page(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001708 return PTR_ERR(vm->scratch_pt);
1709 }
1710
1711 gen6_initialize_pt(vm, vm->scratch_pt);
1712
1713 return 0;
1714}
1715
1716static void gen6_free_scratch(struct i915_address_space *vm)
1717{
Chris Wilson84486612017-02-15 08:43:40 +00001718 free_pt(vm, vm->scratch_pt);
1719 cleanup_scratch_page(vm);
Mika Kuoppala8776f022015-06-30 18:16:40 +03001720}
1721
Daniel Vetter061dd492015-04-14 17:35:13 +02001722static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
Ben Widawskya00d8252014-02-19 22:05:48 -08001723{
Joonas Lahtinene5716f52016-04-07 11:08:03 +03001724 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
Dave Gordon731f74c2016-06-24 19:37:46 +01001725 struct i915_page_directory *pd = &ppgtt->pd;
Michel Thierry09942c62015-04-08 12:13:30 +01001726 struct i915_page_table *pt;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001727 u32 pde;
Daniel Vetter3440d262013-01-24 13:49:56 -08001728
Daniel Vetter061dd492015-04-14 17:35:13 +02001729 drm_mm_remove_node(&ppgtt->node);
1730
Dave Gordon731f74c2016-06-24 19:37:46 +01001731 gen6_for_all_pdes(pt, pd, pde)
Mika Kuoppala79ab9372015-06-25 18:35:17 +03001732 if (pt != vm->scratch_pt)
Chris Wilson84486612017-02-15 08:43:40 +00001733 free_pt(vm, pt);
Michel Thierry4933d512015-03-24 15:46:22 +00001734
Mika Kuoppala8776f022015-06-30 18:16:40 +03001735 gen6_free_scratch(vm);
Daniel Vetter3440d262013-01-24 13:49:56 -08001736}
1737
Ben Widawskyb1465202014-02-19 22:05:49 -08001738static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
Daniel Vetter3440d262013-01-24 13:49:56 -08001739{
Mika Kuoppala8776f022015-06-30 18:16:40 +03001740 struct i915_address_space *vm = &ppgtt->base;
Chris Wilson49d73912016-11-29 09:50:08 +00001741 struct drm_i915_private *dev_priv = ppgtt->base.i915;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001742 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawskyb1465202014-02-19 22:05:49 -08001743 int ret;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001744
Ben Widawskyc8d4c0d2013-12-06 14:11:07 -08001745	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1746 * allocator works in address space sizes, so it's multiplied by page
1747 * size. We allocate at the top of the GTT to avoid fragmentation.
1748 */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001749 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
Michel Thierry4933d512015-03-24 15:46:22 +00001750
Mika Kuoppala8776f022015-06-30 18:16:40 +03001751 ret = gen6_init_scratch(vm);
1752 if (ret)
1753 return ret;
Michel Thierry4933d512015-03-24 15:46:22 +00001754
Chris Wilsone007b192017-01-11 11:23:10 +00001755 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1756 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1757 I915_COLOR_UNEVICTABLE,
1758 0, ggtt->base.total,
1759 PIN_HIGH);
Ben Widawskyc8c26622015-01-22 17:01:25 +00001760 if (ret)
Ben Widawsky678d96f2015-03-16 16:00:56 +00001761 goto err_out;
1762
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001763 if (ppgtt->node.start < ggtt->mappable_end)
Ben Widawskyc8d4c0d2013-12-06 14:11:07 -08001764 DRM_DEBUG("Forced to use aperture for PDEs\n");
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001765
Chris Wilson52c126e2017-02-15 08:43:43 +00001766 ppgtt->pd.base.ggtt_offset =
1767 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1768
1769 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
1770 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
1771
Ben Widawskyc8c26622015-01-22 17:01:25 +00001772 return 0;
Ben Widawsky678d96f2015-03-16 16:00:56 +00001773
1774err_out:
Mika Kuoppala8776f022015-06-30 18:16:40 +03001775 gen6_free_scratch(vm);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001776 return ret;
Ben Widawskyb1465202014-02-19 22:05:49 -08001777}
1778
Ben Widawskyb1465202014-02-19 22:05:49 -08001779static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1780{
kbuild test robot2f2cf682015-03-27 19:26:35 +08001781 return gen6_ppgtt_allocate_page_directories(ppgtt);
Ben Widawskyb1465202014-02-19 22:05:49 -08001782}
1783
Michel Thierry4933d512015-03-24 15:46:22 +00001784static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001785 u64 start, u64 length)
Michel Thierry4933d512015-03-24 15:46:22 +00001786{
Michel Thierryec565b32015-04-08 12:13:23 +01001787 struct i915_page_table *unused;
Chris Wilson75c7b0b2017-02-15 08:43:57 +00001788 u32 pde;
Michel Thierry4933d512015-03-24 15:46:22 +00001789
Dave Gordon731f74c2016-06-24 19:37:46 +01001790 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
Mika Kuoppala79ab9372015-06-25 18:35:17 +03001791 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
Michel Thierry4933d512015-03-24 15:46:22 +00001792}
1793
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001794static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
Ben Widawskyb1465202014-02-19 22:05:49 -08001795{
Chris Wilson49d73912016-11-29 09:50:08 +00001796 struct drm_i915_private *dev_priv = ppgtt->base.i915;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001797 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawskyb1465202014-02-19 22:05:49 -08001798 int ret;
1799
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001800 ppgtt->base.pte_encode = ggtt->base.pte_encode;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001801 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
Ben Widawsky48a10382013-12-06 14:11:11 -08001802 ppgtt->switch_mm = gen6_mm_switch;
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01001803 else if (IS_HASWELL(dev_priv))
Ben Widawsky90252e52013-12-06 14:11:12 -08001804 ppgtt->switch_mm = hsw_mm_switch;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001805 else if (IS_GEN7(dev_priv))
Ben Widawsky48a10382013-12-06 14:11:11 -08001806 ppgtt->switch_mm = gen7_mm_switch;
Chris Wilson8eb95202016-07-04 08:48:31 +01001807 else
Ben Widawskyb4a74e32013-12-06 14:11:09 -08001808 BUG();
Ben Widawskyb1465202014-02-19 22:05:49 -08001809
1810 ret = gen6_ppgtt_alloc(ppgtt);
1811 if (ret)
1812 return ret;
1813
Michel Thierry09942c62015-04-08 12:13:30 +01001814 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001815
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001816 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
Chris Wilson16a011c2017-02-15 08:43:45 +00001817 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
Ben Widawsky678d96f2015-03-16 16:00:56 +00001818
Chris Wilson52c126e2017-02-15 08:43:43 +00001819 ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
1820 if (ret) {
1821 gen6_ppgtt_cleanup(&ppgtt->base);
1822 return ret;
1823 }
1824
Mika Kuoppala054b9ac2017-02-28 17:28:11 +02001825 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1826 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1827 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1828 ppgtt->base.bind_vma = ppgtt_bind_vma;
1829 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1830 ppgtt->debug_dump = gen6_dump_ppgtt;
1831
Thierry Reding440fd522015-01-23 09:05:06 +01001832 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
Ben Widawskyc8d4c0d2013-12-06 14:11:07 -08001833 ppgtt->node.size >> 20,
1834 ppgtt->node.start / PAGE_SIZE);
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001835
Chris Wilson52c126e2017-02-15 08:43:43 +00001836 DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
1837 ppgtt->pd.base.ggtt_offset << 10);
Daniel Vetterfa76da32014-08-06 20:19:54 +02001838
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001839 return 0;
Daniel Vetter3440d262013-01-24 13:49:56 -08001840}
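/*
 * For reference, the total above works out as I915_PDES * GEN6_PTES *
 * PAGE_SIZE = 512 * 1024 * 4096 = 2GB (assuming the usual 512 PDEs and
 * 1024 4-byte PTEs per 4K table), which is why a single page directory
 * suffices on gen6/gen7.
 */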
1841
Chris Wilson2bfa9962016-08-04 07:52:25 +01001842static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
1843 struct drm_i915_private *dev_priv)
Daniel Vetter3440d262013-01-24 13:49:56 -08001844{
Chris Wilson49d73912016-11-29 09:50:08 +00001845 ppgtt->base.i915 = dev_priv;
Chris Wilson84486612017-02-15 08:43:40 +00001846 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
Daniel Vetter3440d262013-01-24 13:49:56 -08001847
Chris Wilson2bfa9962016-08-04 07:52:25 +01001848 if (INTEL_INFO(dev_priv)->gen < 8)
Daniel Vetter5c5f6452015-04-14 17:35:14 +02001849 return gen6_ppgtt_init(ppgtt);
Ben Widawsky3ed124b2013-04-08 18:43:53 -07001850 else
Michel Thierryd7b26332015-04-08 12:13:34 +01001851 return gen8_ppgtt_init(ppgtt);
Daniel Vetterfa76da32014-08-06 20:19:54 +02001852}
Mika Kuoppalac114f762015-06-25 18:35:13 +03001853
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001854static void i915_address_space_init(struct i915_address_space *vm,
Chris Wilson80b204b2016-10-28 13:58:58 +01001855 struct drm_i915_private *dev_priv,
1856 const char *name)
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001857{
Chris Wilson80b204b2016-10-28 13:58:58 +01001858 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
Chris Wilson47db9222017-02-06 08:45:46 +00001859
Chris Wilson381b9432017-02-15 08:43:54 +00001860 drm_mm_init(&vm->mm, 0, vm->total);
Chris Wilson47db9222017-02-06 08:45:46 +00001861 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
1862
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001863 INIT_LIST_HEAD(&vm->active_list);
1864 INIT_LIST_HEAD(&vm->inactive_list);
Chris Wilson50e046b2016-08-04 07:52:46 +01001865 INIT_LIST_HEAD(&vm->unbound_list);
Chris Wilson47db9222017-02-06 08:45:46 +00001866
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001867 list_add_tail(&vm->global_link, &dev_priv->vm_list);
Chris Wilson84486612017-02-15 08:43:40 +00001868 pagevec_init(&vm->free_pages, false);
Michał Winiarskia2cad9d2015-09-16 11:49:00 +02001869}
1870
Matthew Aulded9724d2016-11-17 21:04:10 +00001871static void i915_address_space_fini(struct i915_address_space *vm)
1872{
Chris Wilson84486612017-02-15 08:43:40 +00001873 if (pagevec_count(&vm->free_pages))
1874 vm_free_pages_release(vm);
1875
Matthew Aulded9724d2016-11-17 21:04:10 +00001876 i915_gem_timeline_fini(&vm->timeline);
1877 drm_mm_takedown(&vm->mm);
1878 list_del(&vm->global_link);
1879}
1880
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001881static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
Tim Gored5165eb2016-02-04 11:49:34 +00001882{
Tim Gored5165eb2016-02-04 11:49:34 +00001883	/* This function is for gtt related workarounds. It is called on
1884	 * driver load and after a GPU reset, so workarounds can be placed
1885	 * here even if they get overwritten by a GPU reset.
1886 */
Ander Conselvan de Oliveira9fb50262017-01-26 11:16:58 +02001887 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
Tvrtko Ursulin86527442016-10-13 11:03:00 +01001888 if (IS_BROADWELL(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001889 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01001890 else if (IS_CHERRYVIEW(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001891 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
Rodrigo Vivib976dc52017-01-23 10:32:37 -08001892 else if (IS_GEN9_BC(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001893 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
Ander Conselvan de Oliveira9fb50262017-01-26 11:16:58 +02001894 else if (IS_GEN9_LP(dev_priv))
Tim Gored5165eb2016-02-04 11:49:34 +00001895 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
1896}
1897
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001898int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
Daniel Vetter82460d92014-08-06 20:19:53 +02001899{
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001900 gtt_write_workarounds(dev_priv);
Tim Gored5165eb2016-02-04 11:49:34 +00001901
Thomas Daniel671b50132014-08-20 16:24:50 +01001902 /* In the case of execlists, PPGTT is enabled by the context descriptor
1903 * and the PDPs are contained within the context itself. We don't
1904 * need to do anything here. */
1905 if (i915.enable_execlists)
1906 return 0;
1907
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001908 if (!USES_PPGTT(dev_priv))
Daniel Vetter82460d92014-08-06 20:19:53 +02001909 return 0;
1910
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001911 if (IS_GEN6(dev_priv))
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001912 gen6_ppgtt_enable(dev_priv);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001913 else if (IS_GEN7(dev_priv))
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001914 gen7_ppgtt_enable(dev_priv);
1915 else if (INTEL_GEN(dev_priv) >= 8)
1916 gen8_ppgtt_enable(dev_priv);
Daniel Vetter82460d92014-08-06 20:19:53 +02001917 else
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001918 MISSING_CASE(INTEL_GEN(dev_priv));
Daniel Vetter82460d92014-08-06 20:19:53 +02001919
John Harrison4ad2fd82015-06-18 13:11:20 +01001920 return 0;
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001921}
John Harrison4ad2fd82015-06-18 13:11:20 +01001922
Daniel Vetter4d884702014-08-06 15:04:47 +02001923struct i915_hw_ppgtt *
Chris Wilson2bfa9962016-08-04 07:52:25 +01001924i915_ppgtt_create(struct drm_i915_private *dev_priv,
Chris Wilson80b204b2016-10-28 13:58:58 +01001925 struct drm_i915_file_private *fpriv,
1926 const char *name)
Daniel Vetter4d884702014-08-06 15:04:47 +02001927{
1928 struct i915_hw_ppgtt *ppgtt;
1929 int ret;
1930
1931 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1932 if (!ppgtt)
1933 return ERR_PTR(-ENOMEM);
1934
Chris Wilson1188bc62017-02-15 08:43:38 +00001935 ret = __hw_ppgtt_init(ppgtt, dev_priv);
Daniel Vetter4d884702014-08-06 15:04:47 +02001936 if (ret) {
1937 kfree(ppgtt);
1938 return ERR_PTR(ret);
1939 }
1940
Chris Wilson1188bc62017-02-15 08:43:38 +00001941 kref_init(&ppgtt->ref);
1942 i915_address_space_init(&ppgtt->base, dev_priv, name);
1943 ppgtt->base.file = fpriv;
1944
Daniele Ceraolo Spurio198c9742014-11-10 13:44:31 +00001945 trace_i915_ppgtt_create(&ppgtt->base);
1946
Daniel Vetter4d884702014-08-06 15:04:47 +02001947 return ppgtt;
1948}
1949
Chris Wilson0c7eeda2017-01-11 21:09:25 +00001950void i915_ppgtt_close(struct i915_address_space *vm)
1951{
1952 struct list_head *phases[] = {
1953 &vm->active_list,
1954 &vm->inactive_list,
1955 &vm->unbound_list,
1956 NULL,
1957 }, **phase;
1958
1959 GEM_BUG_ON(vm->closed);
1960 vm->closed = true;
1961
1962 for (phase = phases; *phase; phase++) {
1963 struct i915_vma *vma, *vn;
1964
1965 list_for_each_entry_safe(vma, vn, *phase, vm_link)
1966 if (!i915_vma_is_closed(vma))
1967 i915_vma_close(vma);
1968 }
1969}
1970
Matthew Aulded9724d2016-11-17 21:04:10 +00001971void i915_ppgtt_release(struct kref *kref)
Daniel Vetteree960be2014-08-06 15:04:45 +02001972{
1973 struct i915_hw_ppgtt *ppgtt =
1974 container_of(kref, struct i915_hw_ppgtt, ref);
1975
Daniele Ceraolo Spurio198c9742014-11-10 13:44:31 +00001976 trace_i915_ppgtt_release(&ppgtt->base);
1977
Chris Wilson50e046b2016-08-04 07:52:46 +01001978 /* vmas should already be unbound and destroyed */
Daniel Vetteree960be2014-08-06 15:04:45 +02001979 WARN_ON(!list_empty(&ppgtt->base.active_list));
1980 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
Chris Wilson50e046b2016-08-04 07:52:46 +01001981 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
Daniel Vetteree960be2014-08-06 15:04:45 +02001982
1983 ppgtt->base.cleanup(&ppgtt->base);
Chris Wilson84486612017-02-15 08:43:40 +00001984 i915_address_space_fini(&ppgtt->base);
Daniel Vetteree960be2014-08-06 15:04:45 +02001985 kfree(ppgtt);
1986}
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001987
Ben Widawskya81cc002013-01-18 12:30:31 -08001988/* Certain Gen5 chipsets require idling the GPU before
1989 * unmapping anything from the GTT when VT-d is enabled.
1990 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001991static bool needs_idle_maps(struct drm_i915_private *dev_priv)
Ben Widawskya81cc002013-01-18 12:30:31 -08001992{
Ben Widawskya81cc002013-01-18 12:30:31 -08001993 /* Query intel_iommu to see if we need the workaround. Presumably that
1994 * was loaded first.
1995 */
Chris Wilson80debff2017-05-25 13:16:12 +01001996 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
Ben Widawskya81cc002013-01-18 12:30:31 -08001997}
1998
Chris Wilsondc979972016-05-10 14:10:04 +01001999void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
Ben Widawsky828c7902013-10-16 09:21:30 -07002000{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002001 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302002 enum intel_engine_id id;
Ben Widawsky828c7902013-10-16 09:21:30 -07002003
Chris Wilsondc979972016-05-10 14:10:04 +01002004 if (INTEL_INFO(dev_priv)->gen < 6)
Ben Widawsky828c7902013-10-16 09:21:30 -07002005 return;
2006
Akash Goel3b3f1652016-10-13 22:44:48 +05302007 for_each_engine(engine, dev_priv, id) {
Ben Widawsky828c7902013-10-16 09:21:30 -07002008 u32 fault_reg;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002009 fault_reg = I915_READ(RING_FAULT_REG(engine));
Ben Widawsky828c7902013-10-16 09:21:30 -07002010 if (fault_reg & RING_FAULT_VALID) {
2011 DRM_DEBUG_DRIVER("Unexpected fault\n"
Paulo Zanoni59a5d292014-10-30 15:52:45 -02002012 "\tAddr: 0x%08lx\n"
Ben Widawsky828c7902013-10-16 09:21:30 -07002013 "\tAddress space: %s\n"
2014 "\tSource ID: %d\n"
2015 "\tType: %d\n",
2016 fault_reg & PAGE_MASK,
2017 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2018 RING_FAULT_SRCID(fault_reg),
2019 RING_FAULT_FAULT_TYPE(fault_reg));
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002020 I915_WRITE(RING_FAULT_REG(engine),
Ben Widawsky828c7902013-10-16 09:21:30 -07002021 fault_reg & ~RING_FAULT_VALID);
2022 }
2023 }
Akash Goel3b3f1652016-10-13 22:44:48 +05302024
2025 /* Engine specific init may not have been done till this point. */
2026 if (dev_priv->engine[RCS])
2027 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
Ben Widawsky828c7902013-10-16 09:21:30 -07002028}
2029
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00002030void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
Ben Widawsky828c7902013-10-16 09:21:30 -07002031{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002032 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawsky828c7902013-10-16 09:21:30 -07002033
2034 /* Don't bother messing with faults pre GEN6 as we have little
2035 * documentation supporting that it's a good idea.
2036 */
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00002037 if (INTEL_GEN(dev_priv) < 6)
Ben Widawsky828c7902013-10-16 09:21:30 -07002038 return;
2039
Chris Wilsondc979972016-05-10 14:10:04 +01002040 i915_check_and_clear_faults(dev_priv);
Ben Widawsky828c7902013-10-16 09:21:30 -07002041
Chris Wilson381b9432017-02-15 08:43:54 +00002042 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
Chris Wilson91e56492014-09-25 10:13:12 +01002043
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002044 i915_ggtt_invalidate(dev_priv);
Ben Widawsky828c7902013-10-16 09:21:30 -07002045}
2046
Chris Wilson03ac84f2016-10-28 13:58:36 +01002047int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2048 struct sg_table *pages)
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002049{
Chris Wilson1a292fa2017-01-06 15:22:39 +00002050 do {
2051 if (dma_map_sg(&obj->base.dev->pdev->dev,
2052 pages->sgl, pages->nents,
2053 PCI_DMA_BIDIRECTIONAL))
2054 return 0;
2055
2056 /* If the DMA remap fails, one cause can be that we have
2057 * too many objects pinned in a small remapping table,
2058 * such as swiotlb. Incrementally purge all other objects and
2059 * try again - if there are no more pages to remove from
2060 * the DMA remapper, i915_gem_shrink will return 0.
2061 */
2062 GEM_BUG_ON(obj->mm.pages == pages);
2063 } while (i915_gem_shrink(to_i915(obj->base.dev),
2064 obj->base.size >> PAGE_SHIFT,
2065 I915_SHRINK_BOUND |
2066 I915_SHRINK_UNBOUND |
2067 I915_SHRINK_ACTIVE));
Chris Wilson9da3da62012-06-01 15:20:22 +01002068
Chris Wilson03ac84f2016-10-28 13:58:36 +01002069 return -ENOSPC;
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002070}
2071
Daniel Vetter2c642b02015-04-14 17:35:26 +02002072static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002073{
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002074 writeq(pte, addr);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002075}
2076
Chris Wilsond6473f52016-06-10 14:22:59 +05302077static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2078 dma_addr_t addr,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002079 u64 offset,
Chris Wilsond6473f52016-06-10 14:22:59 +05302080 enum i915_cache_level level,
2081 u32 unused)
2082{
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002083 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Chris Wilsond6473f52016-06-10 14:22:59 +05302084 gen8_pte_t __iomem *pte =
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002085 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
Chris Wilsond6473f52016-06-10 14:22:59 +05302086
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002087 gen8_set_pte(pte, gen8_pte_encode(addr, level));
Chris Wilsond6473f52016-06-10 14:22:59 +05302088
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002089 ggtt->invalidate(vm->i915);
Chris Wilsond6473f52016-06-10 14:22:59 +05302090}
2091
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002092static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2093 struct sg_table *st,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002094 u64 start,
2095 enum i915_cache_level level,
2096 u32 unused)
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002097{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002098 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Dave Gordon85d12252016-05-20 11:54:06 +01002099 struct sgt_iter sgt_iter;
2100 gen8_pte_t __iomem *gtt_entries;
Chris Wilson894cceb2017-02-15 08:43:37 +00002101 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
Dave Gordon85d12252016-05-20 11:54:06 +01002102 dma_addr_t addr;
Imre Deakbe694592015-12-15 20:10:38 +02002103
Chris Wilson894cceb2017-02-15 08:43:37 +00002104 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2105 gtt_entries += start >> PAGE_SHIFT;
2106 for_each_sgt_dma(addr, sgt_iter, st)
2107 gen8_set_pte(gtt_entries++, pte_encode | addr);
Dave Gordon85d12252016-05-20 11:54:06 +01002108
Chris Wilson894cceb2017-02-15 08:43:37 +00002109 wmb();
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002110
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002111	/* This next bit makes the wmb() above even more important. We
2112 * want to flush the TLBs only after we're certain all the PTE updates
2113 * have finished.
2114 */
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002115 ggtt->invalidate(vm->i915);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002116}
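/*
 * Ordering note: the GGTT PTEs are written through the gsm mapping,
 * which is typically write-combined, so the wmb() above drains the WC
 * buffer before the chipset invalidate makes the new entries visible
 * to the GPU.
 */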
2117
Chris Wilsond6473f52016-06-10 14:22:59 +05302118static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2119 dma_addr_t addr,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002120 u64 offset,
Chris Wilsond6473f52016-06-10 14:22:59 +05302121 enum i915_cache_level level,
2122 u32 flags)
2123{
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002124 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Chris Wilsond6473f52016-06-10 14:22:59 +05302125 gen6_pte_t __iomem *pte =
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002126 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
Chris Wilsond6473f52016-06-10 14:22:59 +05302127
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002128 iowrite32(vm->pte_encode(addr, level, flags), pte);
Chris Wilsond6473f52016-06-10 14:22:59 +05302129
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002130 ggtt->invalidate(vm->i915);
Chris Wilsond6473f52016-06-10 14:22:59 +05302131}
2132
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002133/*
2134 * Binds an object into the global gtt with the specified cache level. The object
2135 * will be accessible to the GPU via commands whose operands reference offsets
2136 * within the global GTT as well as accessible by the CPU through the GMADR
2137 * mapped BAR (dev_priv->mm.gtt->gtt).
2138 */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002139static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002140 struct sg_table *st,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002141 u64 start,
2142 enum i915_cache_level level,
2143 u32 flags)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002144{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002145 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Chris Wilsonb31144c2017-02-15 08:43:36 +00002146 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2147 unsigned int i = start >> PAGE_SHIFT;
2148 struct sgt_iter iter;
Dave Gordon85d12252016-05-20 11:54:06 +01002149 dma_addr_t addr;
Chris Wilsonb31144c2017-02-15 08:43:36 +00002150 for_each_sgt_dma(addr, iter, st)
2151 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2152 wmb();
Ben Widawsky0f9b91c2012-11-04 09:21:30 -08002153
2154	/* This next bit makes the wmb() above even more important. We
2155 * want to flush the TLBs only after we're certain all the PTE updates
2156 * have finished.
2157 */
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002158 ggtt->invalidate(vm->i915);
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002159}
2160
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002161static void nop_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002162 u64 start, u64 length)
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002163{
2164}
2165
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002166static void gen8_ggtt_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002167 u64 start, u64 length)
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002168{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002169 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Ben Widawsky782f1492014-02-20 11:50:33 -08002170 unsigned first_entry = start >> PAGE_SHIFT;
2171 unsigned num_entries = length >> PAGE_SHIFT;
Chris Wilson894cceb2017-02-15 08:43:37 +00002172 const gen8_pte_t scratch_pte =
2173 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2174 gen8_pte_t __iomem *gtt_base =
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002175 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2176 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002177 int i;
2178
2179 if (WARN(num_entries > max_entries,
2180 "First entry = %d; Num entries = %d (max=%d)\n",
2181 first_entry, num_entries, max_entries))
2182 num_entries = max_entries;
2183
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002184 for (i = 0; i < num_entries; i++)
2185 gen8_set_pte(&gtt_base[i], scratch_pte);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07002186}
2187
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07002188static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2189{
2190 struct drm_i915_private *dev_priv = vm->i915;
2191
2192 /*
2193 * Make sure the internal GAM fifo has been cleared of all GTT
2194 * writes before exiting stop_machine(). This guarantees that
2195 * any aperture accesses waiting to start in another process
2196 * cannot back up behind the GTT writes causing a hang.
2197 * The register can be any arbitrary GAM register.
2198 */
2199 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2200}
2201
2202struct insert_page {
2203 struct i915_address_space *vm;
2204 dma_addr_t addr;
2205 u64 offset;
2206 enum i915_cache_level level;
2207};
2208
2209static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2210{
2211 struct insert_page *arg = _arg;
2212
2213 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2214 bxt_vtd_ggtt_wa(arg->vm);
2215
2216 return 0;
2217}
2218
2219static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2220 dma_addr_t addr,
2221 u64 offset,
2222 enum i915_cache_level level,
2223 u32 unused)
2224{
2225 struct insert_page arg = { vm, addr, offset, level };
2226
2227 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2228}
2229
2230struct insert_entries {
2231 struct i915_address_space *vm;
2232 struct sg_table *st;
2233 u64 start;
2234 enum i915_cache_level level;
2235};
2236
2237static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2238{
2239 struct insert_entries *arg = _arg;
2240
2241 gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
2242 bxt_vtd_ggtt_wa(arg->vm);
2243
2244 return 0;
2245}
2246
2247static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2248 struct sg_table *st,
2249 u64 start,
2250 enum i915_cache_level level,
2251 u32 unused)
2252{
2253 struct insert_entries arg = { vm, st, start, level };
2254
2255 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2256}
2257
2258struct clear_range {
2259 struct i915_address_space *vm;
2260 u64 start;
2261 u64 length;
2262};
2263
2264static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2265{
2266 struct clear_range *arg = _arg;
2267
2268 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2269 bxt_vtd_ggtt_wa(arg->vm);
2270
2271 return 0;
2272}
2273
2274static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2275 u64 start,
2276 u64 length)
2277{
2278 struct clear_range arg = { vm, start, length };
2279
2280 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2281}
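/*
 * The three *__BKL wrappers above share one pattern: pack the arguments
 * into a struct, run the real GGTT update inside stop_machine() so that
 * no other CPU can touch the aperture concurrently, and drain the GAM
 * fifo via bxt_vtd_ggtt_wa() before the machine resumes. This serializes
 * every GGTT update while the VT-d workaround is active, trading
 * throughput for correctness.
 */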
2282
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002283static void gen6_ggtt_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002284 u64 start, u64 length)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002285{
Chris Wilsonce7fda22016-04-28 09:56:38 +01002286 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
Ben Widawsky782f1492014-02-20 11:50:33 -08002287 unsigned first_entry = start >> PAGE_SHIFT;
2288 unsigned num_entries = length >> PAGE_SHIFT;
Michel Thierry07749ef2015-03-16 16:00:54 +00002289 gen6_pte_t scratch_pte, __iomem *gtt_base =
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002290 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2291 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002292 int i;
2293
2294 if (WARN(num_entries > max_entries,
2295 "First entry = %d; Num entries = %d (max=%d)\n",
2296 first_entry, num_entries, max_entries))
2297 num_entries = max_entries;
2298
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002299 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002300 I915_CACHE_LLC, 0);
Ben Widawsky828c7902013-10-16 09:21:30 -07002301
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002302 for (i = 0; i < num_entries; i++)
2303 iowrite32(scratch_pte, &gtt_base[i]);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002304}
2305
Chris Wilsond6473f52016-06-10 14:22:59 +05302306static void i915_ggtt_insert_page(struct i915_address_space *vm,
2307 dma_addr_t addr,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002308 u64 offset,
Chris Wilsond6473f52016-06-10 14:22:59 +05302309 enum i915_cache_level cache_level,
2310 u32 unused)
2311{
Chris Wilsond6473f52016-06-10 14:22:59 +05302312 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2313 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
Chris Wilsond6473f52016-06-10 14:22:59 +05302314
2315 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
Chris Wilsond6473f52016-06-10 14:22:59 +05302316}
2317
Daniel Vetterd369d2d2015-04-14 17:35:25 +02002318static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2319 struct sg_table *pages,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002320 u64 start,
2321 enum i915_cache_level cache_level,
2322 u32 unused)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002323{
2324 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2325 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2326
Daniel Vetterd369d2d2015-04-14 17:35:25 +02002327 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002328}
2329
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002330static void i915_ggtt_clear_range(struct i915_address_space *vm,
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002331 u64 start, u64 length)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002332{
Chris Wilson2eedfc72016-10-24 13:42:17 +01002333 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002334}
2335
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02002336static int ggtt_bind_vma(struct i915_vma *vma,
2337 enum i915_cache_level cache_level,
2338 u32 flags)
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002339{
Chris Wilson49d73912016-11-29 09:50:08 +00002340 struct drm_i915_private *i915 = vma->vm->i915;
Daniel Vetter0a878712015-10-15 14:23:01 +02002341 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsonba7a5742017-02-15 08:43:35 +00002342 u32 pte_flags;
Daniel Vetter0a878712015-10-15 14:23:01 +02002343
Chris Wilsonba7a5742017-02-15 08:43:35 +00002344 if (unlikely(!vma->pages)) {
2345 int ret = i915_get_ggtt_vma_pages(vma);
2346 if (ret)
2347 return ret;
2348 }
Daniel Vetter0a878712015-10-15 14:23:01 +02002349
2350 /* Currently applicable only to VLV */
Chris Wilsonba7a5742017-02-15 08:43:35 +00002351 pte_flags = 0;
Daniel Vetter0a878712015-10-15 14:23:01 +02002352 if (obj->gt_ro)
2353 pte_flags |= PTE_READ_ONLY;
2354
Chris Wilson9c870d02016-10-24 13:42:15 +01002355 intel_runtime_pm_get(i915);
Chris Wilson247177d2016-08-15 10:48:47 +01002356 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
Daniel Vetter0a878712015-10-15 14:23:01 +02002357 cache_level, pte_flags);
Chris Wilson9c870d02016-10-24 13:42:15 +01002358 intel_runtime_pm_put(i915);
Daniel Vetter0a878712015-10-15 14:23:01 +02002359
2360 /*
2361 * Without aliasing PPGTT there's no difference between
2362 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2363 * upgrade to both bound if we bind either to avoid double-binding.
2364 */
Chris Wilson3272db52016-08-04 16:32:32 +01002365 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
Daniel Vetter0a878712015-10-15 14:23:01 +02002366
2367 return 0;
2368}
2369
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002370static void ggtt_unbind_vma(struct i915_vma *vma)
2371{
2372 struct drm_i915_private *i915 = vma->vm->i915;
2373
2374 intel_runtime_pm_get(i915);
2375 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2376 intel_runtime_pm_put(i915);
2377}
2378
Daniel Vetter0a878712015-10-15 14:23:01 +02002379static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2380 enum i915_cache_level cache_level,
2381 u32 flags)
2382{
Chris Wilson49d73912016-11-29 09:50:08 +00002383 struct drm_i915_private *i915 = vma->vm->i915;
Chris Wilson321d1782015-11-20 10:27:18 +00002384 u32 pte_flags;
Chris Wilsonff685972017-02-15 08:43:42 +00002385 int ret;
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02002386
Chris Wilsonba7a5742017-02-15 08:43:35 +00002387 if (unlikely(!vma->pages)) {
Chris Wilsonff685972017-02-15 08:43:42 +00002388 ret = i915_get_ggtt_vma_pages(vma);
Chris Wilsonba7a5742017-02-15 08:43:35 +00002389 if (ret)
2390 return ret;
2391 }
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002392
Akash Goel24f3a8c2014-06-17 10:59:42 +05302393 /* Currently applicable only to VLV */
Chris Wilson321d1782015-11-20 10:27:18 +00002394 pte_flags = 0;
2395 if (vma->obj->gt_ro)
Daniel Vetterf329f5f2015-04-14 17:35:15 +02002396 pte_flags |= PTE_READ_ONLY;
Akash Goel24f3a8c2014-06-17 10:59:42 +05302397
Chris Wilsonff685972017-02-15 08:43:42 +00002398 if (flags & I915_VMA_LOCAL_BIND) {
2399 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2400
Matthew Auld1f234752017-05-12 10:14:23 +01002401 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2402 appgtt->base.allocate_va_range) {
Chris Wilsonff685972017-02-15 08:43:42 +00002403 ret = appgtt->base.allocate_va_range(&appgtt->base,
2404 vma->node.start,
Matthew Auldd5672322017-05-16 09:55:14 +01002405 vma->size);
Chris Wilsonff685972017-02-15 08:43:42 +00002406 if (ret)
Chris Wilson2f7399a2017-02-27 12:26:53 +00002407 goto err_pages;
Chris Wilsonff685972017-02-15 08:43:42 +00002408 }
2409
2410 appgtt->base.insert_entries(&appgtt->base,
2411 vma->pages, vma->node.start,
2412 cache_level, pte_flags);
2413 }
2414
Chris Wilson3272db52016-08-04 16:32:32 +01002415 if (flags & I915_VMA_GLOBAL_BIND) {
Chris Wilson9c870d02016-10-24 13:42:15 +01002416 intel_runtime_pm_get(i915);
Chris Wilson321d1782015-11-20 10:27:18 +00002417 vma->vm->insert_entries(vma->vm,
Chris Wilson247177d2016-08-15 10:48:47 +01002418 vma->pages, vma->node.start,
Daniel Vetter08755462015-04-20 09:04:05 -07002419 cache_level, pte_flags);
Chris Wilson9c870d02016-10-24 13:42:15 +01002420 intel_runtime_pm_put(i915);
Ben Widawsky6f65e292013-12-06 14:10:56 -08002421 }
Daniel Vetter74898d72012-02-15 23:50:22 +01002422
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02002423 return 0;
Chris Wilson2f7399a2017-02-27 12:26:53 +00002424
2425err_pages:
2426 if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
2427 if (vma->pages != vma->obj->mm.pages) {
2428 GEM_BUG_ON(!vma->pages);
2429 sg_free_table(vma->pages);
2430 kfree(vma->pages);
2431 }
2432 vma->pages = NULL;
2433 }
2434 return ret;
Ben Widawsky6f65e292013-12-06 14:10:56 -08002435}
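/*
 * Note (illustrative): with an aliasing PPGTT the same vma->node.start is
 * written into two sets of page tables. I915_VMA_LOCAL_BIND fills the
 * per-process tables (allocating the va range on first bind), while
 * I915_VMA_GLOBAL_BIND fills the global GTT; the PPGTT "aliases" the GGTT
 * address space rather than providing an independent one.
 */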
2436
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002437static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
Ben Widawsky6f65e292013-12-06 14:10:56 -08002438{
Chris Wilson49d73912016-11-29 09:50:08 +00002439 struct drm_i915_private *i915 = vma->vm->i915;
Ben Widawsky6f65e292013-12-06 14:10:56 -08002440
Chris Wilson9c870d02016-10-24 13:42:15 +01002441 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2442 intel_runtime_pm_get(i915);
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002443 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
Chris Wilson9c870d02016-10-24 13:42:15 +01002444 intel_runtime_pm_put(i915);
2445 }
Ben Widawsky6f65e292013-12-06 14:10:56 -08002446
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002447 if (vma->flags & I915_VMA_LOCAL_BIND) {
2448 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2449
2450 vm->clear_range(vm, vma->node.start, vma->size);
2451 }
Daniel Vetter74163902012-02-15 23:50:21 +01002452}
2453
Chris Wilson03ac84f2016-10-28 13:58:36 +01002454void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2455 struct sg_table *pages)
Daniel Vetter74163902012-02-15 23:50:21 +01002456{
David Weinehall52a05c32016-08-22 13:32:44 +03002457 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2458 struct device *kdev = &dev_priv->drm.pdev->dev;
Chris Wilson307dc252016-08-05 10:14:12 +01002459 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawsky5c042282011-10-17 15:51:55 -07002460
Chris Wilson307dc252016-08-05 10:14:12 +01002461 if (unlikely(ggtt->do_idle_maps)) {
Chris Wilson228ec872017-03-30 09:53:41 +01002462 if (i915_gem_wait_for_idle(dev_priv, 0)) {
Chris Wilson307dc252016-08-05 10:14:12 +01002463		DRM_ERROR("Failed to wait for idle; VT-d may hang.\n");
2464 /* Wait a bit, in hopes it avoids the hang */
2465 udelay(10);
2466 }
2467 }
Ben Widawsky5c042282011-10-17 15:51:55 -07002468
Chris Wilson03ac84f2016-10-28 13:58:36 +01002469 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002470}
Daniel Vetter644ec022012-03-26 09:45:40 +02002471
Chris Wilson45b186f2016-12-16 07:46:42 +00002472static void i915_gtt_color_adjust(const struct drm_mm_node *node,
Chris Wilson42d6ab42012-07-26 11:49:32 +01002473 unsigned long color,
Thierry Reding440fd522015-01-23 09:05:06 +01002474 u64 *start,
2475 u64 *end)
Chris Wilson42d6ab42012-07-26 11:49:32 +01002476{
Chris Wilsona6508de2017-02-06 08:45:47 +00002477 if (node->allocated && node->color != color)
Chris Wilsonf51455d2017-01-10 14:47:34 +00002478 *start += I915_GTT_PAGE_SIZE;
Chris Wilson42d6ab42012-07-26 11:49:32 +01002479
Chris Wilsona6508de2017-02-06 08:45:47 +00002480	/* Also leave a space between any object within the GTT and the
2481	 * unallocated reserved node at the end of the GTT, i.e. we use the color
2482	 * adjustment to insert a guard page to prevent prefetches crossing over
2483	 * the GTT boundary.
2484 */
Chris Wilsonb44f97f2016-12-16 07:46:40 +00002485 node = list_next_entry(node, node_list);
Chris Wilsona6508de2017-02-06 08:45:47 +00002486 if (node->color != color)
Chris Wilsonf51455d2017-01-10 14:47:34 +00002487 *end -= I915_GTT_PAGE_SIZE;
Chris Wilson42d6ab42012-07-26 11:49:32 +01002488}
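/*
 * Worked example (illustrative), with I915_GTT_PAGE_SIZE == 4096: for a
 * hole [0x10000, 0x20000) whose neighbours both differ in color,
 *
 *	*start: 0x10000 -> 0x11000
 *	*end:   0x20000 -> 0x1f000
 *
 * so whatever is allocated from the hole keeps a one-page guard against
 * prefetches on either side.
 */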
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002489
Chris Wilson6cde9a02017-02-13 17:15:50 +00002490int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2491{
2492 struct i915_ggtt *ggtt = &i915->ggtt;
2493 struct i915_hw_ppgtt *ppgtt;
2494 int err;
2495
Chris Wilson57202f42017-02-15 08:43:56 +00002496 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
Chris Wilson1188bc62017-02-15 08:43:38 +00002497 if (IS_ERR(ppgtt))
2498 return PTR_ERR(ppgtt);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002499
Chris Wilsone565ceb2017-02-15 08:43:55 +00002500 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2501 err = -ENODEV;
2502 goto err_ppgtt;
2503 }
2504
Chris Wilson6cde9a02017-02-13 17:15:50 +00002505 if (ppgtt->base.allocate_va_range) {
Chris Wilsone565ceb2017-02-15 08:43:55 +00002506 /* Note we only pre-allocate as far as the end of the global
2507 * GTT. On 48b / 4-level page-tables, the difference is very,
2508 * very significant! We have to preallocate as GVT/vgpu does
2509 * not like the page directory disappearing.
2510 */
Chris Wilson6cde9a02017-02-13 17:15:50 +00002511 err = ppgtt->base.allocate_va_range(&ppgtt->base,
Chris Wilsone565ceb2017-02-15 08:43:55 +00002512 0, ggtt->base.total);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002513 if (err)
Chris Wilson1188bc62017-02-15 08:43:38 +00002514 goto err_ppgtt;
Chris Wilson6cde9a02017-02-13 17:15:50 +00002515 }
2516
Chris Wilson6cde9a02017-02-13 17:15:50 +00002517 i915->mm.aliasing_ppgtt = ppgtt;
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002518
Chris Wilson6cde9a02017-02-13 17:15:50 +00002519 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2520 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2521
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002522 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2523 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2524
Chris Wilson6cde9a02017-02-13 17:15:50 +00002525 return 0;
2526
Chris Wilson6cde9a02017-02-13 17:15:50 +00002527err_ppgtt:
Chris Wilson1188bc62017-02-15 08:43:38 +00002528 i915_ppgtt_put(ppgtt);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002529 return err;
2530}
2531
2532void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2533{
2534 struct i915_ggtt *ggtt = &i915->ggtt;
2535 struct i915_hw_ppgtt *ppgtt;
2536
2537 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2538 if (!ppgtt)
2539 return;
2540
Chris Wilson1188bc62017-02-15 08:43:38 +00002541 i915_ppgtt_put(ppgtt);
Chris Wilson6cde9a02017-02-13 17:15:50 +00002542
2543 ggtt->base.bind_vma = ggtt_bind_vma;
Chris Wilsoncbc4e9e2017-02-15 08:43:39 +00002544 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilson6cde9a02017-02-13 17:15:50 +00002545}
2546
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002547int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
Daniel Vetter644ec022012-03-26 09:45:40 +02002548{
Ben Widawskye78891c2013-01-25 16:41:04 -08002549	/* Let GEM manage all of the aperture.
2550 *
2551 * However, leave one page at the end still bound to the scratch page.
2552 * There are a number of places where the hardware apparently prefetches
2553 * past the end of the object, and we've seen multiple hangs with the
2554 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2555 * aperture. One page should be enough to keep any prefetching inside
2556 * of the aperture.
2557 */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002558 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsoned2f3452012-11-15 11:32:19 +00002559 unsigned long hole_start, hole_end;
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002560 struct drm_mm_node *entry;
Daniel Vetterfa76da32014-08-06 20:19:54 +02002561 int ret;
Daniel Vetter644ec022012-03-26 09:45:40 +02002562
Zhi Wangb02d22a2016-06-16 08:06:59 -04002563 ret = intel_vgt_balloon(dev_priv);
2564 if (ret)
2565 return ret;
Yu Zhang5dda8fa2015-02-10 19:05:48 +08002566
Chris Wilson95374d72016-10-12 10:05:20 +01002567 /* Reserve a mappable slot for our lockless error capture */
Chris Wilson4e64e552017-02-02 21:04:38 +00002568 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2569 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2570 0, ggtt->mappable_end,
2571 DRM_MM_INSERT_LOW);
Chris Wilson95374d72016-10-12 10:05:20 +01002572 if (ret)
2573 return ret;
2574
Chris Wilsoned2f3452012-11-15 11:32:19 +00002575 /* Clear any non-preallocated blocks */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002576 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
Chris Wilsoned2f3452012-11-15 11:32:19 +00002577 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2578 hole_start, hole_end);
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002579 ggtt->base.clear_range(&ggtt->base, hole_start,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002580 hole_end - hole_start);
Chris Wilsoned2f3452012-11-15 11:32:19 +00002581 }
2582
2583 /* And finally clear the reserved guard page */
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002584 ggtt->base.clear_range(&ggtt->base,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02002585 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
Daniel Vetter6c5566a2014-08-06 15:04:50 +02002586
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002587 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
Chris Wilson6cde9a02017-02-13 17:15:50 +00002588 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
Chris Wilson95374d72016-10-12 10:05:20 +01002589 if (ret)
Chris Wilson6cde9a02017-02-13 17:15:50 +00002590 goto err;
Daniel Vetterfa76da32014-08-06 20:19:54 +02002591 }
2592
Daniel Vetter6c5566a2014-08-06 15:04:50 +02002593 return 0;
Chris Wilson95374d72016-10-12 10:05:20 +01002594
Chris Wilson95374d72016-10-12 10:05:20 +01002595err:
2596 drm_mm_remove_node(&ggtt->error_capture);
2597 return ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002598}
2599
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002600/**
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002601 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002602 * @dev_priv: i915 device
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002603 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002604void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002605{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002606 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilson94d4a2a2017-02-10 16:35:22 +00002607 struct i915_vma *vma, *vn;
2608
2609 ggtt->base.closed = true;
2610
2611 mutex_lock(&dev_priv->drm.struct_mutex);
2612 WARN_ON(!list_empty(&ggtt->base.active_list));
2613 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2614 WARN_ON(i915_vma_unbind(vma));
2615 mutex_unlock(&dev_priv->drm.struct_mutex);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002616
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002617 i915_gem_cleanup_stolen(&dev_priv->drm);
Imre Deaka4eba472016-01-19 15:26:32 +02002618
Chris Wilson1188bc62017-02-15 08:43:38 +00002619 mutex_lock(&dev_priv->drm.struct_mutex);
2620 i915_gem_fini_aliasing_ppgtt(dev_priv);
2621
Chris Wilson95374d72016-10-12 10:05:20 +01002622 if (drm_mm_node_allocated(&ggtt->error_capture))
2623 drm_mm_remove_node(&ggtt->error_capture);
2624
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002625 if (drm_mm_initialized(&ggtt->base.mm)) {
Zhi Wangb02d22a2016-06-16 08:06:59 -04002626 intel_vgt_deballoon(dev_priv);
Matthew Aulded9724d2016-11-17 21:04:10 +00002627 i915_address_space_fini(&ggtt->base);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002628 }
2629
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002630 ggtt->base.cleanup(&ggtt->base);
Chris Wilson1188bc62017-02-15 08:43:38 +00002631 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01002632
2633 arch_phys_wc_del(ggtt->mtrr);
Chris Wilsonf7bbe782016-08-19 16:54:27 +01002634 io_mapping_fini(&ggtt->mappable);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +02002635}
Daniel Vetter70e32542014-08-06 15:04:57 +02002636
Daniel Vetter2c642b02015-04-14 17:35:26 +02002637static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002638{
2639 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2640 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2641 return snb_gmch_ctl << 20;
2642}
2643
Daniel Vetter2c642b02015-04-14 17:35:26 +02002644static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
Ben Widawsky9459d252013-11-03 16:53:55 -08002645{
2646 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2647 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2648 if (bdw_gmch_ctl)
2649 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
Ben Widawsky562d55d2014-05-27 16:53:08 -07002650
2651#ifdef CONFIG_X86_32
2652 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2653 if (bdw_gmch_ctl > 4)
2654 bdw_gmch_ctl = 4;
2655#endif
2656
Ben Widawsky9459d252013-11-03 16:53:55 -08002657 return bdw_gmch_ctl << 20;
2658}
2659
Daniel Vetter2c642b02015-04-14 17:35:26 +02002660static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002661{
2662 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2663 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2664
2665 if (gmch_ctrl)
2666 return 1 << (20 + gmch_ctrl);
2667
2668 return 0;
2669}
2670
Daniel Vetter2c642b02015-04-14 17:35:26 +02002671static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002672{
2673 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2674 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
Imre Deaka92d1a92017-05-10 12:21:52 +03002675 return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002676}
2677
Daniel Vetter2c642b02015-04-14 17:35:26 +02002678static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
Ben Widawsky9459d252013-11-03 16:53:55 -08002679{
2680 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2681 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
Imre Deaka92d1a92017-05-10 12:21:52 +03002682 return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
Ben Widawsky9459d252013-11-03 16:53:55 -08002683}
2684
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002685static size_t chv_get_stolen_size(u16 gmch_ctrl)
2686{
2687 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2688 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2689
2690 /*
2691 * 0x0 to 0x10: 32MB increments starting at 0MB
2692 * 0x11 to 0x16: 4MB increments starting at 8MB
2693	 * 0x17 to 0x1d: 4MB increments starting at 36MB
2694 */
2695 if (gmch_ctrl < 0x11)
Imre Deaka92d1a92017-05-10 12:21:52 +03002696 return (size_t)gmch_ctrl << 25;
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002697 else if (gmch_ctrl < 0x17)
Imre Deaka92d1a92017-05-10 12:21:52 +03002698 return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002699 else
Imre Deaka92d1a92017-05-10 12:21:52 +03002700 return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002701}
2702
Damien Lespiau66375012014-01-09 18:02:46 +00002703static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2704{
2705 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2706 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2707
2708 if (gen9_gmch_ctl < 0xf0)
Imre Deaka92d1a92017-05-10 12:21:52 +03002709 return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
Damien Lespiau66375012014-01-09 18:02:46 +00002710 else
2711		/* values >= 0xf0: 4MB increments starting at 4MB */
Imre Deaka92d1a92017-05-10 12:21:52 +03002712 return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
Damien Lespiau66375012014-01-09 18:02:46 +00002713}
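/*
 * Worked examples (illustrative) for the stolen-size decoders above:
 *
 *	gen6/gen8: GMS field 0x02  ->  2 << 25              =  64 MB
 *	chv:       GMS field 0x12  -> (0x12 - 0x11 + 2)<<22 =  12 MB
 *	gen9:      GMS field 0xf1  -> (0xf1 - 0xf0 + 1)<<22 =   8 MB
 */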
2714
Chris Wilson34c998b2016-08-04 07:52:24 +01002715static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
Ben Widawsky63340132013-11-04 19:32:22 -08002716{
Chris Wilson49d73912016-11-29 09:50:08 +00002717 struct drm_i915_private *dev_priv = ggtt->base.i915;
2718 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson34c998b2016-08-04 07:52:24 +01002719 phys_addr_t phys_addr;
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002720 int ret;
Ben Widawsky63340132013-11-04 19:32:22 -08002721
2722	/* For modern GENs the PTEs and register space are split in the BAR */
Chris Wilson34c998b2016-08-04 07:52:24 +01002723 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
Ben Widawsky63340132013-11-04 19:32:22 -08002724
Imre Deak2a073f892015-03-27 13:07:33 +02002725 /*
2726 * On BXT writes larger than 64 bit to the GTT pagetable range will be
2727 * dropped. For WC mappings in general we have 64 byte burst writes
2728 * when the WC buffer is flushed, so we can't use it, but have to
2729 * resort to an uncached mapping. The WC issue is easily caught by the
2730 * readback check when writing GTT PTE entries.
2731 */
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002732 if (IS_GEN9_LP(dev_priv))
Chris Wilson34c998b2016-08-04 07:52:24 +01002733 ggtt->gsm = ioremap_nocache(phys_addr, size);
Imre Deak2a073f892015-03-27 13:07:33 +02002734 else
Chris Wilson34c998b2016-08-04 07:52:24 +01002735 ggtt->gsm = ioremap_wc(phys_addr, size);
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002736 if (!ggtt->gsm) {
Chris Wilson34c998b2016-08-04 07:52:24 +01002737 DRM_ERROR("Failed to map the ggtt page table\n");
Ben Widawsky63340132013-11-04 19:32:22 -08002738 return -ENOMEM;
2739 }
2740
Chris Wilson84486612017-02-15 08:43:40 +00002741 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002742 if (ret) {
Ben Widawsky63340132013-11-04 19:32:22 -08002743 DRM_ERROR("Scratch setup failed\n");
2744 /* iounmap will also get called at remove, but meh */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002745 iounmap(ggtt->gsm);
Chris Wilson8bcdd0f72016-08-22 08:44:30 +01002746 return ret;
Ben Widawsky63340132013-11-04 19:32:22 -08002747 }
2748
Mika Kuoppala4ad2af12015-06-30 18:16:39 +03002749 return 0;
Ben Widawsky63340132013-11-04 19:32:22 -08002750}
2751
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002752/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2753 * bits. When using advanced contexts, each context stores its own PAT, but
2754 * writing this data shouldn't be harmful even in those cases. */
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002755static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002756{
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002757 u64 pat;
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002758
2759 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
2760 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2761 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2762 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2763 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2764 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2765 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2766 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2767
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002768 if (!USES_PPGTT(dev_priv))
Rodrigo Vivid6a8b722014-11-05 16:56:36 -08002769 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2770 * so RTL will always use the value corresponding to
2771 * pat_sel = 000".
2772 * So let's disable cache for GGTT to avoid screen corruptions.
2773 * MOCS still can be used though.
2774 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2775 * before this patch, i.e. the same uncached + snooping access
2776 * like on gen6/7 seems to be in effect.
2777 * - So this just fixes blitter/render access. Again it looks
2778 * like it's not just uncached access, but uncached + snooping.
2779 * So we can still hold onto all our assumptions wrt cpu
2780 * clflushing on LLC machines.
2781 */
2782 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2783
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002784 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2785 * write would work. */
Ville Syrjälä7e435ad2015-09-18 20:03:25 +03002786 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2787 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002788}
2789
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002790static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2791{
Chris Wilson75c7b0b2017-02-15 08:43:57 +00002792 u64 pat;
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002793
2794 /*
2795 * Map WB on BDW to snooped on CHV.
2796 *
2797 * Only the snoop bit has meaning for CHV, the rest is
2798 * ignored.
2799 *
Ville Syrjäläcf3d2622014-11-14 21:02:44 +02002800 * The hardware will never snoop for certain types of accesses:
2801 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2802 * - PPGTT page tables
2803 * - some other special cycles
2804 *
2805 * As with BDW, we also need to consider the following for GT accesses:
2806 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2807 * so RTL will always use the value corresponding to
2808 * pat_sel = 000".
2809 * Which means we must set the snoop bit in PAT entry 0
2810 * in order to keep the global status page working.
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002811 */
2812 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2813 GEN8_PPAT(1, 0) |
2814 GEN8_PPAT(2, 0) |
2815 GEN8_PPAT(3, 0) |
2816 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2817 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2818 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2819 GEN8_PPAT(7, CHV_PPAT_SNOOP);
2820
Ville Syrjälä7e435ad2015-09-18 20:03:25 +03002821 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2822 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002823}
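/*
 * Note (illustrative, assuming GEN8_PPAT(i, x) packs x into bits
 * [8i+7:8i] of the 64-bit value): the eight PAT entries above occupy one
 * byte each, so e.g. entry 4 of the BDW table lands in bits [39:32], and
 * the split writes above program entries 0-3 via GEN8_PRIVATE_PAT_LO and
 * entries 4-7 via GEN8_PRIVATE_PAT_HI.
 */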
2824
Chris Wilson34c998b2016-08-04 07:52:24 +01002825static void gen6_gmch_remove(struct i915_address_space *vm)
2826{
2827 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2828
2829 iounmap(ggtt->gsm);
Chris Wilson84486612017-02-15 08:43:40 +00002830 cleanup_scratch_page(vm);
Chris Wilson34c998b2016-08-04 07:52:24 +01002831}
2832
Joonas Lahtinend507d732016-03-18 10:42:58 +02002833static int gen8_gmch_probe(struct i915_ggtt *ggtt)
Ben Widawsky63340132013-11-04 19:32:22 -08002834{
Chris Wilson49d73912016-11-29 09:50:08 +00002835 struct drm_i915_private *dev_priv = ggtt->base.i915;
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002836 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson34c998b2016-08-04 07:52:24 +01002837 unsigned int size;
Ben Widawsky63340132013-11-04 19:32:22 -08002838 u16 snb_gmch_ctl;
Imre Deak45192902017-05-10 12:21:50 +03002839 int err;
Ben Widawsky63340132013-11-04 19:32:22 -08002840
2841 /* TODO: We're not aware of mappable constraints on gen8 yet */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002842 ggtt->mappable_base = pci_resource_start(pdev, 2);
2843 ggtt->mappable_end = pci_resource_len(pdev, 2);
Ben Widawsky63340132013-11-04 19:32:22 -08002844
Imre Deak45192902017-05-10 12:21:50 +03002845 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
2846 if (!err)
2847 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
2848 if (err)
2849 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
Ben Widawsky63340132013-11-04 19:32:22 -08002850
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002851 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
Ben Widawsky63340132013-11-04 19:32:22 -08002852
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002853 if (INTEL_GEN(dev_priv) >= 9) {
Joonas Lahtinend507d732016-03-18 10:42:58 +02002854 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
Chris Wilson34c998b2016-08-04 07:52:24 +01002855 size = gen8_get_total_gtt_size(snb_gmch_ctl);
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002856 } else if (IS_CHERRYVIEW(dev_priv)) {
Joonas Lahtinend507d732016-03-18 10:42:58 +02002857 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
Chris Wilson34c998b2016-08-04 07:52:24 +01002858 size = chv_get_total_gtt_size(snb_gmch_ctl);
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002859 } else {
Joonas Lahtinend507d732016-03-18 10:42:58 +02002860 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
Chris Wilson34c998b2016-08-04 07:52:24 +01002861 size = gen8_get_total_gtt_size(snb_gmch_ctl);
Damien Lespiaud7f25f22014-05-08 22:19:40 +03002862 }
Ben Widawsky63340132013-11-04 19:32:22 -08002863
Chris Wilson34c998b2016-08-04 07:52:24 +01002864 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
Ben Widawsky63340132013-11-04 19:32:22 -08002865
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002866 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
Ville Syrjäläee0ce472014-04-09 13:28:01 +03002867 chv_setup_private_ppat(dev_priv);
2868 else
2869 bdw_setup_private_ppat(dev_priv);
Ben Widawskyfbe5d362013-11-04 19:56:49 -08002870
Chris Wilson34c998b2016-08-04 07:52:24 +01002871 ggtt->base.cleanup = gen6_gmch_remove;
Joonas Lahtinend507d732016-03-18 10:42:58 +02002872 ggtt->base.bind_vma = ggtt_bind_vma;
2873 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilsond6473f52016-06-10 14:22:59 +05302874 ggtt->base.insert_page = gen8_ggtt_insert_page;
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002875 ggtt->base.clear_range = nop_clear_range;
Chris Wilson48f112f2016-06-24 14:07:14 +01002876 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002877 ggtt->base.clear_range = gen8_ggtt_clear_range;
2878
2879 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
Chris Wilsonf7770bf2016-05-14 07:26:35 +01002880
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07002881 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
2882 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
2883 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
2884 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
2885 if (ggtt->base.clear_range != nop_clear_range)
2886 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
2887 }
2888
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002889 ggtt->invalidate = gen6_ggtt_invalidate;
2890
Chris Wilson34c998b2016-08-04 07:52:24 +01002891 return ggtt_probe_common(ggtt, size);
Ben Widawsky63340132013-11-04 19:32:22 -08002892}
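/*
 * Worked example (illustrative): a GGMS field decoding to size = 8 MB of
 * PTE space gives
 *
 *	ggtt->base.total = (8M / sizeof(gen8_pte_t)) << PAGE_SHIFT
 *	                 = (8M / 8) << 12 = 4 GB
 *
 * of GGTT address space, i.e. one 8-byte PTE per 4 KiB page.
 */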
2893
Joonas Lahtinend507d732016-03-18 10:42:58 +02002894static int gen6_gmch_probe(struct i915_ggtt *ggtt)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002895{
Chris Wilson49d73912016-11-29 09:50:08 +00002896 struct drm_i915_private *dev_priv = ggtt->base.i915;
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002897 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson34c998b2016-08-04 07:52:24 +01002898 unsigned int size;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002899 u16 snb_gmch_ctl;
Imre Deak45192902017-05-10 12:21:50 +03002900 int err;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002901
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002902 ggtt->mappable_base = pci_resource_start(pdev, 2);
2903 ggtt->mappable_end = pci_resource_len(pdev, 2);
Ben Widawsky41907dd2013-02-08 11:32:47 -08002904
Ben Widawskybaa09f52013-01-24 13:49:57 -08002905 /* 64/512MB is the current min/max we actually know of, but this is just
2906 * a coarse sanity check.
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002907 */
Chris Wilson34c998b2016-08-04 07:52:24 +01002908 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
Joonas Lahtinend507d732016-03-18 10:42:58 +02002909 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002910 return -ENXIO;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002911 }
2912
Imre Deak45192902017-05-10 12:21:50 +03002913 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2914 if (!err)
2915 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2916 if (err)
2917 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002918 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002919
Joonas Lahtinend507d732016-03-18 10:42:58 +02002920 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002921
Chris Wilson34c998b2016-08-04 07:52:24 +01002922 size = gen6_get_total_gtt_size(snb_gmch_ctl);
2923 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002924
Joonas Lahtinend507d732016-03-18 10:42:58 +02002925 ggtt->base.clear_range = gen6_ggtt_clear_range;
Chris Wilsond6473f52016-06-10 14:22:59 +05302926 ggtt->base.insert_page = gen6_ggtt_insert_page;
Joonas Lahtinend507d732016-03-18 10:42:58 +02002927 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
2928 ggtt->base.bind_vma = ggtt_bind_vma;
2929 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilson34c998b2016-08-04 07:52:24 +01002930 ggtt->base.cleanup = gen6_gmch_remove;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002931
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002932 ggtt->invalidate = gen6_ggtt_invalidate;
2933
Chris Wilson34c998b2016-08-04 07:52:24 +01002934 if (HAS_EDRAM(dev_priv))
2935 ggtt->base.pte_encode = iris_pte_encode;
2936 else if (IS_HASWELL(dev_priv))
2937 ggtt->base.pte_encode = hsw_pte_encode;
2938 else if (IS_VALLEYVIEW(dev_priv))
2939 ggtt->base.pte_encode = byt_pte_encode;
2940 else if (INTEL_GEN(dev_priv) >= 7)
2941 ggtt->base.pte_encode = ivb_pte_encode;
2942 else
2943 ggtt->base.pte_encode = snb_pte_encode;
2944
2945 return ggtt_probe_common(ggtt, size);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002946}
2947
Chris Wilson34c998b2016-08-04 07:52:24 +01002948static void i915_gmch_remove(struct i915_address_space *vm)
Ben Widawskybaa09f52013-01-24 13:49:57 -08002949{
Chris Wilson34c998b2016-08-04 07:52:24 +01002950 intel_gmch_remove();
Ben Widawskybaa09f52013-01-24 13:49:57 -08002951}
2952
Joonas Lahtinend507d732016-03-18 10:42:58 +02002953static int i915_gmch_probe(struct i915_ggtt *ggtt)
Ben Widawskybaa09f52013-01-24 13:49:57 -08002954{
Chris Wilson49d73912016-11-29 09:50:08 +00002955 struct drm_i915_private *dev_priv = ggtt->base.i915;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002956 int ret;
2957
Chris Wilson91c8a322016-07-05 10:40:23 +01002958 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002959 if (!ret) {
2960 DRM_ERROR("failed to set up gmch\n");
2961 return -EIO;
2962 }
2963
Chris Wilsonedd1f2f2017-01-06 15:20:11 +00002964 intel_gtt_get(&ggtt->base.total,
2965 &ggtt->stolen_size,
2966 &ggtt->mappable_base,
2967 &ggtt->mappable_end);
Ben Widawskybaa09f52013-01-24 13:49:57 -08002968
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002969 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
Chris Wilsond6473f52016-06-10 14:22:59 +05302970 ggtt->base.insert_page = i915_ggtt_insert_page;
Joonas Lahtinend507d732016-03-18 10:42:58 +02002971 ggtt->base.insert_entries = i915_ggtt_insert_entries;
2972 ggtt->base.clear_range = i915_ggtt_clear_range;
2973 ggtt->base.bind_vma = ggtt_bind_vma;
2974 ggtt->base.unbind_vma = ggtt_unbind_vma;
Chris Wilson34c998b2016-08-04 07:52:24 +01002975 ggtt->base.cleanup = i915_gmch_remove;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002976
Chris Wilson7c3f86b2017-01-12 11:00:49 +00002977 ggtt->invalidate = gmch_ggtt_invalidate;
2978
Joonas Lahtinend507d732016-03-18 10:42:58 +02002979 if (unlikely(ggtt->do_idle_maps))
Chris Wilsonc0a7f812013-12-30 12:16:15 +00002980 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
2981
Ben Widawskybaa09f52013-01-24 13:49:57 -08002982 return 0;
2983}
2984
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002985/**
Chris Wilson0088e522016-08-04 07:52:21 +01002986 * i915_ggtt_probe_hw - Probe GGTT hardware location
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002987 * @dev_priv: i915 device
Joonas Lahtinend85489d2016-03-24 16:47:46 +02002988 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002989int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
Ben Widawskybaa09f52013-01-24 13:49:57 -08002990{
Joonas Lahtinen62106b42016-03-18 10:42:57 +02002991 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Ben Widawskybaa09f52013-01-24 13:49:57 -08002992 int ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002993
Chris Wilson49d73912016-11-29 09:50:08 +00002994 ggtt->base.i915 = dev_priv;
Chris Wilson84486612017-02-15 08:43:40 +00002995 ggtt->base.dma = &dev_priv->drm.pdev->dev;
Mika Kuoppalac114f762015-06-25 18:35:13 +03002996
Chris Wilson34c998b2016-08-04 07:52:24 +01002997 if (INTEL_GEN(dev_priv) <= 5)
2998 ret = i915_gmch_probe(ggtt);
2999 else if (INTEL_GEN(dev_priv) < 8)
3000 ret = gen6_gmch_probe(ggtt);
3001 else
3002 ret = gen8_gmch_probe(ggtt);
Ben Widawskya54c0c22013-01-24 14:45:00 -08003003 if (ret)
Ben Widawskybaa09f52013-01-24 13:49:57 -08003004 return ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003005
Chris Wilsondb9309a2017-01-05 15:30:23 +00003006 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3007 * This is easier than doing range restriction on the fly, as we
3008 * currently don't have any bits spare to pass in this upper
3009 * restriction!
3010 */
3011 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
3012 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3013 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3014 }
3015
Chris Wilsonc890e2d2016-03-18 10:42:59 +02003016 if ((ggtt->base.total - 1) >> 32) {
3017 DRM_ERROR("We never expected a Global GTT with more than 32bits"
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003018 " of address space! Found %lldM!\n",
Chris Wilsonc890e2d2016-03-18 10:42:59 +02003019 ggtt->base.total >> 20);
3020 ggtt->base.total = 1ULL << 32;
3021 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3022 }
3023
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003024 if (ggtt->mappable_end > ggtt->base.total) {
3025 DRM_ERROR("mappable aperture extends past end of GGTT,"
3026 " aperture=%llx, total=%llx\n",
3027 ggtt->mappable_end, ggtt->base.total);
3028 ggtt->mappable_end = ggtt->base.total;
3029 }
3030
Ben Widawskybaa09f52013-01-24 13:49:57 -08003031 /* GMADR is the PCI mmio aperture into the global GTT. */
Mika Kuoppalac44ef602015-06-25 18:35:05 +03003032 DRM_INFO("Memory usable by graphics device = %lluM\n",
Joonas Lahtinen62106b42016-03-18 10:42:57 +02003033 ggtt->base.total >> 20);
3034 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
Chris Wilsonedd1f2f2017-01-06 15:20:11 +00003035 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
Chris Wilson80debff2017-05-25 13:16:12 +01003036 if (intel_vtd_active())
Daniel Vetter5db6c732014-03-31 16:23:04 +02003037 DRM_INFO("VT-d active for gfx access\n");
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08003038
Ben Widawskye76e9ae2012-11-04 09:21:27 -08003039 return 0;
Chris Wilson0088e522016-08-04 07:52:21 +01003040}
3041
3042/**
3043 * i915_ggtt_init_hw - Initialize GGTT hardware
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003044 * @dev_priv: i915 device
Chris Wilson0088e522016-08-04 07:52:21 +01003045 */
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003046int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
Chris Wilson0088e522016-08-04 07:52:21 +01003047{
Chris Wilson0088e522016-08-04 07:52:21 +01003048 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3049 int ret;
3050
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003051 INIT_LIST_HEAD(&dev_priv->vm_list);
3052
Chris Wilsona6508de2017-02-06 08:45:47 +00003053 /* Note that we use page colouring to enforce a guard page at the
3054 * end of the address space. This is required as the CS may prefetch
3055 * beyond the end of the batch buffer, across the page boundary,
3056 * and beyond the end of the GTT if we do not provide a guard.
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003057 */
Chris Wilson80b204b2016-10-28 13:58:58 +01003058 mutex_lock(&dev_priv->drm.struct_mutex);
Chris Wilson80b204b2016-10-28 13:58:58 +01003059 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
Chris Wilsona6508de2017-02-06 08:45:47 +00003060 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003061 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
Chris Wilson80b204b2016-10-28 13:58:58 +01003062 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003063
Chris Wilsonf7bbe782016-08-19 16:54:27 +01003064 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3065 dev_priv->ggtt.mappable_base,
3066 dev_priv->ggtt.mappable_end)) {
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01003067 ret = -EIO;
3068 goto out_gtt_cleanup;
3069 }
3070
3071 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3072
Chris Wilson0088e522016-08-04 07:52:21 +01003073 /*
3074 * Initialise stolen early so that we may reserve preallocated
3075 * objects for the BIOS to KMS transition.
3076 */
Tvrtko Ursulin7ace3d32016-11-16 08:55:35 +00003077 ret = i915_gem_init_stolen(dev_priv);
Chris Wilson0088e522016-08-04 07:52:21 +01003078 if (ret)
3079 goto out_gtt_cleanup;
3080
3081 return 0;
Imre Deaka4eba472016-01-19 15:26:32 +02003082
3083out_gtt_cleanup:
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003084 ggtt->base.cleanup(&ggtt->base);
Imre Deaka4eba472016-01-19 15:26:32 +02003085 return ret;
Daniel Vetter644ec022012-03-26 09:45:40 +02003086}
Ben Widawsky6f65e292013-12-06 14:10:56 -08003087
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003088int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
Ville Syrjäläac840ae2016-05-06 21:35:55 +03003089{
Chris Wilson97d6d7a2016-08-04 07:52:22 +01003090 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
Ville Syrjäläac840ae2016-05-06 21:35:55 +03003091 return -EIO;
3092
3093 return 0;
3094}
3095
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003096void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3097{
Chris Wilson04f7b24e2017-06-01 10:04:46 +01003098 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3099
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003100 i915->ggtt.invalidate = guc_ggtt_invalidate;
3101}
3102
3103void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3104{
Chris Wilson04f7b24e2017-06-01 10:04:46 +01003105 /* We should only be called after i915_ggtt_enable_guc() */
3106 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3107
3108 i915->ggtt.invalidate = gen6_ggtt_invalidate;
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003109}
3110
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00003111void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
Daniel Vetterfa423312015-04-14 17:35:23 +02003112{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003113 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003114 struct drm_i915_gem_object *obj, *on;
Daniel Vetterfa423312015-04-14 17:35:23 +02003115
Chris Wilsondc979972016-05-10 14:10:04 +01003116 i915_check_and_clear_faults(dev_priv);
Daniel Vetterfa423312015-04-14 17:35:23 +02003117
3118 /* First fill our portion of the GTT with scratch pages */
Chris Wilson381b9432017-02-15 08:43:54 +00003119 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
Daniel Vetterfa423312015-04-14 17:35:23 +02003120
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003121 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3122
3123 /* clflush objects bound into the GGTT and rebind them. */
3124 list_for_each_entry_safe(obj, on,
Joonas Lahtinen56cea322016-11-02 12:16:04 +02003125 &dev_priv->mm.bound_list, global_link) {
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003126 bool ggtt_bound = false;
3127 struct i915_vma *vma;
3128
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003129 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003130 if (vma->vm != &ggtt->base)
Tvrtko Ursulin2c3d9982015-07-06 15:15:01 +01003131 continue;
Daniel Vetterfa423312015-04-14 17:35:23 +02003132
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003133 if (!i915_vma_unbind(vma))
3134 continue;
3135
Tvrtko Ursulin2c3d9982015-07-06 15:15:01 +01003136 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3137 PIN_UPDATE));
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003138 ggtt_bound = true;
Tvrtko Ursulin2c3d9982015-07-06 15:15:01 +01003139 }
3140
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003141 if (ggtt_bound)
Chris Wilson975f7ff2016-05-14 07:26:34 +01003142 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
Daniel Vetterfa423312015-04-14 17:35:23 +02003143 }
3144
Chris Wilsonfbb30a5c2016-09-09 21:19:57 +01003145 ggtt->base.closed = false;
3146
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00003147 if (INTEL_GEN(dev_priv) >= 8) {
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02003148 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
Daniel Vetterfa423312015-04-14 17:35:23 +02003149 chv_setup_private_ppat(dev_priv);
3150 else
3151 bdw_setup_private_ppat(dev_priv);
3152
3153 return;
3154 }
3155
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00003156 if (USES_PPGTT(dev_priv)) {
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003157 struct i915_address_space *vm;
3158
Daniel Vetterfa423312015-04-14 17:35:23 +02003159 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
Joonas Lahtinene5716f52016-04-07 11:08:03 +03003160 struct i915_hw_ppgtt *ppgtt;
Daniel Vetterfa423312015-04-14 17:35:23 +02003161
Chris Wilson2bfa9962016-08-04 07:52:25 +01003162 if (i915_is_ggtt(vm))
Daniel Vetterfa423312015-04-14 17:35:23 +02003163 ppgtt = dev_priv->mm.aliasing_ppgtt;
Joonas Lahtinene5716f52016-04-07 11:08:03 +03003164 else
3165 ppgtt = i915_vm_to_ppgtt(vm);
Daniel Vetterfa423312015-04-14 17:35:23 +02003166
Chris Wilson16a011c2017-02-15 08:43:45 +00003167 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
Daniel Vetterfa423312015-04-14 17:35:23 +02003168 }
3169 }
3170
Chris Wilson7c3f86b2017-01-12 11:00:49 +00003171 i915_ggtt_invalidate(dev_priv);
Daniel Vetterfa423312015-04-14 17:35:23 +02003172}
3173
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003174static struct scatterlist *
Ville Syrjälä2d7f3bd2016-01-14 15:22:11 +02003175rotate_pages(const dma_addr_t *in, unsigned int offset,
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003176 unsigned int width, unsigned int height,
Ville Syrjälä87130252016-01-20 21:05:23 +02003177 unsigned int stride,
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003178 struct sg_table *st, struct scatterlist *sg)
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003179{
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003180 unsigned int column, row;
3181 unsigned int src_idx;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003182
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003183 for (column = 0; column < width; column++) {
Ville Syrjälä87130252016-01-20 21:05:23 +02003184 src_idx = stride * (height - 1) + column;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003185 for (row = 0; row < height; row++) {
3186 st->nents++;
3187 /* We don't need the pages, but need to initialize
3188 * the entries so the sg list can be happily traversed.
3189			 * The only things we need are the DMA addresses.
3190 */
3191 sg_set_page(sg, NULL, PAGE_SIZE, 0);
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003192 sg_dma_address(sg) = in[offset + src_idx];
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003193 sg_dma_len(sg) = PAGE_SIZE;
3194 sg = sg_next(sg);
Ville Syrjälä87130252016-01-20 21:05:23 +02003195 src_idx -= stride;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003196 }
3197 }
Tvrtko Ursulin804beb42015-09-21 10:45:33 +01003198
3199 return sg;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003200}
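/*
 * Worked example (illustrative): width = 2, height = 3, stride = 2, with
 * source pages laid out row-major:
 *
 *	0 1
 *	2 3
 *	4 5
 *
 * For column 0, src_idx starts at stride * (height - 1) = 4 and steps
 * down by stride, so the output order is 4 2 0 5 3 1: each column is
 * emitted bottom-up, producing the rotated layout scanout expects.
 */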
3201
Chris Wilsonba7a5742017-02-15 08:43:35 +00003202static noinline struct sg_table *
3203intel_rotate_pages(struct intel_rotation_info *rot_info,
3204 struct drm_i915_gem_object *obj)
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003205{
Chris Wilson75c7b0b2017-02-15 08:43:57 +00003206 const unsigned long n_pages = obj->base.size / PAGE_SIZE;
Ville Syrjälä6687c902015-09-15 13:16:41 +03003207 unsigned int size = intel_rotation_info_size(rot_info);
Dave Gordon85d12252016-05-20 11:54:06 +01003208 struct sgt_iter sgt_iter;
3209 dma_addr_t dma_addr;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003210 unsigned long i;
3211 dma_addr_t *page_addr_list;
3212 struct sg_table *st;
Tvrtko Ursulin89e3e142015-09-21 10:45:34 +01003213 struct scatterlist *sg;
Tvrtko Ursulin1d00dad2015-03-25 10:15:26 +00003214 int ret = -ENOMEM;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003215
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003216 /* Allocate a temporary list of source pages for random access. */
Michal Hocko20981052017-05-17 14:23:12 +02003217 page_addr_list = kvmalloc_array(n_pages,
Chris Wilsonf2a85e12016-04-08 12:11:13 +01003218 sizeof(dma_addr_t),
3219 GFP_TEMPORARY);
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003220 if (!page_addr_list)
3221 return ERR_PTR(ret);
3222
3223 /* Allocate target SG list. */
3224 st = kmalloc(sizeof(*st), GFP_KERNEL);
3225 if (!st)
3226 goto err_st_alloc;
3227
Ville Syrjälä6687c902015-09-15 13:16:41 +03003228 ret = sg_alloc_table(st, size, GFP_KERNEL);
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003229 if (ret)
3230 goto err_sg_alloc;
3231
3232 /* Populate source page list from the object. */
3233 i = 0;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003234 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
Dave Gordon85d12252016-05-20 11:54:06 +01003235 page_addr_list[i++] = dma_addr;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003236
Dave Gordon85d12252016-05-20 11:54:06 +01003237 GEM_BUG_ON(i != n_pages);
Ville Syrjälä11f20322016-02-15 22:54:46 +02003238 st->nents = 0;
3239 sg = st->sgl;
3240
Ville Syrjälä6687c902015-09-15 13:16:41 +03003241 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3242 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3243 rot_info->plane[i].width, rot_info->plane[i].height,
3244 rot_info->plane[i].stride, st, sg);
Tvrtko Ursulin89e3e142015-09-21 10:45:34 +01003245 }
3246
Ville Syrjälä6687c902015-09-15 13:16:41 +03003247 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3248 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003249
Michal Hocko20981052017-05-17 14:23:12 +02003250 kvfree(page_addr_list);
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003251
3252 return st;
3253
3254err_sg_alloc:
3255 kfree(st);
3256err_st_alloc:
Michal Hocko20981052017-05-17 14:23:12 +02003257 kvfree(page_addr_list);
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003258
Ville Syrjälä6687c902015-09-15 13:16:41 +03003259 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3260 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3261
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003262 return ERR_PTR(ret);
3263}
3264
Chris Wilsonba7a5742017-02-15 08:43:35 +00003265static noinline struct sg_table *
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003266intel_partial_pages(const struct i915_ggtt_view *view,
3267 struct drm_i915_gem_object *obj)
3268{
3269 struct sg_table *st;
Chris Wilsond2a84a72016-10-28 13:58:34 +01003270 struct scatterlist *sg, *iter;
Chris Wilson8bab11932017-01-14 00:28:25 +00003271 unsigned int count = view->partial.size;
Chris Wilsond2a84a72016-10-28 13:58:34 +01003272 unsigned int offset;
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003273 int ret = -ENOMEM;
3274
3275 st = kmalloc(sizeof(*st), GFP_KERNEL);
3276 if (!st)
3277 goto err_st_alloc;
3278
Chris Wilsond2a84a72016-10-28 13:58:34 +01003279 ret = sg_alloc_table(st, count, GFP_KERNEL);
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003280 if (ret)
3281 goto err_sg_alloc;
3282
Chris Wilson8bab11932017-01-14 00:28:25 +00003283 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
Chris Wilsond2a84a72016-10-28 13:58:34 +01003284 GEM_BUG_ON(!iter);
3285
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003286 sg = st->sgl;
3287 st->nents = 0;
Chris Wilsond2a84a72016-10-28 13:58:34 +01003288 do {
3289 unsigned int len;
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003290
Chris Wilsond2a84a72016-10-28 13:58:34 +01003291 len = min(iter->length - (offset << PAGE_SHIFT),
3292 count << PAGE_SHIFT);
3293 sg_set_page(sg, NULL, len, 0);
3294 sg_dma_address(sg) =
3295 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3296 sg_dma_len(sg) = len;
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003297
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003298 st->nents++;
Chris Wilsond2a84a72016-10-28 13:58:34 +01003299 count -= len >> PAGE_SHIFT;
3300 if (count == 0) {
3301 sg_mark_end(sg);
3302 return st;
3303 }
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003304
Chris Wilsond2a84a72016-10-28 13:58:34 +01003305 sg = __sg_next(sg);
3306 iter = __sg_next(iter);
3307 offset = 0;
3308 } while (1);
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +03003309
3310err_sg_alloc:
3311 kfree(st);
3312err_st_alloc:
3313 return ERR_PTR(ret);
3314}
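/*
 * Worked example (illustrative): a partial view of 3 pages starting at
 * page 5 of an object backed by a single 16-page (64 KiB) sg entry.
 * i915_gem_object_get_sg() returns that entry with offset = 5, and the
 * loop emits one new entry of len = min(16 - 5, 3) pages = 3 pages at
 * sg_dma_address(iter) + 5 * PAGE_SIZE, then marks the list end.
 */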
3315
Daniel Vetter70b9f6f2015-04-14 17:35:27 +02003316static int
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003317i915_get_ggtt_vma_pages(struct i915_vma *vma)
3318{
Chris Wilsonba7a5742017-02-15 08:43:35 +00003319 int ret;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003320
Chris Wilson2c3a3f42016-11-04 10:30:01 +00003321 /* The vma->pages are only valid within the lifespan of the borrowed
3322	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, the
3323	 * vma->pages must be regenerated with it. A simple rule is that vma->pages must only
3324 * be accessed when the obj->mm.pages are pinned.
3325 */
3326 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3327
Chris Wilsonba7a5742017-02-15 08:43:35 +00003328 switch (vma->ggtt_view.type) {
3329 case I915_GGTT_VIEW_NORMAL:
3330 vma->pages = vma->obj->mm.pages;
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003331 return 0;
3332
Chris Wilsonba7a5742017-02-15 08:43:35 +00003333 case I915_GGTT_VIEW_ROTATED:
Chris Wilson247177d2016-08-15 10:48:47 +01003334 vma->pages =
Chris Wilsonba7a5742017-02-15 08:43:35 +00003335 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3336 break;
3337
3338 case I915_GGTT_VIEW_PARTIAL:
Chris Wilson247177d2016-08-15 10:48:47 +01003339 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
Chris Wilsonba7a5742017-02-15 08:43:35 +00003340 break;
3341
3342 default:
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003343 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3344 vma->ggtt_view.type);
Chris Wilsonba7a5742017-02-15 08:43:35 +00003345 return -EINVAL;
3346 }
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003347
Chris Wilsonba7a5742017-02-15 08:43:35 +00003348 ret = 0;
3349 if (unlikely(IS_ERR(vma->pages))) {
Chris Wilson247177d2016-08-15 10:48:47 +01003350 ret = PTR_ERR(vma->pages);
3351 vma->pages = NULL;
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003352 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3353 vma->ggtt_view.type, ret);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003354 }
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00003355 return ret;
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003356}

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
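
/*
 * Hedged usage sketch, not part of the original file: reserving a node at
 * a fixed GGTT offset. The size, offset and flags are arbitrary example
 * values; the real caller is e.g. i915_vma_insert() with PIN_OFFSET_FIXED.
 */
static int __maybe_unused
example_reserve_fixed(struct i915_address_space *vm, struct drm_mm_node *node)
{
	/* 64 KiB at offset 1 MiB; both satisfy the alignment GEM_BUG_ONs */
	return i915_gem_gtt_reserve(vm, node,
				    1ull << 16, 1ull << 20,
				    I915_COLOR_UNEVICTABLE, PIN_NONBLOCK);
}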

static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
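		/* addr is now uniform in [0, range); e.g. with start = 0,
		 * end = 1 MiB, len = 64 KiB and align = 4 KiB, range is
		 * 960 KiB and the aligned result below lands in [0, 960 KiB].
		 */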
		start += addr;
	}

	return round_up(start, align);
}

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction, and failing that the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;
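
	/* Placement policy: PIN_MAPPABLE takes precedence over PIN_HIGH,
	 * since a mappable node must sit in the CPU-accessible aperture at
	 * the bottom of the GGTT, whereas PIN_HIGH is only a preference
	 * for the top of the range.
	 */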
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)            (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between the different GTTs, try the
	 * single random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
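
/*
 * Hedged usage sketch, not part of the original file: a range-restricted
 * allocation asking for 2 MiB anywhere in the low 4 GiB of the GTT,
 * preferring high addresses. Values are arbitrary; the caller must hold
 * struct_mutex, as asserted above.
 */
static int __maybe_unused
example_insert_low4g(struct i915_address_space *vm, struct drm_mm_node *node)
{
	return i915_gem_gtt_insert(vm, node,
				   2ull << 20 /* size */, 0 /* alignment */,
				   I915_COLOR_UNEVICTABLE,
				   0 /* start */, 1ull << 32 /* end */,
				   PIN_HIGH);
}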

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif