/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
	if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
		return false;

	if (i915.enable_ppgtt == 1 && full)
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return false;
	}
#endif

	/* Full ppgtt disabled by default for now due to issues. */
	if (full)
		return false; /* HAS_PPGTT(dev) */
	else
		return HAS_ALIASING_PPGTT(dev);
}

#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))

#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
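
/*
 * Worked example (illustrative only, not used by the driver): for a 40-bit
 * DMA address such as 0x12_3456_7000, GEN6_GTT_ADDR_ENCODE() folds address
 * bits 39:32 (0x12) into PTE bits 11:4, i.e. ((addr >> 28) & 0xff0) = 0x120,
 * so the low 32 bits of the encoded entry become 0x34567120 before the valid
 * and cacheability bits are ORed in.
 */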

#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)

/* Cacheability Control is a 4-bit value. The low three bits are stored in
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
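
/*
 * Worked example (illustrative only): HSW_CACHEABILITY_CONTROL(0xb) splits
 * the 4-bit cacheability value 0xb into (0xb & 0x7) << 1 = 0x6 for PTE bits
 * 3:1 and (0xb & 0x8) << 8 = 0x800 for PTE bit 11, giving an encoding of
 * 0x806.
 */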

#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))

/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 */
#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK 0x3
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
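
/*
 * Worked example (illustrative only): with the shifts and masks above, the
 * GPU virtual address 0x42345678 decomposes as pdpe = 1, pde = 0x11,
 * pte = 0x145 and a byte offset of 0x678 within the 4KiB page.
 */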

#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLC/eLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */

static void ppgtt_bind_vma(struct i915_vma *vma,
			   enum i915_cache_level cache_level,
			   u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);

static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
					     enum i915_cache_level level,
					     bool valid)
{
	gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
	pte |= addr;
	if (level != I915_CACHE_NONE)
		pte |= PPAT_CACHED_INDEX;
	else
		pte |= PPAT_UNCACHED_INDEX;
	return pte;
}

static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
					       dma_addr_t addr,
					       enum i915_cache_level level)
{
	gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

#define BYT_PTE_WRITEABLE (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)

static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable. Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level,
				      bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
			  uint64_t val, bool synchronous)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	BUG_ON(entry >= 4);

	if (synchronous) {
		I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
		I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
		return 0;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
	intel_ring_emit(ring, (u32)(val >> 32));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
	intel_ring_emit(ring, (u32)(val));
	intel_ring_advance(ring);

	return 0;
}

static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_ring_buffer *ring,
			  bool synchronous)
{
	int i, ret;

	/* bit of a hack to find the actual last used pd */
	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;

	for (i = used_pd - 1; i >= 0; i--) {
		dma_addr_t addr = ppgtt->pd_dma_addr[i];
		ret = gen8_write_pdp(ring, i, addr, synchronous);
		if (ret)
			return ret;
	}

	return 0;
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned last_pte, i;

	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
				      I915_CACHE_LLC, use_scratch);

	while (num_entries) {
		struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];

		last_pte = pte + num_entries;
		if (last_pte > GEN8_PTES_PER_PAGE)
			last_pte = GEN8_PTES_PER_PAGE;

		pt_vaddr = kmap_atomic(page_table);

		for (i = pte; i < last_pte; i++) {
			pt_vaddr[i] = scratch_pte;
			num_entries--;
		}

		kunmap_atomic(pt_vaddr);

		pte = 0;
		if (++pde == GEN8_PDES_PER_PAGE) {
			pdpe++;
			pde = 0;
		}
	}
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;

	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
			break;

		if (pt_vaddr == NULL)
			pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
					cache_level, true);
		if (++pte == GEN8_PTES_PER_PAGE) {
			kunmap_atomic(pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == GEN8_PDES_PER_PAGE) {
				pdpe++;
				pde = 0;
			}
			pte = 0;
		}
	}
	if (pt_vaddr)
		kunmap_atomic(pt_vaddr);
}

static void gen8_free_page_tables(struct page **pt_pages)
{
	int i;

	if (pt_pages == NULL)
		return;

	for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
		if (pt_pages[i])
			__free_pages(pt_pages[i], 0);
}

static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
		kfree(ppgtt->gen8_pt_pages[i]);
		kfree(ppgtt->gen8_pt_dma_addr[i]);
	}

	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}

static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
	int i, j;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		/* TODO: In the future we'll support sparse mappings, so this
		 * will have to change. */
		if (!ppgtt->pd_dma_addr[i])
			continue;

		pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			if (addr)
				pci_unmap_page(hwdev, addr, PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}
	}
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	list_del(&vm->global_link);
	drm_mm_takedown(&vm->mm);

	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
}

static struct page **__gen8_alloc_page_tables(void)
{
	struct page **pt_pages;
	int i;

	pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
	if (!pt_pages)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
		pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!pt_pages[i])
			goto bail;
	}

	return pt_pages;

bail:
	gen8_free_page_tables(pt_pages);
	kfree(pt_pages);
	return ERR_PTR(-ENOMEM);
}

static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
					   const int max_pdp)
{
	struct page **pt_pages[GEN8_LEGACY_PDPS];
	int i, ret;

	for (i = 0; i < max_pdp; i++) {
		pt_pages[i] = __gen8_alloc_page_tables();
		if (IS_ERR(pt_pages[i])) {
			ret = PTR_ERR(pt_pages[i]);
			goto unwind_out;
		}
	}

	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation
	 * "atomic" - for cleanup purposes.
	 */
	for (i = 0; i < max_pdp; i++)
		ppgtt->gen8_pt_pages[i] = pt_pages[i];

	return 0;

unwind_out:
	while (i--) {
		gen8_free_page_tables(pt_pages[i]);
		kfree(pt_pages[i]);
	}

	return ret;
}

static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
						     sizeof(dma_addr_t),
						     GFP_KERNEL);
		if (!ppgtt->gen8_pt_dma_addr[i])
			return -ENOMEM;
	}

	return 0;
}

static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
						const int max_pdp)
{
	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
	if (!ppgtt->pd_pages)
		return -ENOMEM;

	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);

	return 0;
}

static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
			    const int max_pdp)
{
	int ret;

	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
	if (ret)
		return ret;

	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
	if (ret) {
		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
		return ret;
	}

	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;

	ret = gen8_ppgtt_allocate_dma(ppgtt);
	if (ret)
		gen8_ppgtt_free(ppgtt);

	return ret;
}

static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
					     const int pd)
{
	dma_addr_t pd_addr;
	int ret;

	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
			       &ppgtt->pd_pages[pd], 0,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
	if (ret)
		return ret;

	ppgtt->pd_dma_addr[pd] = pd_addr;

	return 0;
}

static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
					const int pd,
					const int pt)
{
	dma_addr_t pt_addr;
	struct page *p;
	int ret;

	p = ppgtt->gen8_pt_pages[pd][pt];
	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
	if (ret)
		return ret;

	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;

	return 0;
}

/**
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b
 * address space.
 *
 * FIXME: split allocation into smaller pieces. For now we only ever do this
 * once, but with full PPGTT, the multiple contiguous allocations will be bad.
 * TODO: Do something with the size parameter
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
	int i, j, ret;

	if (size % (1<<30))
		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);

	/* 1. Do all our allocations for page directories and page tables. */
	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
	if (ret)
		return ret;

	/*
	 * 2. Create DMA mappings for the page directories and page tables.
	 */
	for (i = 0; i < max_pdp; i++) {
		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
		if (ret)
			goto bail;

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
			if (ret)
				goto bail;
		}
	}

	/*
	 * 3. Map all the page directory entries to point to the page tables
	 * we've allocated.
	 *
	 * For now, the PPGTT helper functions all require that the PDEs are
	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
	 * will never need to touch the PDEs again.
	 */
	for (i = 0; i < max_pdp; i++) {
		gen8_ppgtt_pde_t *pd_vaddr;
		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
						      I915_CACHE_LLC);
		}
		kunmap_atomic(pd_vaddr);
	}

	ppgtt->enable = gen8_ppgtt_enable;
	ppgtt->switch_mm = gen8_mm_switch;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
			 ppgtt->num_pd_entries,
			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
	return 0;

bail:
	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
	return ret;
}
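
/*
 * Sizing example (illustrative only, derived from the code above): a 4GB
 * request gives max_pdp = 4 page directory pages, 4 * 512 = 2048 PDEs and
 * therefore 2048 page table pages of 512 PTEs each, i.e. one 4KiB page
 * table per 2MiB of GPU address space.
 */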

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;
	gen6_gtt_pte_t __iomem *pd_addr;
	gen6_gtt_pte_t scratch_pte;
	uint32_t pd_entry;
	int pte, pde;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);

	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);

	seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
		   ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
		u32 expected;
		gen6_gtt_pte_t *pt_vaddr;
		dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
		pd_entry = readl(pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
		for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(pt_vaddr);
	}
}

static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}

static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	BUG_ON(ppgtt->pd_offset & 0x3f);

	return (ppgtt->pd_offset / 64) << 16;
}
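
/*
 * Illustrative note (derived only from the code above): the page directory
 * offset is expressed in 64-byte (16 PTE) units and placed in the upper half
 * of the value written to PP_DIR_BASE, e.g. a pd_offset of 0x40000 bytes
 * becomes (0x40000 / 64) << 16 = 0x10000000.
 */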

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct intel_ring_buffer *ring,
			 bool synchronous)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* If we're in reset, we can assume the GPU is sufficiently idle to
	 * manually frob these bits. Ideally we could use the ring functions,
	 * except our error handling makes it quite difficult (can't use
	 * intel_ring_begin, ring->flush, or intel_ring_advance)
	 *
	 * FIXME: We should try not to special case reset
	 */
	if (synchronous ||
	    i915_reset_in_progress(&dev_priv->gpu_error)) {
		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
		POSTING_READ(RING_PP_DIR_BASE(ring));
		return 0;
	}

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_ring_buffer *ring,
			  bool synchronous)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* If we're in reset, we can assume the GPU is sufficiently idle to
	 * manually frob these bits. Ideally we could use the ring functions,
	 * except our error handling makes it quite difficult (can't use
	 * intel_ring_begin, ring->flush, or intel_ring_advance)
	 *
	 * FIXME: We should try not to special case reset
	 */
	if (synchronous ||
	    i915_reset_in_progress(&dev_priv->gpu_error)) {
		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
		POSTING_READ(RING_PP_DIR_BASE(ring));
		return 0;
	}

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* XXX: RCS is the only one to auto invalidate the TLBs? */
	if (ring->id != RCS) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_ring_buffer *ring,
			  bool synchronous)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!synchronous)
		return 0;

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));

	POSTING_READ(RING_PP_DIR_DCLV(ring));

	return 0;
}

static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int j, ret;

	for_each_ring(ring, dev_priv, j) {
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		/* We promise to do a switch later with FULL PPGTT. If this is
		 * aliasing, this is the one and only switch we'll do */
		if (USES_FULL_PPGTT(dev))
			continue;

		ret = ppgtt->switch_mm(ppgtt, ring, true);
		if (ret)
			goto err_out;
	}

	return 0;

err_out:
	for_each_ring(ring, dev_priv, j)
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
	return ret;
}

static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	uint32_t ecochk, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_ring(ring, dev_priv, i) {
		int ret;
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		/* We promise to do a switch later with FULL PPGTT. If this is
		 * aliasing, this is the one and only switch we'll do */
		if (USES_FULL_PPGTT(dev))
			continue;

		ret = ppgtt->switch_mm(ppgtt, ring, true);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	uint32_t ecochk, gab_ctl, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

	for_each_ring(ring, dev_priv, i) {
		int ret = ppgtt->switch_mm(ppgtt, ring, true);
		if (ret)
			return ret;
	}

	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		pt_vaddr[act_pte] =
			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
				       cache_level, true);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}
	if (pt_vaddr)
		kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
}

static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	list_del(&vm->global_link);
	drm_mm_takedown(&ppgtt->base.mm);
	drm_mm_remove_node(&ppgtt->node);

	gen6_ppgtt_unmap_pages(ppgtt);
	gen6_ppgtt_free(ppgtt);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool retried = false;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
alloc:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
						  &ppgtt->node, GEN6_PD_SIZE,
						  GEN6_PD_ALIGN, 0,
						  0, dev_priv->gtt.base.total,
						  DRM_MM_SEARCH_DEFAULT);
	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
					       I915_CACHE_NONE, 0);
		if (ret)
			return ret;

		retried = true;
		goto alloc;
	}

	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	return ret;
}

static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
				  GFP_KERNEL);

	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i]) {
			gen6_ppgtt_free(ppgtt);
			return -ENOMEM;
		}
	}

	return 0;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	int ret;

	ret = gen6_ppgtt_allocate_page_directories(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_allocate_page_tables(ppgtt);
	if (ret) {
		drm_mm_remove_node(&ppgtt->node);
		return ret;
	}

	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr) {
		drm_mm_remove_node(&ppgtt->node);
		gen6_ppgtt_free(ppgtt);
		return -ENOMEM;
	}

	return 0;
}

static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	int i;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			gen6_ppgtt_unmap_pages(ppgtt);
			return -EIO;
		}

		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	return 0;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	if (IS_GEN6(dev)) {
		ppgtt->enable = gen6_ppgtt_enable;
		ppgtt->switch_mm = gen6_mm_switch;
	} else if (IS_HASWELL(dev)) {
		ppgtt->enable = gen7_ppgtt_enable;
		ppgtt->switch_mm = hsw_mm_switch;
	} else if (IS_GEN7(dev)) {
		ppgtt->enable = gen7_ppgtt_enable;
		ppgtt->switch_mm = gen7_mm_switch;
	} else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_setup_page_tables(ppgtt);
	if (ret) {
		gen6_ppgtt_free(ppgtt);
		return ret;
	}

	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	return 0;
}
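
/*
 * Sizing example (illustrative only, assuming 4-byte gen6 PTEs): each of the
 * 512 page directory entries points at a page table holding
 * PAGE_SIZE / 4 = 1024 PTEs, so the per-process address space spans
 * 512 * 1024 * 4096 bytes = 2GB.
 */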

int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	ppgtt->base.dev = dev;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else if (IS_GEN8(dev))
		ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
	else
		BUG();

	if (!ret) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		kref_init(&ppgtt->ref);
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
			    ppgtt->base.total);
		i915_init_vm(dev_priv, &ppgtt->base);
		if (INTEL_INFO(dev)->gen < 8) {
			gen6_write_pdes(ppgtt);
			DRM_DEBUG("Adding PPGTT at offset %x\n",
				  ppgtt->pd_offset << 10);
		}
	}

	return ret;
}

static void
ppgtt_bind_vma(struct i915_vma *vma,
	       enum i915_cache_level cache_level,
	       u32 flags)
{
	WARN_ON(flags);

	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level);
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}
1297
Ben Widawsky828c7902013-10-16 09:21:30 -07001298void i915_check_and_clear_faults(struct drm_device *dev)
1299{
1300 struct drm_i915_private *dev_priv = dev->dev_private;
1301 struct intel_ring_buffer *ring;
1302 int i;
1303
1304 if (INTEL_INFO(dev)->gen < 6)
1305 return;
1306
1307 for_each_ring(ring, dev_priv, i) {
1308 u32 fault_reg;
1309 fault_reg = I915_READ(RING_FAULT_REG(ring));
1310 if (fault_reg & RING_FAULT_VALID) {
1311 DRM_DEBUG_DRIVER("Unexpected fault\n"
1312 "\tAddr: 0x%08lx\\n"
1313 "\tAddress space: %s\n"
1314 "\tSource ID: %d\n"
1315 "\tType: %d\n",
1316 fault_reg & PAGE_MASK,
1317 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
1318 RING_FAULT_SRCID(fault_reg),
1319 RING_FAULT_FAULT_TYPE(fault_reg));
1320 I915_WRITE(RING_FAULT_REG(ring),
1321 fault_reg & ~RING_FAULT_VALID);
1322 }
1323 }
1324 POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
1325}
1326
1327void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
1328{
1329 struct drm_i915_private *dev_priv = dev->dev_private;
1330
1331 /* Don't bother messing with faults pre GEN6 as we have little
1332 * documentation supporting that it's a good idea.
1333 */
1334 if (INTEL_INFO(dev)->gen < 6)
1335 return;
1336
1337 i915_check_and_clear_faults(dev);
1338
1339 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
Ben Widawsky782f1492014-02-20 11:50:33 -08001340 dev_priv->gtt.base.start,
1341 dev_priv->gtt.base.total,
Ben Widawsky828c7902013-10-16 09:21:30 -07001342 false);
1343}
1344
Daniel Vetter76aaf222010-11-05 22:23:30 +01001345void i915_gem_restore_gtt_mappings(struct drm_device *dev)
1346{
1347 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001348 struct drm_i915_gem_object *obj;
Ben Widawsky80da2162013-12-06 14:11:17 -08001349 struct i915_address_space *vm;
Daniel Vetter76aaf222010-11-05 22:23:30 +01001350
Ben Widawsky828c7902013-10-16 09:21:30 -07001351 i915_check_and_clear_faults(dev);
1352
Chris Wilsonbee4a182011-01-21 10:54:32 +00001353 /* First fill our portion of the GTT with scratch pages */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001354 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
Ben Widawsky782f1492014-02-20 11:50:33 -08001355 dev_priv->gtt.base.start,
1356 dev_priv->gtt.base.total,
Ben Widawsky828c7902013-10-16 09:21:30 -07001357 true);
Chris Wilsonbee4a182011-01-21 10:54:32 +00001358
Ben Widawsky35c20a62013-05-31 11:28:48 -07001359 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
Ben Widawsky6f65e292013-12-06 14:10:56 -08001360 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
1361 &dev_priv->gtt.base);
1362 if (!vma)
1363 continue;
1364
Chris Wilson2c225692013-08-09 12:26:45 +01001365 i915_gem_clflush_object(obj, obj->pin_display);
Ben Widawsky6f65e292013-12-06 14:10:56 -08001366 /* The bind_vma code tries to be smart about tracking mappings.
1367 * Unfortunately above, we've just wiped out the mappings
1368 * without telling our object about it. So we need to fake it.
1369 */
1370 obj->has_global_gtt_mapping = 0;
1371 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
Daniel Vetter76aaf222010-11-05 22:23:30 +01001372 }
1373
Ben Widawsky80da2162013-12-06 14:11:17 -08001374
1375 if (INTEL_INFO(dev)->gen >= 8)
1376 return;
1377
1378 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
1379 /* TODO: Perhaps it shouldn't be gen6 specific */
1380 if (i915_is_ggtt(vm)) {
1381 if (dev_priv->mm.aliasing_ppgtt)
1382 gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
1383 continue;
1384 }
1385
1386 gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
Daniel Vetter76aaf222010-11-05 22:23:30 +01001387 }
1388
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001389 i915_gem_chipset_flush(dev);
Daniel Vetter76aaf222010-11-05 22:23:30 +01001390}
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01001391
Daniel Vetter74163902012-02-15 23:50:21 +01001392int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01001393{
Chris Wilson9da3da62012-06-01 15:20:22 +01001394 if (obj->has_dma_mapping)
Daniel Vetter74163902012-02-15 23:50:21 +01001395 return 0;
Chris Wilson9da3da62012-06-01 15:20:22 +01001396
1397 if (!dma_map_sg(&obj->base.dev->pdev->dev,
1398 obj->pages->sgl, obj->pages->nents,
1399 PCI_DMA_BIDIRECTIONAL))
1400 return -ENOSPC;
1401
1402 return 0;
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01001403}
1404
Ben Widawsky94ec8f62013-11-02 21:07:18 -07001405static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
1406{
1407#ifdef writeq
1408 writeq(pte, addr);
1409#else
1410 iowrite32((u32)pte, addr);
1411 iowrite32(pte >> 32, addr + 4);
1412#endif
1413}
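/*
 * Note: without writeq (32-bit builds) the 64-bit PTE is emitted as two
 * 32-bit writes, low dword first. This assumes nothing is concurrently
 * reading the entry being updated, so a momentarily torn PTE is harmless.
 */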
1414
1415static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
1416 struct sg_table *st,
Ben Widawsky782f1492014-02-20 11:50:33 -08001417 uint64_t start,
Ben Widawsky94ec8f62013-11-02 21:07:18 -07001418 enum i915_cache_level level)
1419{
1420 struct drm_i915_private *dev_priv = vm->dev->dev_private;
Ben Widawsky782f1492014-02-20 11:50:33 -08001421 unsigned first_entry = start >> PAGE_SHIFT;
Ben Widawsky94ec8f62013-11-02 21:07:18 -07001422 gen8_gtt_pte_t __iomem *gtt_entries =
1423 (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
1424 int i = 0;
1425 struct sg_page_iter sg_iter;
1426 dma_addr_t addr;
1427
1428 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
1429 addr = sg_dma_address(sg_iter.sg) +
1430 (sg_iter.sg_pgoffset << PAGE_SHIFT);
1431 gen8_set_pte(&gtt_entries[i],
1432 gen8_pte_encode(addr, level, true));
1433 i++;
1434 }
1435
1436 /*
1437 * XXX: This serves as a posting read to make sure that the PTE has
1438 * actually been updated. There is some concern that even though
1439 * registers and PTEs are within the same BAR that they are potentially
1440 * of NUMA access patterns. Therefore, even with the way we assume
1441 * hardware should work, we must keep this posting read for paranoia.
1442 */
1443 if (i != 0)
1444 WARN_ON(readq(&gtt_entries[i-1])
1445 != gen8_pte_encode(addr, level, true));
1446
Ben Widawsky94ec8f62013-11-02 21:07:18 -07001447 /* This next bit makes the above posting read even more important. We
1448 * want to flush the TLBs only after we're certain all the PTE updates
1449 * have finished.
1450 */
1451 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1452 POSTING_READ(GFX_FLSH_CNTL_GEN6);
Ben Widawsky94ec8f62013-11-02 21:07:18 -07001453}
1454
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001455/*
1456 * Binds an object into the global gtt with the specified cache level. The object
1457 * will be accessible to the GPU via commands whose operands reference offsets
1458 * within the global GTT as well as accessible by the GPU through the GMADR
1459 * mapped BAR (dev_priv->gtt.mappable).
1460 */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001461static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001462 struct sg_table *st,
Ben Widawsky782f1492014-02-20 11:50:33 -08001463 uint64_t start,
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001464 enum i915_cache_level level)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001465{
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001466 struct drm_i915_private *dev_priv = vm->dev->dev_private;
Ben Widawsky782f1492014-02-20 11:50:33 -08001467 unsigned first_entry = start >> PAGE_SHIFT;
Ben Widawskye7c2b582013-04-08 18:43:48 -07001468 gen6_gtt_pte_t __iomem *gtt_entries =
1469 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
Imre Deak6e995e22013-02-18 19:28:04 +02001470 int i = 0;
1471 struct sg_page_iter sg_iter;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001472 dma_addr_t addr;
1473
Imre Deak6e995e22013-02-18 19:28:04 +02001474 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001475 addr = sg_page_iter_dma_address(&sg_iter);
Ben Widawskyb35b3802013-10-16 09:18:21 -07001476 iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
Imre Deak6e995e22013-02-18 19:28:04 +02001477 i++;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001478 }
1479
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001480 /* XXX: This serves as a posting read to make sure that the PTE has
1481 * actually been updated. There is some concern that even though
1482 * registers and PTEs are within the same BAR that they are potentially
1483 * of NUMA access patterns. Therefore, even with the way we assume
1484 * hardware should work, we must keep this posting read for paranoia.
1485 */
1486 if (i != 0)
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001487 WARN_ON(readl(&gtt_entries[i-1]) !=
Ben Widawskyb35b3802013-10-16 09:18:21 -07001488 vm->pte_encode(addr, level, true));
Ben Widawsky0f9b91c2012-11-04 09:21:30 -08001489
1490 /* This next bit makes the above posting read even more important. We
1491 * want to flush the TLBs only after we're certain all the PTE updates
1492 * have finished.
1493 */
1494 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1495 POSTING_READ(GFX_FLSH_CNTL_GEN6);
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001496}
1497
Ben Widawsky94ec8f62013-11-02 21:07:18 -07001498static void gen8_ggtt_clear_range(struct i915_address_space *vm,
Ben Widawsky782f1492014-02-20 11:50:33 -08001499 uint64_t start,
1500 uint64_t length,
Ben Widawsky94ec8f62013-11-02 21:07:18 -07001501 bool use_scratch)
1502{
1503 struct drm_i915_private *dev_priv = vm->dev->dev_private;
Ben Widawsky782f1492014-02-20 11:50:33 -08001504 unsigned first_entry = start >> PAGE_SHIFT;
1505 unsigned num_entries = length >> PAGE_SHIFT;
Ben Widawsky94ec8f62013-11-02 21:07:18 -07001506 gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
1507 (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
1508 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
1509 int i;
1510
1511 if (WARN(num_entries > max_entries,
1512 "First entry = %d; Num entries = %d (max=%d)\n",
1513 first_entry, num_entries, max_entries))
1514 num_entries = max_entries;
1515
1516 scratch_pte = gen8_pte_encode(vm->scratch.addr,
1517 I915_CACHE_LLC,
1518 use_scratch);
1519 for (i = 0; i < num_entries; i++)
1520 gen8_set_pte(&gtt_base[i], scratch_pte);
1521 readl(gtt_base);
1522}
1523
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001524static void gen6_ggtt_clear_range(struct i915_address_space *vm,
Ben Widawsky782f1492014-02-20 11:50:33 -08001525 uint64_t start,
1526 uint64_t length,
Ben Widawsky828c7902013-10-16 09:21:30 -07001527 bool use_scratch)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001528{
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001529 struct drm_i915_private *dev_priv = vm->dev->dev_private;
Ben Widawsky782f1492014-02-20 11:50:33 -08001530 unsigned first_entry = start >> PAGE_SHIFT;
1531 unsigned num_entries = length >> PAGE_SHIFT;
Ben Widawskye7c2b582013-04-08 18:43:48 -07001532 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
1533 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
Ben Widawskya54c0c22013-01-24 14:45:00 -08001534 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001535 int i;
1536
1537 if (WARN(num_entries > max_entries,
1538 "First entry = %d; Num entries = %d (max=%d)\n",
1539 first_entry, num_entries, max_entries))
1540 num_entries = max_entries;
1541
Ben Widawsky828c7902013-10-16 09:21:30 -07001542 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
1543
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001544 for (i = 0; i < num_entries; i++)
1545 iowrite32(scratch_pte, &gtt_base[i]);
1546 readl(gtt_base);
1547}
1548
Ben Widawsky6f65e292013-12-06 14:10:56 -08001549
1550static void i915_ggtt_bind_vma(struct i915_vma *vma,
1551 enum i915_cache_level cache_level,
1552 u32 unused)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001553{
Ben Widawsky6f65e292013-12-06 14:10:56 -08001554 const unsigned long entry = vma->node.start >> PAGE_SHIFT;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001555 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
1556 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
1557
Ben Widawsky6f65e292013-12-06 14:10:56 -08001558 BUG_ON(!i915_is_ggtt(vma->vm));
1559 intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
1560 vma->obj->has_global_gtt_mapping = 1;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001561}
1562
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001563static void i915_ggtt_clear_range(struct i915_address_space *vm,
Ben Widawsky782f1492014-02-20 11:50:33 -08001564 uint64_t start,
1565 uint64_t length,
Ben Widawsky828c7902013-10-16 09:21:30 -07001566 bool unused)
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001567{
Ben Widawsky782f1492014-02-20 11:50:33 -08001568 unsigned first_entry = start >> PAGE_SHIFT;
1569 unsigned num_entries = length >> PAGE_SHIFT;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001570 intel_gtt_clear_range(first_entry, num_entries);
1571}
1572
Ben Widawsky6f65e292013-12-06 14:10:56 -08001573static void i915_ggtt_unbind_vma(struct i915_vma *vma)
Chris Wilsond5bd1442011-04-14 06:48:26 +01001574{
Ben Widawsky6f65e292013-12-06 14:10:56 -08001575 const unsigned int first = vma->node.start >> PAGE_SHIFT;
1576 const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001577
Ben Widawsky6f65e292013-12-06 14:10:56 -08001578 BUG_ON(!i915_is_ggtt(vma->vm));
1579 vma->obj->has_global_gtt_mapping = 0;
1580 intel_gtt_clear_range(first, size);
Chris Wilsond5bd1442011-04-14 06:48:26 +01001581}
1582
Ben Widawsky6f65e292013-12-06 14:10:56 -08001583static void ggtt_bind_vma(struct i915_vma *vma,
1584 enum i915_cache_level cache_level,
1585 u32 flags)
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01001586{
Ben Widawsky6f65e292013-12-06 14:10:56 -08001587 struct drm_device *dev = vma->vm->dev;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001588 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky6f65e292013-12-06 14:10:56 -08001589 struct drm_i915_gem_object *obj = vma->obj;
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08001590
Ben Widawsky6f65e292013-12-06 14:10:56 -08001591 /* If there is no aliasing PPGTT, or the caller needs a global mapping,
1592 * or we have a global mapping already but the cacheability flags have
1593 * changed, set the global PTEs.
1594 *
1595 * If there is an aliasing PPGTT it is anecdotally faster, so use that
1596 * instead if none of the above hold true.
1597 *
1598 * NB: A global mapping should only be needed for special regions like
1599 * "gtt mappable", SNB errata, or if specified via special execbuf
1600 * flags. At all other times, the GPU will use the aliasing PPGTT.
1601 */
1602 if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
1603 if (!obj->has_global_gtt_mapping ||
1604 (cache_level != obj->cache_level)) {
Ben Widawsky782f1492014-02-20 11:50:33 -08001605 vma->vm->insert_entries(vma->vm, obj->pages,
1606 vma->node.start,
Ben Widawsky6f65e292013-12-06 14:10:56 -08001607 cache_level);
1608 obj->has_global_gtt_mapping = 1;
1609 }
1610 }
Daniel Vetter74898d72012-02-15 23:50:22 +01001611
Ben Widawsky6f65e292013-12-06 14:10:56 -08001612 if (dev_priv->mm.aliasing_ppgtt &&
1613 (!obj->has_aliasing_ppgtt_mapping ||
1614 (cache_level != obj->cache_level))) {
1615 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1616 appgtt->base.insert_entries(&appgtt->base,
Ben Widawsky782f1492014-02-20 11:50:33 -08001617 vma->obj->pages,
1618 vma->node.start,
1619 cache_level);
Ben Widawsky6f65e292013-12-06 14:10:56 -08001620 vma->obj->has_aliasing_ppgtt_mapping = 1;
1621 }
1622}
1623
1624static void ggtt_unbind_vma(struct i915_vma *vma)
1625{
1626 struct drm_device *dev = vma->vm->dev;
1627 struct drm_i915_private *dev_priv = dev->dev_private;
1628 struct drm_i915_gem_object *obj = vma->obj;
Ben Widawsky6f65e292013-12-06 14:10:56 -08001629
1630 if (obj->has_global_gtt_mapping) {
Ben Widawsky782f1492014-02-20 11:50:33 -08001631 vma->vm->clear_range(vma->vm,
1632 vma->node.start,
1633 obj->base.size,
Ben Widawsky6f65e292013-12-06 14:10:56 -08001634 true);
1635 obj->has_global_gtt_mapping = 0;
1636 }
1637
1638 if (obj->has_aliasing_ppgtt_mapping) {
1639 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1640 appgtt->base.clear_range(&appgtt->base,
Ben Widawsky782f1492014-02-20 11:50:33 -08001641 vma->node.start,
1642 obj->base.size,
Ben Widawsky6f65e292013-12-06 14:10:56 -08001643 true);
1644 obj->has_aliasing_ppgtt_mapping = 0;
1645 }
Daniel Vetter74163902012-02-15 23:50:21 +01001646}
1647
1648void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
1649{
Ben Widawsky5c042282011-10-17 15:51:55 -07001650 struct drm_device *dev = obj->base.dev;
1651 struct drm_i915_private *dev_priv = dev->dev_private;
1652 bool interruptible;
1653
1654 interruptible = do_idling(dev_priv);
1655
Chris Wilson9da3da62012-06-01 15:20:22 +01001656 if (!obj->has_dma_mapping)
1657 dma_unmap_sg(&dev->pdev->dev,
1658 obj->pages->sgl, obj->pages->nents,
1659 PCI_DMA_BIDIRECTIONAL);
Ben Widawsky5c042282011-10-17 15:51:55 -07001660
1661 undo_idling(dev_priv, interruptible);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01001662}
Daniel Vetter644ec022012-03-26 09:45:40 +02001663
Chris Wilson42d6ab42012-07-26 11:49:32 +01001664static void i915_gtt_color_adjust(struct drm_mm_node *node,
1665 unsigned long color,
1666 unsigned long *start,
1667 unsigned long *end)
1668{
1669 if (node->color != color)
1670 *start += 4096;
1671
1672 if (!list_empty(&node->node_list)) {
1673 node = list_entry(node->node_list.next,
1674 struct drm_mm_node,
1675 node_list);
1676 if (node->allocated && node->color != color)
1677 *end -= 4096;
1678 }
1679}
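/*
 * Only wired up when the platform has no LLC (see i915_gem_setup_global_gtt):
 * a one-page hole is kept between neighbouring nodes of different cache
 * "color" (cache level), so GPU prefetching past the end of one object
 * cannot touch pages with conflicting cacheability requirements.
 */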
Ben Widawskyfbe5d362013-11-04 19:56:49 -08001680
Ben Widawskyd7e50082012-12-18 10:31:25 -08001681void i915_gem_setup_global_gtt(struct drm_device *dev,
1682 unsigned long start,
1683 unsigned long mappable_end,
1684 unsigned long end)
Daniel Vetter644ec022012-03-26 09:45:40 +02001685{
Ben Widawskye78891c2013-01-25 16:41:04 -08001686 /* Let GEM Manage all of the aperture.
1687 *
1688 * However, leave one page at the end still bound to the scratch page.
1689 * There are a number of places where the hardware apparently prefetches
1690 * past the end of the object, and we've seen multiple hangs with the
1691 * GPU head pointer stuck in a batchbuffer bound at the last page of the
1692 * aperture. One page should be enough to keep any prefetching inside
1693 * of the aperture.
1694 */
Ben Widawsky40d749802013-07-31 16:59:59 -07001695 struct drm_i915_private *dev_priv = dev->dev_private;
1696 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
Chris Wilsoned2f3452012-11-15 11:32:19 +00001697 struct drm_mm_node *entry;
1698 struct drm_i915_gem_object *obj;
1699 unsigned long hole_start, hole_end;
Daniel Vetter644ec022012-03-26 09:45:40 +02001700
Ben Widawsky35451cb2013-01-17 12:45:13 -08001701 BUG_ON(mappable_end > end);
1702
Chris Wilsoned2f3452012-11-15 11:32:19 +00001703 /* Subtract the guard page ... */
Ben Widawsky40d749802013-07-31 16:59:59 -07001704 drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
Chris Wilson42d6ab42012-07-26 11:49:32 +01001705 if (!HAS_LLC(dev))
Ben Widawsky93bd8642013-07-16 16:50:06 -07001706 dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
Daniel Vetter644ec022012-03-26 09:45:40 +02001707
Chris Wilsoned2f3452012-11-15 11:32:19 +00001708 /* Mark any preallocated objects as occupied */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001709 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
Ben Widawsky40d749802013-07-31 16:59:59 -07001710 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
Ben Widawskyb3a070c2013-07-05 14:41:02 -07001711 int ret;
Ben Widawskyedd41a82013-07-05 14:41:05 -07001712 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
Ben Widawskyc6cfb322013-07-05 14:41:06 -07001713 i915_gem_obj_ggtt_offset(obj), obj->base.size);
Chris Wilsoned2f3452012-11-15 11:32:19 +00001714
Ben Widawskyc6cfb322013-07-05 14:41:06 -07001715 WARN_ON(i915_gem_obj_ggtt_bound(obj));
Ben Widawsky40d749802013-07-31 16:59:59 -07001716 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
Ben Widawskyc6cfb322013-07-05 14:41:06 -07001717 if (ret)
Ben Widawskyb3a070c2013-07-05 14:41:02 -07001718 DRM_DEBUG_KMS("Reservation failed\n");
Chris Wilsoned2f3452012-11-15 11:32:19 +00001719 obj->has_global_gtt_mapping = 1;
1720 }
1721
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001722 dev_priv->gtt.base.start = start;
1723 dev_priv->gtt.base.total = end - start;
Daniel Vetter644ec022012-03-26 09:45:40 +02001724
Chris Wilsoned2f3452012-11-15 11:32:19 +00001725 /* Clear any non-preallocated blocks */
Ben Widawsky40d749802013-07-31 16:59:59 -07001726 drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
Chris Wilsoned2f3452012-11-15 11:32:19 +00001727 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
1728 hole_start, hole_end);
Ben Widawsky782f1492014-02-20 11:50:33 -08001729 ggtt_vm->clear_range(ggtt_vm, hole_start,
1730 hole_end - hole_start, true);
Chris Wilsoned2f3452012-11-15 11:32:19 +00001731 }
1732
1733 /* And finally clear the reserved guard page */
Ben Widawsky782f1492014-02-20 11:50:33 -08001734 ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001735}
1736
Ben Widawskyd7e50082012-12-18 10:31:25 -08001737void i915_gem_init_global_gtt(struct drm_device *dev)
1738{
1739 struct drm_i915_private *dev_priv = dev->dev_private;
1740 unsigned long gtt_size, mappable_size;
Ben Widawskyd7e50082012-12-18 10:31:25 -08001741
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001742 gtt_size = dev_priv->gtt.base.total;
Ben Widawsky93d18792013-01-17 12:45:17 -08001743 mappable_size = dev_priv->gtt.mappable_end;
Ben Widawskyd7e50082012-12-18 10:31:25 -08001744
Ben Widawskye78891c2013-01-25 16:41:04 -08001745 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001746}
1747
1748static int setup_scratch_page(struct drm_device *dev)
1749{
1750 struct drm_i915_private *dev_priv = dev->dev_private;
1751 struct page *page;
1752 dma_addr_t dma_addr;
1753
1754 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1755 if (page == NULL)
1756 return -ENOMEM;
1757 get_page(page);
1758 set_pages_uc(page, 1);
1759
1760#ifdef CONFIG_INTEL_IOMMU
1761 dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
1762 PCI_DMA_BIDIRECTIONAL);
1763 if (pci_dma_mapping_error(dev->pdev, dma_addr))
1764 return -EINVAL;
1765#else
1766 dma_addr = page_to_phys(page);
1767#endif
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001768 dev_priv->gtt.base.scratch.page = page;
1769 dev_priv->gtt.base.scratch.addr = dma_addr;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001770
1771 return 0;
1772}
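/*
 * The scratch page backs every PTE that does not map a real object:
 * clear_range() with use_scratch points unused entries here, so stray
 * GPU accesses and prefetches always hit a valid (if meaningless) page.
 */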
1773
1774static void teardown_scratch_page(struct drm_device *dev)
1775{
1776 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001777 struct page *page = dev_priv->gtt.base.scratch.page;
1778
1779 set_pages_wb(page, 1);
1780 pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001781 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001782 put_page(page);
1783 __free_page(page);
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001784}
1785
1786static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
1787{
1788 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
1789 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
1790 return snb_gmch_ctl << 20;
1791}
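/*
 * Worked example: a GGMS field of 2 yields 2 << 20 = 2MB of GTT. At
 * 4 bytes per gen6 PTE that is 512K entries, i.e. 2GB of GTT address
 * space (see the gtt_total calculation in gen6_gmch_probe()).
 */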
1792
Ben Widawsky9459d252013-11-03 16:53:55 -08001793static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1794{
1795 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
1796 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1797 if (bdw_gmch_ctl)
1798 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1799 return bdw_gmch_ctl << 20;
1800}
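/*
 * Worked example: BDW encodes the GTT size as 1 << GGMS MB, so a field
 * value of 3 yields 8MB of GTT. At 8 bytes per gen8 PTE that is 1M
 * entries, i.e. 4GB of GTT address space (see gen8_gmch_probe()).
 */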
1801
Ben Widawskybaa09f52013-01-24 13:49:57 -08001802static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001803{
1804 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
1805 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
1806 return snb_gmch_ctl << 25; /* 32 MB units */
1807}
1808
Ben Widawsky9459d252013-11-03 16:53:55 -08001809static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
1810{
1811 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
1812 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
1813 return bdw_gmch_ctl << 25; /* 32 MB units */
1814}
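/*
 * Both stolen-size fields are decoded as GMS * 32MB (the << 25): e.g. a
 * field value of 2 reports 64MB of memory reserved by the BIOS as stolen.
 */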
1815
Ben Widawsky63340132013-11-04 19:32:22 -08001816static int ggtt_probe_common(struct drm_device *dev,
1817 size_t gtt_size)
1818{
1819 struct drm_i915_private *dev_priv = dev->dev_private;
Bjorn Helgaas21c34602013-12-21 10:52:52 -07001820 phys_addr_t gtt_phys_addr;
Ben Widawsky63340132013-11-04 19:32:22 -08001821 int ret;
1822
1823 /* For Modern GENs the PTEs and register space are split in the BAR */
Bjorn Helgaas21c34602013-12-21 10:52:52 -07001824 gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
Ben Widawsky63340132013-11-04 19:32:22 -08001825 (pci_resource_len(dev->pdev, 0) / 2);
1826
Bjorn Helgaas21c34602013-12-21 10:52:52 -07001827 dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
Ben Widawsky63340132013-11-04 19:32:22 -08001828 if (!dev_priv->gtt.gsm) {
1829 DRM_ERROR("Failed to map the gtt page table\n");
1830 return -ENOMEM;
1831 }
1832
1833 ret = setup_scratch_page(dev);
1834 if (ret) {
1835 DRM_ERROR("Scratch setup failed\n");
1836 /* iounmap will also get called at remove, but meh */
1837 iounmap(dev_priv->gtt.gsm);
1838 }
1839
1840 return ret;
1841}
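/*
 * Example layout: with a 4MB BAR0 the first 2MB holds the MMIO registers
 * and the upper 2MB holds the PTEs, which is why the GTT is mapped at
 * pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2 above.
 */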
1842
Ben Widawskyfbe5d362013-11-04 19:56:49 -08001843/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
1844 * bits. When using advanced contexts each context stores its own PAT, but
1845 * writing this data shouldn't be harmful even in those cases. */
1846static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
1847{
1848#define GEN8_PPAT_UC (0<<0)
1849#define GEN8_PPAT_WC (1<<0)
1850#define GEN8_PPAT_WT (2<<0)
1851#define GEN8_PPAT_WB (3<<0)
1852#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
1853/* FIXME(BDW): Bspec is completely confused about cache control bits. */
1854#define GEN8_PPAT_LLC (1<<2)
1855#define GEN8_PPAT_LLCELLC (2<<2)
1856#define GEN8_PPAT_LLCeLLC (3<<2)
1857#define GEN8_PPAT_AGE(x) (x<<4)
1858#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
1859 uint64_t pat;
1860
1861 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
1862 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
1863 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
1864 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
1865 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
1866 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
1867 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
1868 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
1869
1870 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
1871 * write would work. */
1872 I915_WRITE(GEN8_PRIVATE_PAT, pat);
1873 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
1874}
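/*
 * Each PPAT entry is one byte (memory type in bits 1:0, target cache in
 * bits 3:2, age in bits 5:4), so the eight entries pack into the 64-bit
 * value written above. E.g. entry 0 is GEN8_PPAT_WB | GEN8_PPAT_LLC = 0x07;
 * the cache-control bits in each gen8 PTE then select one of these entries.
 */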
1875
Ben Widawsky63340132013-11-04 19:32:22 -08001876static int gen8_gmch_probe(struct drm_device *dev,
1877 size_t *gtt_total,
1878 size_t *stolen,
1879 phys_addr_t *mappable_base,
1880 unsigned long *mappable_end)
1881{
1882 struct drm_i915_private *dev_priv = dev->dev_private;
1883 unsigned int gtt_size;
1884 u16 snb_gmch_ctl;
1885 int ret;
1886
1887 /* TODO: We're not aware of mappable constraints on gen8 yet */
1888 *mappable_base = pci_resource_start(dev->pdev, 2);
1889 *mappable_end = pci_resource_len(dev->pdev, 2);
1890
1891 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
1892 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
1893
1894 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1895
1896 *stolen = gen8_get_stolen_size(snb_gmch_ctl);
1897
1898 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
Ben Widawskyd31eb102013-11-02 21:07:17 -07001899 *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
Ben Widawsky63340132013-11-04 19:32:22 -08001900
Ben Widawskyfbe5d362013-11-04 19:56:49 -08001901 gen8_setup_private_ppat(dev_priv);
1902
Ben Widawsky63340132013-11-04 19:32:22 -08001903 ret = ggtt_probe_common(dev, gtt_size);
1904
Ben Widawsky94ec8f62013-11-02 21:07:18 -07001905 dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
1906 dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
Ben Widawsky63340132013-11-04 19:32:22 -08001907
1908 return ret;
1909}
1910
Ben Widawskybaa09f52013-01-24 13:49:57 -08001911static int gen6_gmch_probe(struct drm_device *dev,
1912 size_t *gtt_total,
Ben Widawsky41907dd2013-02-08 11:32:47 -08001913 size_t *stolen,
1914 phys_addr_t *mappable_base,
1915 unsigned long *mappable_end)
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001916{
1917 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawskybaa09f52013-01-24 13:49:57 -08001918 unsigned int gtt_size;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001919 u16 snb_gmch_ctl;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001920 int ret;
1921
Ben Widawsky41907dd2013-02-08 11:32:47 -08001922 *mappable_base = pci_resource_start(dev->pdev, 2);
1923 *mappable_end = pci_resource_len(dev->pdev, 2);
1924
Ben Widawskybaa09f52013-01-24 13:49:57 -08001925 /* 64/512MB is the current min/max we actually know of, but this is just
1926 * a coarse sanity check.
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001927 */
Ben Widawsky41907dd2013-02-08 11:32:47 -08001928 if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
Ben Widawskybaa09f52013-01-24 13:49:57 -08001929 DRM_ERROR("Unknown GMADR size (%lx)\n",
1930 dev_priv->gtt.mappable_end);
1931 return -ENXIO;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001932 }
1933
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001934 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
1935 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
Ben Widawskybaa09f52013-01-24 13:49:57 -08001936 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
Ben Widawskybaa09f52013-01-24 13:49:57 -08001937
Ben Widawskyc4ae25e2013-05-01 11:00:34 -07001938 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
Ben Widawskybaa09f52013-01-24 13:49:57 -08001939
Ben Widawsky63340132013-11-04 19:32:22 -08001940 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
Ben Widawskybaa09f52013-01-24 13:49:57 -08001941 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
1942
Ben Widawsky63340132013-11-04 19:32:22 -08001943 ret = ggtt_probe_common(dev, gtt_size);
Ben Widawskybaa09f52013-01-24 13:49:57 -08001944
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001945 dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
1946 dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
Ben Widawskybaa09f52013-01-24 13:49:57 -08001947
1948 return ret;
1949}
1950
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001951static void gen6_gmch_remove(struct i915_address_space *vm)
Ben Widawskybaa09f52013-01-24 13:49:57 -08001952{
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001953
1954 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
Ben Widawsky5ed16782013-11-25 09:54:43 -08001955
1956 drm_mm_takedown(&vm->mm);
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001957 iounmap(gtt->gsm);
1958 teardown_scratch_page(vm->dev);
Ben Widawskybaa09f52013-01-24 13:49:57 -08001959}
1960
1961static int i915_gmch_probe(struct drm_device *dev,
1962 size_t *gtt_total,
Ben Widawsky41907dd2013-02-08 11:32:47 -08001963 size_t *stolen,
1964 phys_addr_t *mappable_base,
1965 unsigned long *mappable_end)
Ben Widawskybaa09f52013-01-24 13:49:57 -08001966{
1967 struct drm_i915_private *dev_priv = dev->dev_private;
1968 int ret;
1969
Ben Widawskybaa09f52013-01-24 13:49:57 -08001970 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
1971 if (!ret) {
1972 DRM_ERROR("failed to set up gmch\n");
1973 return -EIO;
1974 }
1975
Ben Widawsky41907dd2013-02-08 11:32:47 -08001976 intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
Ben Widawskybaa09f52013-01-24 13:49:57 -08001977
1978 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001979 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
Ben Widawskybaa09f52013-01-24 13:49:57 -08001980
Chris Wilsonc0a7f812013-12-30 12:16:15 +00001981 if (unlikely(dev_priv->gtt.do_idle_maps))
1982 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
1983
Ben Widawskybaa09f52013-01-24 13:49:57 -08001984 return 0;
1985}
1986
Ben Widawsky853ba5d2013-07-16 16:50:05 -07001987static void i915_gmch_remove(struct i915_address_space *vm)
Ben Widawskybaa09f52013-01-24 13:49:57 -08001988{
1989 intel_gmch_remove();
1990}
1991
1992int i915_gem_gtt_init(struct drm_device *dev)
1993{
1994 struct drm_i915_private *dev_priv = dev->dev_private;
1995 struct i915_gtt *gtt = &dev_priv->gtt;
Ben Widawskybaa09f52013-01-24 13:49:57 -08001996 int ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08001997
Ben Widawskybaa09f52013-01-24 13:49:57 -08001998 if (INTEL_INFO(dev)->gen <= 5) {
Ben Widawskyb2f21b42013-06-27 16:30:20 -07001999 gtt->gtt_probe = i915_gmch_probe;
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002000 gtt->base.cleanup = i915_gmch_remove;
Ben Widawsky63340132013-11-04 19:32:22 -08002001 } else if (INTEL_INFO(dev)->gen < 8) {
Ben Widawskyb2f21b42013-06-27 16:30:20 -07002002 gtt->gtt_probe = gen6_gmch_probe;
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002003 gtt->base.cleanup = gen6_gmch_remove;
Ben Widawsky4d15c142013-07-04 11:02:06 -07002004 if (IS_HASWELL(dev) && dev_priv->ellc_size)
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002005 gtt->base.pte_encode = iris_pte_encode;
Ben Widawsky4d15c142013-07-04 11:02:06 -07002006 else if (IS_HASWELL(dev))
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002007 gtt->base.pte_encode = hsw_pte_encode;
Ben Widawskyb2f21b42013-06-27 16:30:20 -07002008 else if (IS_VALLEYVIEW(dev))
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002009 gtt->base.pte_encode = byt_pte_encode;
Chris Wilson350ec882013-08-06 13:17:02 +01002010 else if (INTEL_INFO(dev)->gen >= 7)
2011 gtt->base.pte_encode = ivb_pte_encode;
Ben Widawskyb2f21b42013-06-27 16:30:20 -07002012 else
Chris Wilson350ec882013-08-06 13:17:02 +01002013 gtt->base.pte_encode = snb_pte_encode;
Ben Widawsky63340132013-11-04 19:32:22 -08002014 } else {
2015 dev_priv->gtt.gtt_probe = gen8_gmch_probe;
2016 dev_priv->gtt.base.cleanup = gen6_gmch_remove;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002017 }
2018
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002019 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
Ben Widawskyb2f21b42013-06-27 16:30:20 -07002020 &gtt->mappable_base, &gtt->mappable_end);
Ben Widawskya54c0c22013-01-24 14:45:00 -08002021 if (ret)
Ben Widawskybaa09f52013-01-24 13:49:57 -08002022 return ret;
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002023
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002024 gtt->base.dev = dev;
2025
Ben Widawskybaa09f52013-01-24 13:49:57 -08002026 /* GMADR is the PCI mmio aperture into the global GTT. */
Ben Widawsky853ba5d2013-07-16 16:50:05 -07002027 DRM_INFO("Memory usable by graphics device = %zdM\n",
2028 gtt->base.total >> 20);
Ben Widawskyb2f21b42013-06-27 16:30:20 -07002029 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
2030 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
Daniel Vetter7faf1ab2013-01-24 14:44:55 -08002031
Ben Widawskye76e9ae2012-11-04 09:21:27 -08002032 return 0;
Daniel Vetter644ec022012-03-26 09:45:40 +02002033}
Ben Widawsky6f65e292013-12-06 14:10:56 -08002034
2035static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2036 struct i915_address_space *vm)
2037{
2038 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
2039 if (vma == NULL)
2040 return ERR_PTR(-ENOMEM);
2041
2042 INIT_LIST_HEAD(&vma->vma_link);
2043 INIT_LIST_HEAD(&vma->mm_list);
2044 INIT_LIST_HEAD(&vma->exec_list);
2045 vma->vm = vm;
2046 vma->obj = obj;
2047
2048 switch (INTEL_INFO(vm->dev)->gen) {
2049 case 8:
2050 case 7:
2051 case 6:
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08002052 if (i915_is_ggtt(vm)) {
2053 vma->unbind_vma = ggtt_unbind_vma;
2054 vma->bind_vma = ggtt_bind_vma;
2055 } else {
2056 vma->unbind_vma = ppgtt_unbind_vma;
2057 vma->bind_vma = ppgtt_bind_vma;
2058 }
Ben Widawsky6f65e292013-12-06 14:10:56 -08002059 break;
2060 case 5:
2061 case 4:
2062 case 3:
2063 case 2:
2064 BUG_ON(!i915_is_ggtt(vm));
2065 vma->unbind_vma = i915_ggtt_unbind_vma;
2066 vma->bind_vma = i915_ggtt_bind_vma;
2067 break;
2068 default:
2069 BUG();
2070 }
2071
2072 /* Keep GGTT vmas first to make debug easier */
2073 if (i915_is_ggtt(vm))
2074 list_add(&vma->vma_link, &obj->vma_list);
2075 else
2076 list_add_tail(&vma->vma_link, &obj->vma_list);
2077
2078 return vma;
2079}
2080
2081struct i915_vma *
2082i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2083 struct i915_address_space *vm)
2084{
2085 struct i915_vma *vma;
2086
2087 vma = i915_gem_obj_to_vma(obj, vm);
2088 if (!vma)
2089 vma = __i915_gem_vma_create(obj, vm);
2090
2091 return vma;
2092}