/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))

/* PPGTT (per-process GTT) PTE/PDE encoding */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
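/* For example, addr = 0x100000000 (physical bit 32 set) gives
 * (addr >> 28) & 0xff0 == 0x10: address bit 32 lands in bit 4 of the
 * 32-bit PTE. The Haswell mask 0x7f0 likewise carries address bits
 * 38:32 in PTE bits 10:4.
 */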

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bits 11:4 for physical address bits 39:32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/* Cacheability Control is a 4-bit value. The low three bits are stored in
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
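/* Worked example: HSW_CACHEABILITY_CONTROL(0xb) puts the low three bits
 * (0x3) into PTE bits 3:1 and the high bit into PTE bit 11, giving
 * (0x3 << 1) | (1 << 11) == 0x806.
 */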
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)

static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

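/* Valleyview has no shared LLC, so coherency with the CPU is achieved
 * by snooping instead: byt_pte_encode() below sets the snoop bit for
 * every cache level other than I915_CACHE_NONE.
 */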
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable. Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level,
				      bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE0;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE0;
		break;
	}

	return pte;
}

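/* Write this PPGTT's page directory into its stolen slice of the global
 * GTT: each PDE is the DMA address of one page table page, encoded like
 * a PTE and tagged GEN6_PDE_VALID. The final readl() is a posting read
 * to flush the writes through the write-combined gsm mapping.
 */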
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}

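/* Turn the PPGTT on in hardware. RING_PP_DIR_BASE takes the page
 * directory offset in cachelines, placed in bits 31:16 (hence the
 * "/= 64; <<= 16" below); on gen7+ the PPGTT enable bit is per ring
 * (RING_MODE_GEN7) rather than in the global GFX_MODE register.
 */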
static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	BUG_ON(ppgtt->pd_offset & 0x3f);

	gen6_write_pdes(ppgtt);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
		} else {
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		}
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   unsigned first_entry,
				   unsigned num_entries)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

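/* A linear entry index splits into a page table number and a slot in
 * that table: with 4KiB pages and 4-byte PTEs, I915_PPGTT_PT_ENTRIES is
 * 1024, so e.g. entry 1500 maps to slot 476 of page table 1. The
 * kmap_atomic() is dropped and re-taken whenever the walk crosses a
 * page table boundary.
 */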
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	int i;

	drm_mm_takedown(&ppgtt->base.mm);

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

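/* Set up one aliasing PPGTT: a 512-entry page directory whose entries
 * each point at a freshly allocated, DMA-mapped page table page of 1024
 * PTEs, covering 512 * 1024 * 4KiB = 2GiB of GPU address space.
 */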
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *) * ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;
		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->base.dev = dev;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret) {
		kfree(ppgtt);
	} else {
		dev_priv->mm.aliasing_ppgtt = ppgtt;
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
			    ppgtt->base.total);
	}

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->base.cleanup(&ppgtt->base);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
				   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				   cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->base.clear_range(&ppgtt->base,
				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				obj->base.size >> PAGE_SHIFT);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		i915_gem_clflush_object(obj, obj->pin_display);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR, they may be subject
	 * to different (e.g. NUMA-like) access patterns. Therefore, even with
	 * the way we assume hardware should work, we must keep this posting
	 * read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) !=
			vm->pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
					  entry,
					  cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       entry,
				       obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

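/* drm_mm color adjustment, registered below only when the machine has
 * no LLC: i915 uses a node's color to carry the object's cache level,
 * and this callback shrinks the usable range by one 4096-byte page on
 * each side that abuts a node of a different color, keeping a guard
 * page between objects with different cacheability.
 */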
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
		int ret;

		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
		if (ret)
			DRM_DEBUG_KMS("Reservation failed\n");
		obj->has_global_gtt_mapping = 1;
		list_add(&vma->vma_link, &obj->vma_list);
	}

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
	}

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
}

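/* i915_enable_ppgtt is a module parameter: 0 or 1 force the decision,
 * while a negative value ("auto") falls through to the VT-d check
 * below.
 */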
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->gtt.base.mm);
		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.base.scratch.page = page;
	dev_priv->gtt.base.scratch.addr = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(page);
	__free_page(page);
}

static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

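/* Example decode for the two helpers above: the GGMS field of
 * SNB_GMCH_CTRL gives the GTT size in MiB and GMS the stolen size in
 * 32MiB units, so GGMS == 2 means a 2MiB GTT (2MiB / 4 bytes per PTE =
 * 512Ki entries, i.e. 2GiB of address space) and GMS == 2 means 64MiB
 * of stolen memory.
 */
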
static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if (*mappable_end < (64 << 20) || *mappable_end > (512 << 20)) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

	iounmap(gtt->gsm);
	teardown_scratch_page(vm->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

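/* Top-level GTT probe. Gen5 and earlier go through the intel-gtt/GMCH
 * helper module; gen6+ map and write the PTEs directly. The PTE encode
 * hook is then chosen per platform, with Haswell parts that have eLLC
 * using the eLLC-aware iris_pte_encode() variant.
 */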
int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			gtt->base.pte_encode = ivb_pte_encode;
		else
			gtt->base.pte_encode = snb_pte_encode;
	}

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	gtt->base.dev = dev;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);

	return 0;
}