/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

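/*
 * Worked example of the encoding above: a page at physical address
 * 0x1_2345_6000 gives ((0x1_2345_6000 >> 28) & 0xff0) = 0x010, i.e.
 * address bit 32 lands in PTE bit 4. The resulting 32-bit PTE is
 * 0x23456010 before the valid and cache bits are OR'd in: bits 31:12
 * carry address bits 31:12, bits 11:4 carry address bits 39:32.
 */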
static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
				      dma_addr_t addr,
				      enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}

#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
				     dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable. Other platforms don't have a
	 * setting for read-only/writeable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
				     dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= GEN6_PTE_CACHE_LLC;

	return pte;
}

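/*
 * Note on the layout assumed here: the 512 PDEs of a gen6 page
 * directory do not get their own page, they occupy entries stolen from
 * the tail of the global GTT (see gen6_ppgtt_init() below), so they are
 * written through the same ioremap'd gsm mapping used for GGTT PTEs.
 */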
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}

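/*
 * PP_DIR_BASE wants the page directory offset expressed in cachelines
 * (hence the divide by 64 below, with the BUG_ON checking that
 * alignment) and placed in the upper 16 bits of the register (hence the
 * shift by 16).
 */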
static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	BUG_ON(ppgtt->pd_offset & 0x3f);

	gen6_write_pdes(ppgtt);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
		} else {
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		}
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
	return 0;
}

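/*
 * For reference, the address space wired up above is 512 PDEs x 1024
 * PTEs per page table x 4 KiB pages = 2 GiB, matching the
 * PP_DIR_DCLV_2G value programmed into each ring.
 */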
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = ppgtt->pte_encode(ppgtt->dev,
					ppgtt->scratch_page_dma_addr,
					I915_CACHE_LLC);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

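/*
 * As in clear_range above, insert_entries maps one page table at a time
 * with kmap_atomic and rolls over to the next table whenever the PTE
 * index crosses the 1024-entry page-table boundary.
 */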
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
						      cache_level);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt =
		gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;

	if (IS_HASWELL(dev)) {
		ppgtt->pte_encode = hsw_pte_encode;
	} else if (IS_VALLEYVIEW(dev)) {
		ppgtt->pte_encode = byt_pte_encode;
	} else {
		ppgtt->pte_encode = gen6_pte_encode;
	}
	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->clear_range = gen6_ppgtt_clear_range;
	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->cleanup = gen6_ppgtt_cleanup;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *) * ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;
		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->clear_range(ppgtt, 0,
			   ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->dev = dev;
	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else
		dev_priv->mm.aliasing_ppgtt = ppgtt;

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->cleanup(ppgtt);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->insert_entries(ppgtt, obj->pages,
			      obj->gtt_space->start >> PAGE_SHIFT,
			      cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->clear_range(ppgtt,
			   obj->gtt_space->start >> PAGE_SHIFT,
			   obj->base.size >> PAGE_SHIFT);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
				      dev_priv->gtt.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
			  &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1])
			!= dev_priv->gtt.pte_encode(dev, addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen6_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = dev_priv->gtt.pte_encode(dev,
					       dev_priv->gtt.scratch_page_dma,
					       I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

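/*
 * Pre-gen6 paths: these simply forward to the older intel-gtt helper
 * layer, which still owns GTT programming on those chipsets (see
 * i915_gmch_probe() below, which hooks them up for gen5 and earlier).
 */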
static void i915_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
					 obj->gtt_space->start >> PAGE_SHIFT,
					 cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_clear_range(obj->base.dev,
				      obj->gtt_space->start >> PAGE_SHIFT,
				      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

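/*
 * The color_adjust hook above is only installed on non-LLC platforms
 * (see i915_gem_setup_global_gtt() below). It keeps a full page of
 * guard space between neighboring nodes of different cache levels,
 * presumably so PTE prefetch never mixes conflicting cacheability.
 */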
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
			      obj->gtt_offset, obj->base.size);

		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
						     obj->gtt_offset,
						     obj->base.size,
						     false);
		obj->has_global_gtt_mapping = 1;
	}

	dev_priv->gtt.start = start;
	dev_priv->gtt.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
			     hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
					      (hole_end-hole_start) / PAGE_SIZE);
	}

	/* And finally clear the reserved guard page */
	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}

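/*
 * The i915_enable_ppgtt module parameter short-circuits this check when
 * set to 0 or 1; a negative value falls through to the heuristics
 * below, which disable PPGTT on SNB when VT-d is active.
 */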
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->mm.gtt_space);
		gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

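/*
 * The scratch page backs every PTE that is not bound to a real object:
 * the clear_range implementations above point entries at it so stray
 * GPU accesses hit a harmless, CPU-uncached page rather than whatever
 * was mapped there before.
 */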
static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.scratch_page = page;
	dev_priv->gtt.scratch_page_dma = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	set_pages_wb(dev_priv->gtt.scratch_page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(dev_priv->gtt.scratch_page);
	__free_page(dev_priv->gtt.scratch_page);
}

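/*
 * The helpers below decode fields of the SNB_GMCH_CTRL config register:
 * GGMS gives the GTT size in 1 MiB units (hence the shift by 20), while
 * GMS gives the stolen-memory size, counted in 32 MiB units on gen6 and
 * decoded through a lookup table of MiB values on gen7.
 */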
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
	static const int stolen_decoder[] = {
		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
	return stolen_decoder[snb_gmch_ctl] << 20;
}

static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
		*stolen = gen7_get_stolen_size(snb_gmch_ctl);
	else
		*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	iounmap(dev_priv->gtt.gsm);
	teardown_scratch_page(dev_priv->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct drm_device *dev)
{
	intel_gmch_remove();
}

int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		dev_priv->gtt.gtt_probe = i915_gmch_probe;
		dev_priv->gtt.gtt_remove = i915_gmch_remove;
	} else {
		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
		if (IS_HASWELL(dev)) {
			dev_priv->gtt.pte_encode = hsw_pte_encode;
		} else if (IS_VALLEYVIEW(dev)) {
			dev_priv->gtt.pte_encode = byt_pte_encode;
		} else {
			dev_priv->gtt.pte_encode = gen6_pte_encode;
		}
	}

	ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
				      &dev_priv->gtt.stolen_size,
				      &gtt->mappable_base,
				      &gtt->mappable_end);
	if (ret)
		return ret;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 dev_priv->gtt.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
			 dev_priv->gtt.mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
			 dev_priv->gtt.stolen_size >> 20);

	return 0;
}