/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

typedef uint32_t gen6_gtt_pte_t;

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ uses PDE/PTE bits 11-4 for physical address bits 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

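/*
 * Worked example of the encoding above (illustrative numbers, not taken
 * from any spec): for a page-aligned bus address of 0x1_2345_6000,
 * GEN6_GTT_ADDR_ENCODE() ORs in ((addr >> 28) & 0xff0) == 0x010, so
 * after truncation to the 32-bit PTE the low bits 31:12 carry the page
 * frame and address bit 32 lands in PTE bit 4. Adding GEN6_PTE_VALID
 * and GEN6_PTE_CACHE_LLC then gives a final PTE of 0x23456015.
 */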
static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
					     dma_addr_t addr,
					     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		/* Haswell doesn't set L3 this way */
		if (IS_HASWELL(dev))
			pte |= GEN6_PTE_CACHE_LLC;
		else
			pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(dev))
			pte |= HSW_PTE_UNCACHED;
		else
			pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}

static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines */
	pd_offset <<= 16;
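	/*
	 * Worked example (illustrative): a page directory at GTT offset
	 * 0x1ff000 gives (0x1ff000 / 64) << 16 == 0x7fc00000, i.e. the
	 * offset in 64-byte cachelines placed in bits 31:16 of the
	 * PP_DIR_BASE value written below.
	 */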

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = gen6_pte_encode(ppgtt->dev,
				      ppgtt->scratch_page_dma_addr,
				      I915_CACHE_LLC);

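	/*
	 * Walk the page tables one kmap_atomic() at a time. Worked
	 * example (illustrative): with I915_PPGTT_PT_ENTRIES == 1024,
	 * first_entry = 1500 and num_entries = 600 start at PTE 476 of
	 * page table 1, scrub 548 entries there, and finish with the
	 * remaining 52 at the start of page table 2.
	 */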
	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
						    cache_level);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt =
		gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
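	/*
	 * Illustrative example: with a 2MB global GTT (512Ki four-byte
	 * PTEs) and I915_PPGTT_PD_ENTRIES == 512, the PDEs occupy GTT
	 * entries 523776..524287; the matching 512 * 4KB == 2MB is what
	 * i915_gem_init_global_gtt() shaves off the aperture when
	 * aliasing ppgtt is enabled.
	 */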

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->clear_range = gen6_ppgtt_clear_range;
	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->cleanup = gen6_ppgtt_cleanup;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *) * ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;
		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->clear_range(ppgtt, 0,
			   ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->dev = dev;
	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else
		dev_priv->mm.aliasing_ppgtt = ppgtt;

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->cleanup(ppgtt);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->insert_entries(ppgtt, obj->pages,
			      obj->gtt_space->start >> PAGE_SHIFT,
			      cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->clear_range(ppgtt,
			   obj->gtt_space->start >> PAGE_SHIFT,
			   obj->base.size >> PAGE_SHIFT);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
				      dev_priv->gtt.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The
 * object will be accessible to the GPU via commands whose operands reference
 * offsets within the global GTT as well as accessible by the GPU through the
 * GMADR mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that, even though
	 * registers and PTEs are within the same BAR, they are potentially
	 * subject to differing NUMA access patterns. Therefore, even with the
	 * way we assume hardware should work, we must keep this posting read
	 * for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1])
			!= gen6_pte_encode(dev, addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen6_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
				      I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

static void i915_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
					 obj->gtt_space->start >> PAGE_SHIFT,
					 cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_clear_range(obj->base.dev,
				      obj->gtt_space->start >> PAGE_SHIFT,
				      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

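/*
 * Explanatory note: on machines without an LLC the allocator "colors"
 * nodes by cache level. Since the hardware apparently prefetches past
 * the end of an object (see the guard page comment in
 * i915_gem_setup_global_gtt() below), this callback shrinks a
 * prospective hole by one 4096-byte page at either end whenever the
 * neighbouring node's color differs, keeping differently-cached
 * objects from sitting flush against each other.
 */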
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
			      obj->gtt_offset, obj->base.size);

		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
						     obj->gtt_offset,
						     obj->base.size,
						     false);
		obj->has_global_gtt_mapping = 1;
	}

	dev_priv->gtt.start = start;
	dev_priv->gtt.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
			     hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
					      (hole_end - hole_start) / PAGE_SIZE);
	}

	/* And finally clear the reserved guard page */
	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= I915_PPGTT_PD_ENTRIES * PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->mm.gtt_space);
		gtt_size += I915_PPGTT_PD_ENTRIES * PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.scratch_page = page;
	dev_priv->gtt.scratch_page_dma = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	set_pages_wb(dev_priv->gtt.scratch_page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(dev_priv->gtt.scratch_page);
	__free_page(dev_priv->gtt.scratch_page);
}

static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
	static const int stolen_decoder[] = {
		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
	return stolen_decoder[snb_gmch_ctl] << 20;
}

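/*
 * Illustrative decodings (field values are hypothetical): a GGMS field
 * of 2 gives a 2MB GTT above; a gen6 GMS field of 3 gives 3 << 25 ==
 * 96MB of stolen memory, while on gen7 a GMS field of 8 maps through
 * stolen_decoder[] to 128MB.
 */
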
static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
		*stolen = gen7_get_stolen_size(snb_gmch_ctl);
	else
		*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
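	/*
	 * e.g. (illustrative): a 2MB GTT holds 512Ki four-byte PTEs and
	 * therefore maps 512Ki * 4KB == 2GB of GPU address space.
	 */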

	/* For modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	iounmap(dev_priv->gtt.gsm);
	teardown_scratch_page(dev_priv->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct drm_device *dev)
{
	intel_gmch_remove();
}

int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	unsigned long gtt_size;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		dev_priv->gtt.gtt_probe = i915_gmch_probe;
		dev_priv->gtt.gtt_remove = i915_gmch_remove;
	} else {
		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
	}

	ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
				      &dev_priv->gtt.stolen_size,
				      &gtt->mappable_base,
				      &gtt->mappable_end);
	if (ret)
		return ret;

	gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gen6_gtt_pte_t);

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 dev_priv->gtt.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
			 dev_priv->gtt.mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
			 dev_priv->gtt.stolen_size >> 20);

	return 0;
}