/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

typedef uint32_t gtt_pte_t;

/* Gen6+ PPGTT/GGTT PTE and PDE bit definitions */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bits 11:4 for physical addr bits 39:32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
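
/*
 * Illustrative example of the encoding above (derived from the macros, not
 * quoted from any spec): for a page-aligned 40-bit address
 * addr = 0x12_3456_7000, bits 39:32 are 0x12, so GEN6_GTT_ADDR_ENCODE()
 * folds 0x12 << 4 = 0x120 into PTE bits 11:4 and the 32-bit PTE becomes
 * 0x34567120. gen6_pte_encode() below then ORs in the cache bits, e.g.
 * GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID for a final value of 0x34567125.
 */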

static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
                                        dma_addr_t addr,
                                        enum i915_cache_level level)
{
        gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_LLC_MLC:
                /* Haswell doesn't set L3 this way */
                if (IS_HASWELL(dev))
                        pte |= GEN6_PTE_CACHE_LLC;
                else
                        pte |= GEN6_PTE_CACHE_LLC_MLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                if (IS_HASWELL(dev))
                        pte |= HSW_PTE_UNCACHED;
                else
                        pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                BUG();
        }

        return pte;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                                   unsigned first_entry,
                                   unsigned num_entries)
{
        gtt_pte_t *pt_vaddr;
        gtt_pte_t scratch_pte;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;

        scratch_pte = gen6_pte_encode(ppgtt->dev,
                                      ppgtt->scratch_page_dma_addr,
                                      I915_CACHE_LLC);

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;

                kunmap_atomic(pt_vaddr);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pd++;
        }
}

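/*
 * Two-level lookup used by the PPGTT paths: act_pd selects the page table
 * (one 4 KiB page holding I915_PPGTT_PT_ENTRIES PTEs) and first_pte the
 * slot within it. With I915_PPGTT_PT_ENTRIES == 1024, GTT entry 1500 for
 * example lands in slot 476 of page table 1. insert_entries below walks
 * the object's scatterlist one page at a time, hopping to the next sg
 * segment whenever the current one is exhausted.
 */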
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
                                      struct sg_table *pages,
                                      unsigned first_entry,
                                      enum i915_cache_level cache_level)
{
        gtt_pte_t *pt_vaddr;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned i, j, m, segment_len;
        dma_addr_t page_addr;
        struct scatterlist *sg;

        /* init sg walking */
        sg = pages->sgl;
        i = 0;
        segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
        m = 0;

        while (i < pages->nents) {
                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

                for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
                        page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
                                                      cache_level);

                        /* grab the next page */
                        if (++m == segment_len) {
                                if (++i == pages->nents)
                                        break;

                                sg = sg_next(sg);
                                segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
                                m = 0;
                        }
                }

                kunmap_atomic(pt_vaddr);

                first_pte = 0;
                act_pd++;
        }
}

static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
        int i;

        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
                        pci_unmap_page(ppgtt->dev->pdev,
                                       ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }

        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                __free_page(ppgtt->pt_pages[i]);
        kfree(ppgtt->pt_pages);
        kfree(ppgtt);
}

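/*
 * Set up the aliasing PPGTT: allocate one page per page table
 * (I915_PPGTT_PD_ENTRIES of them) and DMA-map each so the GPU can walk
 * them. The page *directory* entries are not written here; they live in
 * PTE slots stolen from the top of the global GTT (pd_offset below) and
 * are programmed later by i915_gem_init_ppgtt().
 */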
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
        struct drm_device *dev = ppgtt->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned first_pd_entry_in_global_pt;
        int i;
        int ret = -ENOMEM;

        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
        first_pd_entry_in_global_pt =
                gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;

        ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
        ppgtt->clear_range = gen6_ppgtt_clear_range;
        ppgtt->insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->cleanup = gen6_ppgtt_cleanup;
        ppgtt->pt_pages = kzalloc(sizeof(struct page *) * ppgtt->num_pd_entries,
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
                return -ENOMEM;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
                if (!ppgtt->pt_pages[i])
                        goto err_pt_alloc;
        }

        ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
                                     GFP_KERNEL);
        if (!ppgtt->pt_dma_addr)
                goto err_pt_alloc;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;

                pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
                                       PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
                        ret = -EIO;
                        goto err_pd_pin;
                }
                ppgtt->pt_dma_addr[i] = pt_addr;
        }

        ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

        ppgtt->clear_range(ppgtt, 0,
                           ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);

        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gtt_pte_t);

        return 0;

err_pd_pin:
        if (ppgtt->pt_dma_addr) {
                for (i--; i >= 0; i--)
                        pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
err_pt_alloc:
        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                if (ppgtt->pt_pages[i])
                        __free_page(ppgtt->pt_pages[i]);
        }
        kfree(ppgtt->pt_pages);

        return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt;
        int ret;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return -ENOMEM;

        ppgtt->dev = dev;

        ret = gen6_ppgtt_init(ppgtt);
        if (ret)
                kfree(ppgtt);
        else
                dev_priv->mm.aliasing_ppgtt = ppgtt;

        return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

        if (!ppgtt)
                return;

        ppgtt->cleanup(ppgtt);
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
{
        ppgtt->insert_entries(ppgtt, obj->pages,
                              obj->gtt_space->start >> PAGE_SHIFT,
                              cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
{
        ppgtt->clear_range(ppgtt,
                           obj->gtt_space->start >> PAGE_SHIFT,
                           obj->base.size >> PAGE_SHIFT);
}

void i915_gem_init_ppgtt(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t pd_offset;
        struct intel_ring_buffer *ring;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        gtt_pte_t __iomem *pd_addr;
        uint32_t pd_entry;
        int i;

        if (!dev_priv->mm.aliasing_ppgtt)
                return;

        pd_addr = (gtt_pte_t __iomem *)dev_priv->gtt.gsm +
                ppgtt->pd_offset / sizeof(gtt_pte_t);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;

                pt_addr = ppgtt->pt_dma_addr[i];
                pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
                pd_entry |= GEN6_PDE_VALID;

                writel(pd_entry, pd_addr + i);
        }
        readl(pd_addr);

        pd_offset = ppgtt->pd_offset;
        pd_offset /= 64; /* in cachelines */
        pd_offset <<= 16;
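        /*
         * Format implied by the arithmetic above: PP_DIR_BASE wants the
         * page directory's GTT offset in 64-byte cachelines, placed in
         * bits 31:16. E.g. for a 2 MiB GTT the directory starts at byte
         * offset (512*1024 - 512) * 4 = 0x1ff800, i.e. cacheline 0x7fe0,
         * so the value written below is 0x7fe00000.
         */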

        if (INTEL_INFO(dev)->gen == 6) {
                uint32_t ecochk, gab_ctl, ecobits;

                ecobits = I915_READ(GAC_ECO_BITS);
                I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

                gab_ctl = I915_READ(GAB_CTL);
                I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

                ecochk = I915_READ(GAM_ECOCHK);
                I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
                                       ECOCHK_PPGTT_CACHE64B);
                I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
        } else if (INTEL_INFO(dev)->gen >= 7) {
                I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
                /* GFX_MODE is per-ring on gen7+ */
        }

        for_each_ring(ring, dev_priv, i) {
                if (INTEL_INFO(dev)->gen >= 7)
                        I915_WRITE(RING_MODE_GEN7(ring),
                                   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

                I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
                I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
        }
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
        if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
                return true;
#endif
        return false;
}

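/*
 * On the machines flagged by needs_idle_maps(), every GTT unmap has to be
 * bracketed by do_idling()/undo_idling(): force mm.interruptible off, wait
 * for the GPU to idle, perform the unmap, then restore the previous
 * interruptible state. i915_gem_gtt_finish_object() below shows the
 * pattern.
 */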
static bool do_idling(struct drm_i915_private *dev_priv)
{
        bool ret = dev_priv->mm.interruptible;

        if (unlikely(dev_priv->gtt.do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
        }

        return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
        if (unlikely(dev_priv->gtt.do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
                                      dev_priv->gtt.total / PAGE_SIZE);

        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
                i915_gem_clflush_object(obj);
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }

        i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
        if (obj->has_dma_mapping)
                return 0;

        if (!dma_map_sg(&obj->base.dev->pdev->dev,
                        obj->pages->sgl, obj->pages->nents,
                        PCI_DMA_BIDIRECTIONAL))
                return -ENOSPC;

        return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The
 * object will be accessible to the GPU via commands whose operands reference
 * offsets within the global GTT as well as accessible by the GPU through the
 * GMADR mapped BAR (dev_priv->gtt.mappable).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
                                     struct sg_table *st,
                                     unsigned int first_entry,
                                     enum i915_cache_level level)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct scatterlist *sg = st->sgl;
        gtt_pte_t __iomem *gtt_entries =
                (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int unused, i = 0;
        unsigned int len, m = 0;
        dma_addr_t addr;

        for_each_sg(st->sgl, sg, st->nents, unused) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (m = 0; m < len; m++) {
                        addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        iowrite32(gen6_pte_encode(dev, addr, level),
                                  &gtt_entries[i]);
                        i++;
                }
        }

        /* XXX: This serves as a posting read to make sure that the PTE has
         * actually been updated. There is some concern that, even though
         * registers and PTEs live within the same BAR, they could be subject
         * to different (NUMA-like) access patterns. Therefore, even with the
         * way we assume the hardware should work, we must keep this posting
         * read for paranoia.
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1])
                        != gen6_pte_encode(dev, addr, level));

        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen6_ggtt_clear_range(struct drm_device *dev,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        gtt_pte_t scratch_pte;
        gtt_pte_t __iomem *gtt_base =
                (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
        int i;

        if (WARN(num_entries > max_entries,
                 "First entry = %d; Num entries = %d (max=%d)\n",
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;

        scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
                                      I915_CACHE_LLC);
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
}

static void i915_ggtt_insert_entries(struct drm_device *dev,
                                     struct sg_table *st,
                                     unsigned int pg_start,
                                     enum i915_cache_level cache_level)
{
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

        intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct drm_device *dev,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
{
        intel_gtt_clear_range(first_entry, num_entries);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
                                         obj->gtt_space->start >> PAGE_SHIFT,
                                         cache_level);

        obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->gtt.gtt_clear_range(obj->base.dev,
                                      obj->gtt_space->start >> PAGE_SHIFT,
                                      obj->base.size >> PAGE_SHIFT);

        obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;

        interruptible = do_idling(dev_priv);

        if (!obj->has_dma_mapping)
                dma_unmap_sg(&dev->pdev->dev,
                             obj->pages->sgl, obj->pages->nents,
                             PCI_DMA_BIDIRECTIONAL);

        undo_idling(dev_priv, interruptible);
}

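/*
 * GTT "coloring": on machines without an LLC, objects with conflicting
 * cache attributes (tracked as the drm_mm node color) should not occupy
 * directly adjacent pages, since prefetching can cross a page boundary.
 * The hook below therefore pulls the usable range of a prospective node
 * inward by one 4 KiB guard page on each side that abuts a
 * differently-colored neighbour.
 */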
static void i915_gtt_color_adjust(struct drm_mm_node *node,
                                  unsigned long color,
                                  unsigned long *start,
                                  unsigned long *end)
{
        if (node->color != color)
                *start += 4096;

        if (!list_empty(&node->node_list)) {
                node = list_entry(node->node_list.next,
                                  struct drm_mm_node,
                                  node_list);
                if (node->allocated && node->color != color)
                        *end -= 4096;
        }
}

void i915_gem_setup_global_gtt(struct drm_device *dev,
                               unsigned long start,
                               unsigned long mappable_end,
                               unsigned long end)
{
        /* Let GEM manage all of the aperture.
         *
         * However, leave one page at the end still bound to the scratch page.
         * There are a number of places where the hardware apparently prefetches
         * past the end of the object, and we've seen multiple hangs with the
         * GPU head pointer stuck in a batchbuffer bound at the last page of the
         * aperture. One page should be enough to keep any prefetching inside
         * of the aperture.
         */
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_mm_node *entry;
        struct drm_i915_gem_object *obj;
        unsigned long hole_start, hole_end;

        BUG_ON(mappable_end > end);

        /* Subtract the guard page ... */
        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
                dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

        /* Mark any preallocated objects as occupied */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
                DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
                              obj->gtt_offset, obj->base.size);

                BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
                obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
                                                     obj->gtt_offset,
                                                     obj->base.size,
                                                     false);
                obj->has_global_gtt_mapping = 1;
        }

        dev_priv->gtt.start = start;
        dev_priv->gtt.total = end - start;

        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
                             hole_start, hole_end) {
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
                dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
                                              (hole_end - hole_start) / PAGE_SIZE);
        }

        /* And finally clear the reserved guard page */
        dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
        if (i915_enable_ppgtt >= 0)
                return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

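/*
 * Worked example for the sizing below (derived from the constants in this
 * file): with aliasing PPGTT enabled, the PDEs occupy
 * I915_PPGTT_PD_ENTRIES (512) PTE slots at the top of the global GTT, so
 * the usable aperture passed to i915_gem_setup_global_gtt() shrinks by
 * 512 * PAGE_SIZE = 2 MiB of GTT address space.
 */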
void i915_gem_init_global_gtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long gtt_size, mappable_size;

        gtt_size = dev_priv->gtt.total;
        mappable_size = dev_priv->gtt.mappable_end;

        if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
                int ret;
                /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                 * aperture accordingly when using aliasing ppgtt. */
                gtt_size -= I915_PPGTT_PD_ENTRIES * PAGE_SIZE;

                i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

                ret = i915_gem_init_aliasing_ppgtt(dev);
                if (!ret)
                        return;

                DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
                drm_mm_takedown(&dev_priv->mm.gtt_space);
                gtt_size += I915_PPGTT_PD_ENTRIES * PAGE_SIZE;
        }
        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

static int setup_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct page *page;
        dma_addr_t dma_addr;

        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
        get_page(page);
        set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
        dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
                                PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dma_addr))
                return -EINVAL;
#else
        dma_addr = page_to_phys(page);
#endif
        dev_priv->gtt.scratch_page = page;
        dev_priv->gtt.scratch_page_dma = dma_addr;

        return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        set_pages_wb(dev_priv->gtt.scratch_page, 1);
        pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        put_page(dev_priv->gtt.scratch_page);
        __free_page(dev_priv->gtt.scratch_page);
}

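/*
 * GMCH graphics control register decoding, matching the shifts and masks
 * used below: GGMS yields the GTT size directly in MiB (a field value of
 * 2 means a 2 MiB GTT, i.e. 512Ki PTEs covering 2 GiB of address space),
 * the SNB GMS field counts stolen memory in 32 MiB units, and the IVB
 * field goes through a lookup table instead.
 */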
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
        return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
        return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
        static const int stolen_decoder[] = {
                0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
        snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
        return stolen_decoder[snb_gmch_ctl] << 20;
}

static int gen6_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
                           unsigned long *mappable_end)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        phys_addr_t gtt_bus_addr;
        unsigned int gtt_size;
        u16 snb_gmch_ctl;
        int ret;

        *mappable_base = pci_resource_start(dev->pdev, 2);
        *mappable_end = pci_resource_len(dev->pdev, 2);

        /* 64/512MB is the current min/max we actually know of, but this is just
         * a coarse sanity check.
         */
        if (*mappable_end < (64 << 20) || *mappable_end > (512 << 20)) {
                DRM_ERROR("Unknown GMADR size (%lx)\n",
                          dev_priv->gtt.mappable_end);
                return -ENXIO;
        }

        if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

        if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
                *stolen = gen7_get_stolen_size(snb_gmch_ctl);
        else
                *stolen = gen6_get_stolen_size(snb_gmch_ctl);

        *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;

        /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
        gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2 << 20);
        dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
        if (!dev_priv->gtt.gsm) {
                DRM_ERROR("Failed to map the gtt page table\n");
                return -ENOMEM;
        }

        ret = setup_scratch_page(dev);
        if (ret)
                DRM_ERROR("Scratch setup failed\n");

        dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
        dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

        return ret;
}

static void gen6_gmch_remove(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        iounmap(dev_priv->gtt.gsm);
        teardown_scratch_page(dev_priv->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
                           unsigned long *mappable_end)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
        if (!ret) {
                DRM_ERROR("failed to set up gmch\n");
                return -EIO;
        }

        intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

        dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
        dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
        dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

        return 0;
}

static void i915_gmch_remove(struct drm_device *dev)
{
        intel_gmch_remove();
}

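/*
 * Top-level GTT probe: pick a probe/remove vtable (the intel-gtt helper
 * module for gen5 and earlier, the native gen6+ path otherwise), have it
 * fill in dev_priv->gtt, and log the discovered sizes. The gen6+ probe
 * writes through pointers aliasing dev_priv->gtt fields, which is why it
 * can already report dev_priv->gtt.mappable_end in its error path.
 */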
int i915_gem_gtt_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_gtt *gtt = &dev_priv->gtt;
        unsigned long gtt_size;
        int ret;

        if (INTEL_INFO(dev)->gen <= 5) {
                dev_priv->gtt.gtt_probe = i915_gmch_probe;
                dev_priv->gtt.gtt_remove = i915_gmch_remove;
        } else {
                dev_priv->gtt.gtt_probe = gen6_gmch_probe;
                dev_priv->gtt.gtt_remove = gen6_gmch_remove;
        }

        ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
                                      &dev_priv->gtt.stolen_size,
                                      &gtt->mappable_base,
                                      &gtt->mappable_end);
        if (ret)
                return ret;

        gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);

        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %zdM\n",
                 dev_priv->gtt.total >> 20);
        DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
                         dev_priv->gtt.mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
                         dev_priv->gtt.stolen_size >> 20);

        return 0;
}