/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

typedef uint32_t gtt_pte_t;

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)

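/*
 * pte_encode - build a gen6-style page table entry from a DMA address and
 * cache level. The upper physical address bits are folded into bits 11:4 of
 * the entry, matching the GEN6_GTT_ADDR_ENCODE() layout above, and the cache
 * attribute bits are chosen per level; Haswell encodes uncached and LLC
 * modes slightly differently, hence the IS_HASWELL() special cases.
 */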
static inline gtt_pte_t pte_encode(struct drm_device *dev,
                                   dma_addr_t addr,
                                   enum i915_cache_level level)
{
        gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_LLC_MLC:
                /* Haswell doesn't set L3 this way */
                if (IS_HASWELL(dev))
                        pte |= GEN6_PTE_CACHE_LLC;
                else
                        pte |= GEN6_PTE_CACHE_LLC_MLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                if (IS_HASWELL(dev))
                        pte |= HSW_PTE_UNCACHED;
                else
                        pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                BUG();
        }

        return pte;
}

/* PPGTT support for Sandybridge/Gen6 and later */
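/*
 * gen6_ppgtt_clear_range - point a range of PPGTT PTEs back at the scratch
 * page. Walks the affected page tables one at a time, mapping each with
 * kmap_atomic() and overwriting the entries with a scratch PTE so stale
 * mappings never reference freed memory.
 */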
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                                   unsigned first_entry,
                                   unsigned num_entries)
{
        gtt_pte_t *pt_vaddr;
        gtt_pte_t scratch_pte;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;

        scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
                                 I915_CACHE_LLC);

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;

                kunmap_atomic(pt_vaddr);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pd++;
        }
}

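/*
 * gen6_ppgtt_insert_entries - write PTEs for every page of an object into
 * the PPGTT. Walks the object's scatter/gather table page by page, encoding
 * each DMA address with the requested cache level and moving on to the next
 * page table whenever the current one fills up.
 */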
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
                                      struct sg_table *pages,
                                      unsigned first_entry,
                                      enum i915_cache_level cache_level)
{
        gtt_pte_t *pt_vaddr;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned i, j, m, segment_len;
        dma_addr_t page_addr;
        struct scatterlist *sg;

        /* init sg walking */
        sg = pages->sgl;
        i = 0;
        segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
        m = 0;

        while (i < pages->nents) {
                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

                for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
                        page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
                                                 cache_level);

                        /* grab the next page */
                        if (++m == segment_len) {
                                if (++i == pages->nents)
                                        break;

                                sg = sg_next(sg);
                                segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
                                m = 0;
                        }
                }

                kunmap_atomic(pt_vaddr);

                first_pte = 0;
                act_pd++;
        }
}

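/*
 * i915_gem_init_aliasing_ppgtt - allocate and map the page tables for the
 * single aliasing PPGTT. One page is allocated per page directory entry and
 * DMA-mapped so the GPU can reach it; on any failure everything mapped or
 * allocated so far is torn down again via the err_* labels.
 */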
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt;
        unsigned first_pd_entry_in_global_pt;
        int i;
        int ret = -ENOMEM;

        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
        first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return ret;

        ppgtt->dev = dev;
        ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
        ppgtt->clear_range = gen6_ppgtt_clear_range;
        ppgtt->insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->pt_pages = kzalloc(sizeof(struct page *) * ppgtt->num_pd_entries,
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
                goto err_ppgtt;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
                if (!ppgtt->pt_pages[i])
                        goto err_pt_alloc;
        }

        ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
                                     GFP_KERNEL);
        if (!ppgtt->pt_dma_addr)
                goto err_pt_alloc;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;

                pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
                                       PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
                        ret = -EIO;
                        goto err_pd_pin;
                }
                ppgtt->pt_dma_addr[i] = pt_addr;
        }

        ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

        ppgtt->clear_range(ppgtt, 0,
                           ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);

        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gtt_pte_t);

        dev_priv->mm.aliasing_ppgtt = ppgtt;

        return 0;

err_pd_pin:
        if (ppgtt->pt_dma_addr) {
                for (i--; i >= 0; i--)
                        pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
err_pt_alloc:
        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                if (ppgtt->pt_pages[i])
                        __free_page(ppgtt->pt_pages[i]);
        }
        kfree(ppgtt->pt_pages);
err_ppgtt:
        kfree(ppgtt);

        return ret;
}

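/*
 * i915_gem_cleanup_aliasing_ppgtt - undo i915_gem_init_aliasing_ppgtt():
 * unmap the page-table DMA mappings, free the page-table pages and finally
 * the ppgtt structure itself. Safe to call when no aliasing PPGTT was set up.
 */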
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        int i;

        if (!ppgtt)
                return;

        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
                        pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }

        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                __free_page(ppgtt->pt_pages[i]);
        kfree(ppgtt->pt_pages);
        kfree(ppgtt);
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
{
        ppgtt->insert_entries(ppgtt, obj->pages,
                              obj->gtt_space->start >> PAGE_SHIFT,
                              cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
{
        ppgtt->clear_range(ppgtt,
                           obj->gtt_space->start >> PAGE_SHIFT,
                           obj->base.size >> PAGE_SHIFT);
}

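/*
 * i915_gem_init_ppgtt - write the PPGTT page directory into its slot in the
 * global GTT and turn PPGTT on. The workaround bits programmed here
 * (ECOBITS, GAB_CTL, GAM_ECOCHK) together with the GFX_PPGTT_ENABLE and
 * per-ring PP_DIR writes form the gen6/gen7 enabling sequence as used by
 * this driver.
 */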
void i915_gem_init_ppgtt(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t pd_offset;
        struct intel_ring_buffer *ring;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        gtt_pte_t __iomem *pd_addr;
        uint32_t pd_entry;
        int i;

        if (!dev_priv->mm.aliasing_ppgtt)
                return;

        pd_addr = (gtt_pte_t __iomem *)dev_priv->gtt.gsm + ppgtt->pd_offset / sizeof(gtt_pte_t);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;

                pt_addr = ppgtt->pt_dma_addr[i];
                pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
                pd_entry |= GEN6_PDE_VALID;

                writel(pd_entry, pd_addr + i);
        }
        readl(pd_addr);

        pd_offset = ppgtt->pd_offset;
        pd_offset /= 64; /* in cachelines, */
        pd_offset <<= 16;

        if (INTEL_INFO(dev)->gen == 6) {
                uint32_t ecochk, gab_ctl, ecobits;

                ecobits = I915_READ(GAC_ECO_BITS);
                I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

                gab_ctl = I915_READ(GAB_CTL);
                I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

                ecochk = I915_READ(GAM_ECOCHK);
                I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
                                       ECOCHK_PPGTT_CACHE64B);
                I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
        } else if (INTEL_INFO(dev)->gen >= 7) {
                I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
                /* GFX_MODE is per-ring on gen7+ */
        }

        for_each_ring(ring, dev_priv, i) {
                if (INTEL_INFO(dev)->gen >= 7)
                        I915_WRITE(RING_MODE_GEN7(ring),
                                   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

                I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
                I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
        }
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
        if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
                return true;
#endif
        return false;
}

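/*
 * do_idling/undo_idling - bracket GTT unmaps with a forced GPU idle on
 * platforms that need it (see needs_idle_maps()). do_idling() returns the
 * previous dev_priv->mm.interruptible value so undo_idling() can restore it.
 */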
static bool do_idling(struct drm_i915_private *dev_priv)
{
        bool ret = dev_priv->mm.interruptible;

        if (unlikely(dev_priv->gtt.do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
        }

        return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
        if (unlikely(dev_priv->gtt.do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
                                      dev_priv->gtt.total / PAGE_SIZE);

        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
                i915_gem_clflush_object(obj);
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }

        i915_gem_chipset_flush(dev);
}

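/*
 * i915_gem_gtt_prepare_object - DMA-map an object's backing pages so their
 * bus addresses can be written into GTT/PPGTT entries. Objects that already
 * carry a DMA mapping (obj->has_dma_mapping) are left alone.
 */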
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
        if (obj->has_dma_mapping)
                return 0;

        if (!dma_map_sg(&obj->base.dev->pdev->dev,
                        obj->pages->sgl, obj->pages->nents,
                        PCI_DMA_BIDIRECTIONAL))
                return -ENOSPC;

        return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
                                     struct sg_table *st,
                                     unsigned int first_entry,
                                     enum i915_cache_level level)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct scatterlist *sg = st->sgl;
        gtt_pte_t __iomem *gtt_entries =
                (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int unused, i = 0;
        unsigned int len, m = 0;
        dma_addr_t addr;

        for_each_sg(st->sgl, sg, st->nents, unused) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (m = 0; m < len; m++) {
                        addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
                        i++;
                }
        }

        /* XXX: This serves as a posting read to make sure that the PTE has
         * actually been updated. There is some concern that even though
         * registers and PTEs are within the same BAR that they are potentially
         * of NUMA access patterns. Therefore, even with the way we assume
         * hardware should work, we must keep this posting read for paranoia.
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));

        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

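/*
 * gen6_ggtt_clear_range - rewrite a range of global GTT entries to point at
 * the scratch page. The range is clamped (with a WARN) to the number of
 * entries actually present, and the trailing readl() acts as a posting read.
 */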
static void gen6_ggtt_clear_range(struct drm_device *dev,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        gtt_pte_t scratch_pte;
        gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
        int i;

        if (WARN(num_entries > max_entries,
                 "First entry = %d; Num entries = %d (max=%d)\n",
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;

        scratch_pte = pte_encode(dev, dev_priv->gtt.scratch_page_dma, I915_CACHE_LLC);
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
}

static void i915_ggtt_insert_entries(struct drm_device *dev,
                                     struct sg_table *st,
                                     unsigned int pg_start,
                                     enum i915_cache_level cache_level)
{
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

        intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct drm_device *dev,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
{
        intel_gtt_clear_range(first_entry, num_entries);
}

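/*
 * i915_gem_gtt_bind_object/i915_gem_gtt_unbind_object - insert or clear an
 * object's PTEs in the global GTT via the per-platform vfuncs installed by
 * i915_gem_gtt_init(), tracking the state in obj->has_global_gtt_mapping.
 */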
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
                                         obj->gtt_space->start >> PAGE_SHIFT,
                                         cache_level);

        obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->gtt.gtt_clear_range(obj->base.dev,
                                      obj->gtt_space->start >> PAGE_SHIFT,
                                      obj->base.size >> PAGE_SHIFT);

        obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;

        interruptible = do_idling(dev_priv);

        if (!obj->has_dma_mapping)
                dma_unmap_sg(&dev->pdev->dev,
                             obj->pages->sgl, obj->pages->nents,
                             PCI_DMA_BIDIRECTIONAL);

        undo_idling(dev_priv, interruptible);
}

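/*
 * i915_gtt_color_adjust - drm_mm color callback used on non-LLC platforms:
 * when a neighbouring node has a different cache "color", shrink the usable
 * hole by one page (4096 bytes) on that side so differently-cached objects
 * never sit directly adjacent in the GTT.
 */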
static void i915_gtt_color_adjust(struct drm_mm_node *node,
                                  unsigned long color,
                                  unsigned long *start,
                                  unsigned long *end)
{
        if (node->color != color)
                *start += 4096;

        if (!list_empty(&node->node_list)) {
                node = list_entry(node->node_list.next,
                                  struct drm_mm_node,
                                  node_list);
                if (node->allocated && node->color != color)
                        *end -= 4096;
        }
}

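/*
 * i915_gem_setup_global_gtt - initialize the drm_mm manager for the given
 * [start, end) range of the global GTT, reserve the preallocated objects
 * that are already bound, and point every remaining entry (including the
 * final guard page) at the scratch page.
 */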
void i915_gem_setup_global_gtt(struct drm_device *dev,
                               unsigned long start,
                               unsigned long mappable_end,
                               unsigned long end)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_mm_node *entry;
        struct drm_i915_gem_object *obj;
        unsigned long hole_start, hole_end;

        BUG_ON(mappable_end > end);

        /* Subtract the guard page ... */
        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
                dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

        /* Mark any preallocated objects as occupied */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
                DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
                              obj->gtt_offset, obj->base.size);

                BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
                obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
                                                     obj->gtt_offset,
                                                     obj->base.size,
                                                     false);
                obj->has_global_gtt_mapping = 1;
        }

        dev_priv->gtt.start = start;
        dev_priv->gtt.total = end - start;

        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
                             hole_start, hole_end) {
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
                dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
                                              (hole_end - hole_start) / PAGE_SIZE);
        }

        /* And finally clear the reserved guard page */
        dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}

static bool intel_enable_ppgtt(struct drm_device *dev)
{
        if (i915_enable_ppgtt >= 0)
                return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long gtt_size, mappable_size;
        int ret;

        gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
        mappable_size = dev_priv->gtt.mappable_end;

        if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
                /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                 * aperture accordingly when using aliasing ppgtt. */
                gtt_size -= I915_PPGTT_PD_ENTRIES * PAGE_SIZE;

                i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

                ret = i915_gem_init_aliasing_ppgtt(dev);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return;
                }
        } else {
                /* Let GEM Manage all of the aperture.
                 *
                 * However, leave one page at the end still bound to the scratch
                 * page. There are a number of places where the hardware
                 * apparently prefetches past the end of the object, and we've
                 * seen multiple hangs with the GPU head pointer stuck in a
                 * batchbuffer bound at the last page of the aperture. One page
                 * should be enough to keep any prefetching inside of the
                 * aperture.
                 */
                i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
        }
}

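/*
 * setup_scratch_page/teardown_scratch_page - manage the single zeroed,
 * uncached page that unused GTT and PPGTT entries point at, and its DMA
 * mapping (page_to_phys() is used when CONFIG_INTEL_IOMMU is not set).
 */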
static int setup_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct page *page;
        dma_addr_t dma_addr;

        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
        get_page(page);
        set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
        dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
                                PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dma_addr))
                return -EINVAL;
#else
        dma_addr = page_to_phys(page);
#endif
        dev_priv->gtt.scratch_page = page;
        dev_priv->gtt.scratch_page_dma = dma_addr;

        return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        set_pages_wb(dev_priv->gtt.scratch_page, 1);
        pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        put_page(dev_priv->gtt.scratch_page);
        __free_page(dev_priv->gtt.scratch_page);
}

static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
        return snb_gmch_ctl << 20;
}

static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
        return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
{
        static const int stolen_decoder[] = {
                0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
        snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
        return stolen_decoder[snb_gmch_ctl] << 20;
}

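/*
 * i915_gem_gtt_init - probe and map the GTT. Pre-gen6 platforms go through
 * the legacy intel-gtt/GMCH path and get the i915_ggtt_* vfuncs; gen6+ reads
 * the GTT and stolen sizes out of the GMCH control register, maps the PTE
 * area (dev_priv->gtt.gsm) at BAR0 + 2MB write-combined, and installs the
 * gen6_ggtt_* vfuncs.
 */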
int i915_gem_gtt_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        phys_addr_t gtt_bus_addr;
        u16 snb_gmch_ctl;
        int ret;

        dev_priv->gtt.mappable_base = pci_resource_start(dev->pdev, 2);
        dev_priv->gtt.mappable_end = pci_resource_len(dev->pdev, 2);

        /* On modern platforms we need not worry ourself with the legacy
         * hostbridge query stuff. Skip it entirely
         */
        if (INTEL_INFO(dev)->gen < 6) {
                ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
                if (!ret) {
                        DRM_ERROR("failed to set up gmch\n");
                        return -EIO;
                }

                dev_priv->mm.gtt = intel_gtt_get();
                if (!dev_priv->mm.gtt) {
                        DRM_ERROR("Failed to initialize GTT\n");
                        intel_gmch_remove();
                        return -ENODEV;
                }

                dev_priv->gtt.do_idle_maps = needs_idle_maps(dev);

                dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
                dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

                return 0;
        }

        if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));

        dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
        if (!dev_priv->mm.gtt)
                return -ENOMEM;

        /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
        gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);

        /* i9xx_setup */
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        dev_priv->mm.gtt->gtt_total_entries =
                gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
        if (INTEL_INFO(dev)->gen < 7)
                dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
        else
                dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);

        /* 64/512MB is the current min/max we actually know of, but this is just a
         * coarse sanity check.
         */
        if ((dev_priv->gtt.mappable_end < (64<<20) ||
             (dev_priv->gtt.mappable_end > (512<<20)))) {
                DRM_ERROR("Unknown GMADR size (%lx)\n",
                          dev_priv->gtt.mappable_end);
                ret = -ENXIO;
                goto err_out;
        }

        ret = setup_scratch_page(dev);
        if (ret) {
                DRM_ERROR("Scratch setup failed\n");
                goto err_out;
        }

        dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr,
                                       dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
        if (!dev_priv->gtt.gsm) {
                DRM_ERROR("Failed to map the gtt page table\n");
                teardown_scratch_page(dev);
                ret = -ENOMEM;
                goto err_out;
        }

        /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
        DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
        DRM_DEBUG_DRIVER("GMADR size = %ldM\n", dev_priv->gtt.mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);

        dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
        dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

        return 0;

err_out:
        kfree(dev_priv->mm.gtt);
        if (INTEL_INFO(dev)->gen < 6)
                intel_gmch_remove();
        return ret;
}

void i915_gem_gtt_fini(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        iounmap(dev_priv->gtt.gsm);
        teardown_scratch_page(dev);
        if (INTEL_INFO(dev)->gen < 6)
                intel_gmch_remove();
        kfree(dev_priv->mm.gtt);
}