/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

typedef uint32_t gtt_pte_t;

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

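/*
 * Encode a DMA address plus cache level into a 32-bit page table entry.
 * Pages are 4KiB aligned, so the low 12 bits of an address are free:
 * GEN6_GTT_ADDR_ENCODE folds address bits 39:32 into PTE bits 11:4
 * ((addr >> 28) moves bit 32 to bit 4, and the 0xff0 mask keeps bits 11:4),
 * letting a 40-bit physical address fit in a 32-bit entry.
 */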
static inline gtt_pte_t pte_encode(struct drm_device *dev,
				   dma_addr_t addr,
				   enum i915_cache_level level)
{
	gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		/* Haswell doesn't set L3 this way */
		if (IS_HASWELL(dev))
			pte |= GEN6_PTE_CACHE_LLC;
		else
			pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(dev))
			pte |= HSW_PTE_UNCACHED;
		else
			pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}

/* PPGTT support for Sandybridge/Gen6 and later */
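/*
 * Point num_entries PTEs, starting at first_entry, back at the scratch page.
 * The range may span several page tables, so each loop iteration below
 * handles at most one page table's worth of entries, kmapping and unmapping
 * the table as it goes.
 */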
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	gtt_pte_t *pt_vaddr;
	gtt_pte_t scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
				 I915_CACHE_LLC);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

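/*
 * Set up the aliasing PPGTT: a page directory of I915_PPGTT_PD_ENTRIES
 * entries, each pointing at a separately allocated page table. The tables
 * are DMA-mapped when the GTT setup requires it (needs_dmar), and the whole
 * address space is initially pointed at the scratch page.
 */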
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->dev = dev;
	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	if (dev_priv->mm.gtt->needs_dmar) {
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
					     *ppgtt->num_pd_entries,
					     GFP_KERNEL);
		if (!ppgtt->pt_dma_addr)
			goto err_pt_alloc;

		for (i = 0; i < ppgtt->num_pd_entries; i++) {
			dma_addr_t pt_addr;

			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
					       0, 4096,
					       PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
				ret = -EIO;
				goto err_pd_pin;
			}
			ppgtt->pt_dma_addr[i] = pt_addr;
		}
	}

	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);

	dev_priv->mm.aliasing_ppgtt = ppgtt;

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

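/*
 * Write PTEs for each page of a scatter/gather list, starting at
 * first_entry. An sg segment can span several 4KiB pages (m counts pages
 * within the current segment), and the walk carries over from one page
 * table to the next as tables fill up.
 */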
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
					 const struct sg_table *pages,
					 unsigned first_entry,
					 enum i915_cache_level cache_level)
{
	gtt_pte_t *pt_vaddr;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j, m, segment_len;
	dma_addr_t page_addr;
	struct scatterlist *sg;

	/* init sg walking */
	sg = pages->sgl;
	i = 0;
	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
	m = 0;

	while (i < pages->nents) {
		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
						 cache_level);

			/* grab the next page */
			if (++m == segment_len) {
				if (++i == pages->nents)
					break;

				sg = sg_next(sg);
				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
				m = 0;
			}
		}

		kunmap_atomic(pt_vaddr);

		first_pte = 0;
		act_pd++;
	}
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	i915_ppgtt_insert_sg_entries(ppgtt,
				     obj->pages,
				     obj->gtt_space->start >> PAGE_SHIFT,
				     cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt,
			       obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT);
}

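/*
 * Enable the aliasing PPGTT in hardware: write the page directory entries
 * through the global GTT mapping, set the platform-specific enable bits, and
 * point every ring at the new page directory.
 */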
void i915_gem_init_ppgtt(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	uint32_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	if (!dev_priv->mm.aliasing_ppgtt)
		return;

	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		if (dev_priv->mm.gtt->needs_dmar)
			pt_addr = ppgtt->pt_dma_addr[i];
		else
			pt_addr = page_to_phys(ppgtt->pt_pages[i]);

		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);

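	/* The ring's PP_DIR_BASE register appears to expect the page
	 * directory's offset into the global GTT in cachelines, stored in
	 * the upper 16 bits -- hence the divide by 64 and shift by 16. */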
	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
}

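/*
 * With VT-d active, some platforms cannot safely update GTT mappings while
 * the GPU is busy (the intel-gtt layer flags this via do_idle_maps).
 * do_idling() idles the GPU around such updates and returns the previous
 * mm.interruptible state so undo_idling() can restore it afterwards.
 */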
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

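/*
 * Point a range of global GTT entries back at the scratch page. Pre-gen6
 * chipsets delegate to the intel-gtt layer; on gen6+ the scratch PTE is
 * written directly through the ioremapped PTE array, clamped so we never
 * write past the end of the table.
 */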
static void i915_ggtt_clear_range(struct drm_device *dev,
				  unsigned first_entry,
				  unsigned num_entries)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gtt_pte_t scratch_pte;
	gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
	int i;

	if (INTEL_INFO(dev)->gen < 6) {
		intel_gtt_clear_range(first_entry, num_entries);
		return;
	}

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
	/* memset_io() would replicate only the low byte of scratch_pte, so
	 * write each 32-bit entry out explicitly instead. */
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

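/*
 * DMA-map the object's backing pages before it can be bound into any GTT.
 * Objects that already carry a DMA mapping (e.g. dma-buf imports) are left
 * alone.
 */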
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The
 * object will be accessible to the GPU via commands whose operands reference
 * offsets within the global GTT as well as accessible by the GPU through the
 * GMADR mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
				  enum i915_cache_level level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sg_table *st = obj->pages;
	struct scatterlist *sg = st->sgl;
	const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
	gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
	int unused, i = 0;
	unsigned int len, m = 0;
	dma_addr_t addr;

	for_each_sg(st->sgl, sg, st->nents, unused) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
			i++;
		}
	}

	BUG_ON(i > max_entries);
	BUG_ON(i != obj->base.size / PAGE_SIZE);

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR, they may be subject to
	 * different NUMA access patterns. Therefore, even with the way we
	 * assume the hardware should work, we must keep this posting read for
	 * paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;

	if (INTEL_INFO(dev)->gen < 6) {
		unsigned int flags = (cache_level == I915_CACHE_NONE) ?
			AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
		intel_gtt_insert_sg_entries(obj->pages,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    flags);
	} else {
		gen6_ggtt_bind_object(obj, cache_level);
	}

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	i915_ggtt_clear_range(obj->base.dev,
			      obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

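/*
 * drm_mm color-adjust callback, installed below only on !HAS_LLC platforms:
 * nodes are colored by cache level, and a 4KiB guard page is kept between
 * any two neighbouring objects whose cache levels differ.
 */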
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

void i915_gem_init_global_gtt(struct drm_device *dev,
			      unsigned long start,
			      unsigned long mappable_end,
			      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* ... but ensure that we clear the entire range. */
	i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}

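/*
 * Allocate the scratch page that unused GTT entries point at, so stray GPU
 * accesses land on a known, harmless page. It is kept uncached and, under
 * CONFIG_INTEL_IOMMU, mapped for DMA.
 */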
static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr)) {
		/* Undo the caching change and both page references. */
		set_pages_wb(page, 1);
		put_page(page);
		__free_page(page);
		return -EINVAL;
	}
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->mm.gtt->scratch_page = page;
	dev_priv->mm.gtt->scratch_page_dma = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
	pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(dev_priv->mm.gtt->scratch_page);
	__free_page(dev_priv->mm.gtt->scratch_page);
}

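/*
 * The SNB_GMCH_CTRL config register packs both the GTT size (GGMS field)
 * and the stolen memory size (GMS field); the helpers below decode them to
 * bytes. Gen7 re-purposed the stolen-size bits as an index into a lookup
 * table instead of a simple shift.
 */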
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
{
	static const int stolen_decoder[] = {
		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
	return stolen_decoder[snb_gmch_ctl] << 20;
}

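/*
 * Probe and map the global GTT. Pre-gen6 chipsets go through the legacy
 * intel-gtt/GMCH layer; gen6+ parts read the GTT and stolen sizes straight
 * from PCI config space and ioremap the PTE array, which lives 2MB into
 * BAR 0.
 */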
int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	u16 snb_gmch_ctl;
	u32 tmp;
	int ret;

	/* On modern platforms we need not worry ourselves with the legacy
	 * hostbridge query stuff; skip it entirely.
	 */
	if (INTEL_INFO(dev)->gen < 6) {
		ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
		if (!ret) {
			DRM_ERROR("failed to set up gmch\n");
			return -EIO;
		}

		dev_priv->mm.gtt = intel_gtt_get();
		if (!dev_priv->mm.gtt) {
			DRM_ERROR("Failed to initialize GTT\n");
			intel_gmch_remove();
			return -ENODEV;
		}
		return 0;
	}

	dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
	if (!dev_priv->mm.gtt)
		return -ENOMEM;

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));

	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_0, &tmp);
	/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
	gtt_bus_addr = (tmp & PCI_BASE_ADDRESS_MEM_MASK) + (2<<20);

	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_2, &tmp);
	dev_priv->mm.gtt->gma_bus_addr = tmp & PCI_BASE_ADDRESS_MEM_MASK;

	/* i9xx_setup */
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	dev_priv->mm.gtt->gtt_total_entries =
		gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
	if (INTEL_INFO(dev)->gen < 7)
		dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
	else
		dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);

	dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
	/* 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
	    dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
		DRM_ERROR("Unknown GMADR entries (%d)\n",
			  dev_priv->mm.gtt->gtt_mappable_entries);
		ret = -ENXIO;
		goto err_out;
	}

	ret = setup_scratch_page(dev);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		goto err_out;
	}

	dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
					   dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
	if (!dev_priv->mm.gtt->gtt) {
		DRM_ERROR("Failed to map the gtt page table\n");
		teardown_scratch_page(dev);
		ret = -ENOMEM;
		goto err_out;
	}

	/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
	DRM_INFO("Memory usable by graphics device = %dM\n",
		 dev_priv->mm.gtt->gtt_total_entries >> 8);
	DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
	DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);

	return 0;

err_out:
	kfree(dev_priv->mm.gtt);
	if (INTEL_INFO(dev)->gen < 6)
		intel_gmch_remove();
	return ret;
}

void i915_gem_gtt_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	iounmap(dev_priv->mm.gtt->gtt);
	teardown_scratch_page(dev);
	if (INTEL_INFO(dev)->gen < 6)
		intel_gmch_remove();
	kfree(dev_priv->mm.gtt);
}