/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

typedef uint32_t gtt_pte_t;

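/*
 * A gen6+ GTT/PPGTT entry is a single 32-bit word: bit 0 is the valid bit,
 * the page's bus address is packed in via GEN6_PTE_ADDR_ENCODE(), and the
 * remaining low bits select the cache attributes (LLC, LLC+MLC, or
 * uncached; Haswell has its own uncached encoding).  pte_encode() below
 * builds such an entry, roughly:
 *
 *   pte = GEN6_PTE_VALID | GEN6_PTE_ADDR_ENCODE(addr) | cache_bits;
 */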
static inline gtt_pte_t pte_encode(struct drm_device *dev,
                                   dma_addr_t addr,
                                   enum i915_cache_level level)
{
        gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_LLC_MLC:
                /* Haswell doesn't set L3 this way */
                if (IS_HASWELL(dev))
                        pte |= GEN6_PTE_CACHE_LLC;
                else
                        pte |= GEN6_PTE_CACHE_LLC_MLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                if (IS_HASWELL(dev))
                        pte |= HSW_PTE_UNCACHED;
                else
                        pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                BUG();
        }

        return pte;
}

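/*
 * Layout of the aliasing PPGTT (assuming the usual gen6 constants,
 * I915_PPGTT_PD_ENTRIES = 512 and I915_PPGTT_PT_ENTRIES = 1024): one page
 * directory of 512 entries, each pointing at a page table of 1024 4-byte
 * PTEs, i.e. 512 * 1024 * 4 KiB = 2 GiB of GPU virtual address space.  The
 * "aliasing" PPGTT mirrors the global GTT: objects are bound at the same
 * offset in both, so no separate address space management is needed.
 */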
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                                   unsigned first_entry,
                                   unsigned num_entries)
{
        gtt_pte_t *pt_vaddr;
        gtt_pte_t scratch_pte;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;

        scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
                                 I915_CACHE_LLC);

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;

                kunmap_atomic(pt_vaddr);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pd++;
        }
}

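/*
 * Set up the single aliasing PPGTT used by the whole device: allocate one
 * struct page per page table, DMA-map them when the GTT sits behind an
 * IOMMU (needs_dmar), point every PTE at the scratch page, and record
 * pd_offset, the byte offset of the PDEs stolen from the end of the global
 * GTT.
 */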
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt;
        unsigned first_pd_entry_in_global_pt;
        int i;
        int ret = -ENOMEM;

        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
        first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return ret;

        ppgtt->dev = dev;
        ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
        ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
                goto err_ppgtt;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
                if (!ppgtt->pt_pages[i])
                        goto err_pt_alloc;
        }

        if (dev_priv->mm.gtt->needs_dmar) {
                ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
                                             *ppgtt->num_pd_entries,
                                             GFP_KERNEL);
                if (!ppgtt->pt_dma_addr)
                        goto err_pt_alloc;

                for (i = 0; i < ppgtt->num_pd_entries; i++) {
                        dma_addr_t pt_addr;

                        pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
                                               0, 4096,
                                               PCI_DMA_BIDIRECTIONAL);

                        if (pci_dma_mapping_error(dev->pdev,
                                                  pt_addr)) {
                                ret = -EIO;
                                goto err_pd_pin;
                        }
                        ppgtt->pt_dma_addr[i] = pt_addr;
                }
        }

        ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

        i915_ppgtt_clear_range(ppgtt, 0,
                               ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

        ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);

        dev_priv->mm.aliasing_ppgtt = ppgtt;

        return 0;

err_pd_pin:
        if (ppgtt->pt_dma_addr) {
                for (i--; i >= 0; i--)
                        pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
err_pt_alloc:
        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                if (ppgtt->pt_pages[i])
                        __free_page(ppgtt->pt_pages[i]);
        }
        kfree(ppgtt->pt_pages);
err_ppgtt:
        kfree(ppgtt);

        return ret;
}

181void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
182{
183 struct drm_i915_private *dev_priv = dev->dev_private;
184 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
185 int i;
186
187 if (!ppgtt)
188 return;
189
190 if (ppgtt->pt_dma_addr) {
191 for (i = 0; i < ppgtt->num_pd_entries; i++)
192 pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
193 4096, PCI_DMA_BIDIRECTIONAL);
194 }
195
196 kfree(ppgtt->pt_dma_addr);
197 for (i = 0; i < ppgtt->num_pd_entries; i++)
198 __free_page(ppgtt->pt_pages[i]);
199 kfree(ppgtt->pt_pages);
200 kfree(ppgtt);
201}
202
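/*
 * Write PTEs for every page backing an object into the PPGTT page tables.
 * The sg walk below is manual: each scatterlist segment covers segment_len
 * pages, m indexes the page within the current segment, and the outer loop
 * kmaps one page table (1024 PTEs) at a time, moving to the next page
 * directory entry (act_pd) once a table is full.
 */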
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
                                         const struct sg_table *pages,
                                         unsigned first_entry,
                                         enum i915_cache_level cache_level)
{
        gtt_pte_t *pt_vaddr;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned i, j, m, segment_len;
        dma_addr_t page_addr;
        struct scatterlist *sg;

        /* init sg walking */
        sg = pages->sgl;
        i = 0;
        segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
        m = 0;

        while (i < pages->nents) {
                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

                for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
                        page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
                                                 cache_level);

                        /* grab the next page */
                        if (++m == segment_len) {
                                if (++i == pages->nents)
                                        break;

                                sg = sg_next(sg);
                                segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
                                m = 0;
                        }
                }

                kunmap_atomic(pt_vaddr);

                first_pte = 0;
                act_pd++;
        }
}

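/*
 * Bind/unbind an object in the aliasing PPGTT.  The PPGTT offset is taken
 * from obj->gtt_space, i.e. the object appears at exactly the same address
 * it occupies in the global GTT.
 */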
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
{
        i915_ppgtt_insert_sg_entries(ppgtt,
                                     obj->pages,
                                     obj->gtt_space->start >> PAGE_SHIFT,
                                     cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
{
        i915_ppgtt_clear_range(ppgtt,
                               obj->gtt_space->start >> PAGE_SHIFT,
                               obj->base.size >> PAGE_SHIFT);
}

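/*
 * do_idling()/undo_idling() bracket GTT unmaps on machines where
 * mm.gtt->do_idle_maps is set (presumably chipsets where VT-d and the GPU
 * interact badly): the GPU is forced idle, with waits made
 * non-interruptible, before the DMA mapping is torn down, and the previous
 * interruptible state is restored afterwards.
 */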
static bool do_idling(struct drm_i915_private *dev_priv)
{
        bool ret = dev_priv->mm.interruptible;

        if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
        }

        return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
        if (unlikely(dev_priv->mm.gtt->do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
}

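/*
 * Point a range of global GTT entries at the scratch page.  On gen6+ the
 * PTEs are written directly through the write-combined ioremap of the GTT;
 * older platforms go through the intel-gtt helper.  The trailing readl()
 * acts as a posting read so the writes land before the range is reused.
 */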
static void i915_ggtt_clear_range(struct drm_device *dev,
                                  unsigned first_entry,
                                  unsigned num_entries)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        gtt_pte_t scratch_pte;
        gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
        const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
        int i;

        if (INTEL_INFO(dev)->gen < 6) {
                intel_gtt_clear_range(first_entry, num_entries);
                return;
        }

        if (WARN(num_entries > max_entries,
                 "First entry = %d; Num entries = %d (max=%d)\n",
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;

        scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
        /* memset_io() would only replicate a single byte, so write each
         * 32-bit PTE explicitly. */
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
}

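/*
 * Re-establish all global GTT mappings, e.g. after resume from suspend or
 * hibernation when the GTT contents may have been lost: scrub our range
 * with scratch pages, then rewrite the PTEs for every bound object and
 * flush the chipset write domain.
 */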
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        /* First fill our portion of the GTT with scratch pages */
        i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
                              (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
                i915_gem_clflush_object(obj);
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }

        i915_gem_chipset_flush(dev);
}

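/*
 * DMA-map an object's backing pages before it can be bound into any GTT.
 * Objects that already carry a DMA mapping (has_dma_mapping, e.g. imported
 * dma-buf backing storage) are left alone.
 */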
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
        if (obj->has_dma_mapping)
                return 0;

        if (!dma_map_sg(&obj->base.dev->pdev->dev,
                        obj->pages->sgl, obj->pages->nents,
                        PCI_DMA_BIDIRECTIONAL))
                return -ENOSPC;

        return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the CPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
                                  enum i915_cache_level level)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sg_table *st = obj->pages;
        struct scatterlist *sg = st->sgl;
        const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
        const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
        gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
        int unused, i = 0;
        unsigned int len, m = 0;
        dma_addr_t addr;

        for_each_sg(st->sgl, sg, st->nents, unused) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (m = 0; m < len; m++) {
                        addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
                        i++;
                }
        }

        BUG_ON(i > max_entries);
        BUG_ON(i != obj->base.size / PAGE_SIZE);

        /* XXX: This serves as a posting read to make sure that the PTE has
         * actually been updated. There is some concern that even though
         * registers and PTEs are within the same BAR that they are potentially
         * of NUMA access patterns. Therefore, even with the way we assume
         * hardware should work, we must keep this posting read for paranoia.
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));

        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
{
        struct drm_device *dev = obj->base.dev;
        if (INTEL_INFO(dev)->gen < 6) {
                unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                        AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
                intel_gtt_insert_sg_entries(obj->pages,
                                            obj->gtt_space->start >> PAGE_SHIFT,
                                            flags);
        } else {
                gen6_ggtt_bind_object(obj, cache_level);
        }

        obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        i915_ggtt_clear_range(obj->base.dev,
                              obj->gtt_space->start >> PAGE_SHIFT,
                              obj->base.size >> PAGE_SHIFT);

        obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;

        interruptible = do_idling(dev_priv);

        if (!obj->has_dma_mapping)
                dma_unmap_sg(&dev->pdev->dev,
                             obj->pages->sgl, obj->pages->nents,
                             PCI_DMA_BIDIRECTIONAL);

        undo_idling(dev_priv, interruptible);
}

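/*
 * GTT "coloring" for machines without an LLC: the color of a drm_mm node
 * is its cache level, and whenever two neighbouring nodes differ in color a
 * 4096-byte hole is left between them, so that GPU prefetching never
 * crosses directly from a cached mapping into an uncached one (or vice
 * versa).
 */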
static void i915_gtt_color_adjust(struct drm_mm_node *node,
                                  unsigned long color,
                                  unsigned long *start,
                                  unsigned long *end)
{
        if (node->color != color)
                *start += 4096;

        if (!list_empty(&node->node_list)) {
                node = list_entry(node->node_list.next,
                                  struct drm_mm_node,
                                  node_list);
                if (node->allocated && node->color != color)
                        *end -= 4096;
        }
}

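/*
 * Initialize the drm_mm range manager covering [start, end) of the global
 * GTT.  The last page is kept out of the manager as a guard page (the GPU
 * may prefetch a little past the end of a buffer), but the scratch-page
 * scrub below still covers the full range including that guard page.
 */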
void i915_gem_init_global_gtt(struct drm_device *dev,
                              unsigned long start,
                              unsigned long mappable_end,
                              unsigned long end)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Subtract the guard page ... */
        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
                dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

        dev_priv->mm.gtt_start = start;
        dev_priv->mm.gtt_mappable_end = mappable_end;
        dev_priv->mm.gtt_end = end;
        dev_priv->mm.gtt_total = end - start;
        dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

        /* ... but ensure that we clear the entire range. */
        i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}

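/*
 * Allocate the single scratch page that every unused GTT/PPGTT entry points
 * at.  It comes from the DMA32 zone, is set uncached, and is DMA-mapped
 * when an IOMMU is compiled in; stray GPU reads and writes then hit this
 * page instead of whatever memory a stale PTE happened to reference.
 */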
static int setup_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct page *page;
        dma_addr_t dma_addr;

        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
        get_page(page);
        set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
        dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
                                PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dma_addr))
                return -EINVAL;
#else
        dma_addr = page_to_phys(page);
#endif
        dev_priv->mm.gtt->scratch_page = page;
        dev_priv->mm.gtt->scratch_page_dma = dma_addr;

        return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
        pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        put_page(dev_priv->mm.gtt->scratch_page);
        __free_page(dev_priv->mm.gtt->scratch_page);
}

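/*
 * The GGTT and stolen-memory sizes are encoded in the SNB_GMCH_CTRL config
 * word of the GMCH: the GGMS field gives the GTT size in 1 MiB units
 * (hence << 20), the gen6 GMS field gives stolen memory in 32 MiB units
 * (hence << 25), and gen7 switches to the lookup table below, in MiB.
 */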
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
        return snb_gmch_ctl << 20;
}

static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
        return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
{
        static const int stolen_decoder[] = {
                0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
        snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
        return stolen_decoder[snb_gmch_ctl] << 20;
}

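/*
 * Probe and map the global GTT.  On gen < 6 this is delegated entirely to
 * the intel-gtt/GMCH helper; on gen6+ the GTT PTEs live 2 MiB into BAR 0
 * (GTTMMADR), the CPU-visible aperture is BAR 2 (GMADR), and the GTT and
 * stolen-memory sizes are decoded from SNB_GMCH_CTRL as above.  The PTE
 * array is mapped write-combined so the per-entry writes stay cheap.
 */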
int i915_gem_gtt_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        phys_addr_t gtt_bus_addr;
        u16 snb_gmch_ctl;
        u32 tmp;
        int ret;

        /* On modern platforms we need not worry ourselves with the legacy
         * hostbridge query stuff. Skip it entirely.
         */
        if (INTEL_INFO(dev)->gen < 6) {
                ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
                if (!ret) {
                        DRM_ERROR("failed to set up gmch\n");
                        return -EIO;
                }

                dev_priv->mm.gtt = intel_gtt_get();
                if (!dev_priv->mm.gtt) {
                        DRM_ERROR("Failed to initialize GTT\n");
                        intel_gmch_remove();
                        return -ENODEV;
                }
                return 0;
        }

        dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
        if (!dev_priv->mm.gtt)
                return -ENOMEM;

        if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));

        pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_0, &tmp);
        /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
        gtt_bus_addr = (tmp & PCI_BASE_ADDRESS_MEM_MASK) + (2<<20);

        pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_2, &tmp);
        dev_priv->mm.gtt->gma_bus_addr = tmp & PCI_BASE_ADDRESS_MEM_MASK;

        /* i9xx_setup */
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        dev_priv->mm.gtt->gtt_total_entries =
                gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
        if (INTEL_INFO(dev)->gen < 7)
                dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
        else
                dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);

        dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
        /* 64/512MB is the current min/max we actually know of, but this is just a
         * coarse sanity check.
         */
        if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
            dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
                DRM_ERROR("Unknown GMADR entries (%d)\n",
                          dev_priv->mm.gtt->gtt_mappable_entries);
                ret = -ENXIO;
                goto err_out;
        }

        ret = setup_scratch_page(dev);
        if (ret) {
                DRM_ERROR("Scratch setup failed\n");
                goto err_out;
        }

        dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
                                           dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
        if (!dev_priv->mm.gtt->gtt) {
                DRM_ERROR("Failed to map the gtt page table\n");
                teardown_scratch_page(dev);
                ret = -ENOMEM;
                goto err_out;
        }

        /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
        DRM_INFO("Memory usable by graphics device = %dM\n",
                 dev_priv->mm.gtt->gtt_total_entries >> 8);
        DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
        DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);

        return 0;

err_out:
        kfree(dev_priv->mm.gtt);
        if (INTEL_INFO(dev)->gen < 6)
                intel_gmch_remove();
        return ret;
}

void i915_gem_gtt_fini(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        iounmap(dev_priv->mm.gtt->gtt);
        teardown_scratch_page(dev);
        if (INTEL_INFO(dev)->gen < 6)
                intel_gmch_remove();
        kfree(dev_priv->mm.gtt);
}