/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* PPGTT support for Sandybridge/Gen6 and later */
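/*
 * Point every PTE in the range [first_entry, first_entry + num_entries)
 * at the scratch page. The walk is done one page table at a time, since
 * each table page has to be mapped with kmap_atomic() before it can be
 * written.
 */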
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	uint32_t *pt_vaddr;
	uint32_t scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

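/*
 * Allocate and initialize the aliasing ppgtt: one page of PTEs per page
 * directory entry, DMA-mapped when the chipset requires it, with every
 * entry initially pointing at the scratch page. The PDEs themselves live
 * at the tail of the global GTT (see first_pd_entry_in_global_pt).
 */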
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

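	/* With an IOMMU (DMAR) enabled, the page table pages must be mapped
	 * through the DMA API so that the GPU references them by bus address
	 * rather than by raw physical page.
	 */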
	if (dev_priv->mm.gtt->needs_dmar) {
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
					     *ppgtt->num_pd_entries,
					     GFP_KERNEL);
		if (!ppgtt->pt_dma_addr)
			goto err_pt_alloc;

		for (i = 0; i < ppgtt->num_pd_entries; i++) {
			dma_addr_t pt_addr;

			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
					       0, 4096,
					       PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
				ret = -EIO;
				goto err_pd_pin;
			}
			ppgtt->pt_dma_addr[i] = pt_addr;
		}
	}

	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);

	dev_priv->mm.aliasing_ppgtt = ppgtt;

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);

	return ret;
}

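/*
 * Undo i915_gem_init_aliasing_ppgtt(): unmap the page table pages from
 * DMA (if they were mapped) and free all ppgtt allocations.
 */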
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

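/*
 * Write one PTE per 4KiB page of the scatterlist into the ppgtt, starting
 * at first_entry. A single scatterlist segment may span several pages, so
 * the walk tracks the current segment (sg, i) and the page within it (m)
 * independently of the PTE index (j).
 */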
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
					 const struct sg_table *pages,
					 unsigned first_entry,
					 uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j, m, segment_len;
	dma_addr_t page_addr;
	struct scatterlist *sg;

	/* init sg walking */
	sg = pages->sgl;
	i = 0;
	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
	m = 0;

	while (i < pages->nents) {
		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[j] = pte | pte_flags;

			/* grab the next page */
			if (++m == segment_len) {
				if (++i == pages->nents)
					break;

				sg = sg_next(sg);
				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
				m = 0;
			}
		}

		kunmap_atomic(pt_vaddr);

		first_pte = 0;
		act_pd++;
	}
}

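/*
 * Bind an object into the ppgtt: translate its cache level into GEN6 PTE
 * flags (Haswell uses a different encoding for uncached) and write PTEs
 * for all of its backing pages at the object's GTT offset.
 */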
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	uint32_t pte_flags = GEN6_PTE_VALID;

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(obj->base.dev))
			pte_flags |= HSW_PTE_UNCACHED;
		else
			pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	i915_ppgtt_insert_sg_entries(ppgtt,
				     obj->sg_table ?: obj->pages,
				     obj->gtt_space->start >> PAGE_SHIFT,
				     pte_flags);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt,
			       obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT);
}

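/*
 * Translate an i915 cache level into the memory type that the intel-gtt
 * layer uses for global GTT PTEs.
 */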
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		if (INTEL_INFO(dev)->gen >= 6)
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fall through and request the PTE simply
		 * as cached.
		 */
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}

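/*
 * Some chipsets (see gtt->do_idle_maps) need the GPU idle while DMA
 * mappings are torn down. do_idling() forces the idle and returns the
 * previous mm.interruptible state so undo_idling() can restore it.
 */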
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

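/*
 * Restore the global GTT mappings (e.g. after resume, when the GTT
 * contents have been lost): fill our whole range with scratch pages,
 * then rewrite the PTEs for every object that is still bound.
 */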
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}

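/*
 * DMA-map the object's backing pages, unless they already carry a
 * mapping (e.g. pages imported through dma-buf).
 */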
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

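/*
 * Write the object's pages into the global GTT through the intel-gtt
 * layer, using the memory type derived from the cache level.
 */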
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	intel_gtt_insert_sg_entries(obj->sg_table ?: obj->pages,
				    obj->gtt_space->start >> PAGE_SHIFT,
				    agp_type);
	obj->has_global_gtt_mapping = 1;
}

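/*
 * Drop the object's global GTT mapping by pointing its range back at
 * scratch pages.
 */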
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

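/*
 * drm_mm color_adjust hook: when a neighbouring node has a different
 * cache "color", shrink the proposed hole by a page on that side. This
 * keeps a guard page between objects of different cache levels; the hook
 * is only installed on non-LLC platforms (see the HAS_LLC() check below).
 */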
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

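/*
 * Set up the drm_mm allocator for the global GTT and record the range
 * bookkeeping in dev_priv. The last page is kept out of the allocator as
 * a guard page, but the clear below still covers the full range.
 */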
void i915_gem_init_global_gtt(struct drm_device *dev,
			      unsigned long start,
			      unsigned long mappable_end,
			      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* ... but ensure that we clear the entire range. */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}