/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)

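/* A note on the ADDR_ENCODE macros above: a gen6 GTT entry is only 32 bits
 * wide, so a 40-bit physical address cannot be stored as-is. Bits 31:12 of
 * the address land in entry bits 31:12, while address bits 39:32 are folded
 * into entry bits 11:4 by the (((addr) >> 28) & 0xff0) term. For example,
 * addr 0x1234567000 encodes to 0x34567120 - page frame 0x34567 with the
 * high byte 0x12 in bits 11:4 - before any valid/cache bits are ORed in.
 */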
static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
                                      dma_addr_t addr,
                                      enum i915_cache_level level)
{
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_LLC_MLC:
                pte |= GEN6_PTE_CACHE_LLC_MLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                BUG();
        }

        return pte;
}

#define BYT_PTE_WRITEABLE (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)

static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
                                     dma_addr_t addr,
                                     enum i915_cache_level level)
{
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        /* Mark the page as writeable. Other platforms don't have a
         * setting for read-only/writable, so this matches that behavior.
         */
        pte |= BYT_PTE_WRITEABLE;

        if (level != I915_CACHE_NONE)
                pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

        return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
                                     dma_addr_t addr,
                                     enum i915_cache_level level)
{
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        if (level != I915_CACHE_NONE)
                pte |= GEN6_PTE_CACHE_LLC;

        return pte;
}

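/* The PPGTT page directory lives inside the global GTT (its slots are
 * stolen from the tail of the GTT, see gen6_ppgtt_init), so the PDEs below
 * are written through the same ioremapped gsm window as ordinary global
 * GTT PTEs. The trailing readl() is a posting read to flush the writes.
 */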
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
        struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
        gen6_gtt_pte_t __iomem *pd_addr;
        uint32_t pd_entry;
        int i;

        WARN_ON(ppgtt->pd_offset & 0x3f);
        pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
                ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;

                pt_addr = ppgtt->pt_dma_addr[i];
                pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
                pd_entry |= GEN6_PDE_VALID;

                writel(pd_entry, pd_addr + i);
        }
        readl(pd_addr);
}

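/* PP_DIR_BASE takes the page directory offset expressed in cachelines and
 * placed in the upper half of the register, hence the divide by 64 and
 * shift by 16 on pd_offset below. Every ring is pointed at the same
 * directory, as this is the single aliasing PPGTT shared device-wide.
 */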
static int gen6_ppgtt_enable(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t pd_offset;
        struct intel_ring_buffer *ring;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        int i;

        BUG_ON(ppgtt->pd_offset & 0x3f);

        gen6_write_pdes(ppgtt);

        pd_offset = ppgtt->pd_offset;
        pd_offset /= 64; /* in cachelines */
        pd_offset <<= 16;

        if (INTEL_INFO(dev)->gen == 6) {
                uint32_t ecochk, gab_ctl, ecobits;

                ecobits = I915_READ(GAC_ECO_BITS);
                I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
                                         ECOBITS_PPGTT_CACHE64B);

                gab_ctl = I915_READ(GAB_CTL);
                I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

                ecochk = I915_READ(GAM_ECOCHK);
                I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
                                       ECOCHK_PPGTT_CACHE64B);
                I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
        } else if (INTEL_INFO(dev)->gen >= 7) {
                uint32_t ecochk, ecobits;

                ecobits = I915_READ(GAC_ECO_BITS);
                I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

                ecochk = I915_READ(GAM_ECOCHK);
                if (IS_HASWELL(dev)) {
                        ecochk |= ECOCHK_PPGTT_WB_HSW;
                } else {
                        ecochk |= ECOCHK_PPGTT_LLC_IVB;
                        ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
                }
                I915_WRITE(GAM_ECOCHK, ecochk);
                /* GFX_MODE is per-ring on gen7+ */
        }

        for_each_ring(ring, dev_priv, i) {
                if (INTEL_INFO(dev)->gen >= 7)
                        I915_WRITE(RING_MODE_GEN7(ring),
                                   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

                I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
                I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
        }
        return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                                   unsigned first_entry,
                                   unsigned num_entries)
{
        gen6_gtt_pte_t *pt_vaddr, scratch_pte;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;

        scratch_pte = ppgtt->pte_encode(ppgtt->dev,
                                        ppgtt->scratch_page_dma_addr,
                                        I915_CACHE_LLC);

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;

                kunmap_atomic(pt_vaddr);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pt++;
        }
}

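/* A note on the loop below: act_pt selects the page table page to kmap and
 * act_pte the slot within it, so an object spanning several page tables is
 * written out with only one atomic mapping held at any moment.
 */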
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
                                      struct sg_table *pages,
                                      unsigned first_entry,
                                      enum i915_cache_level cache_level)
{
        gen6_gtt_pte_t *pt_vaddr;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        struct sg_page_iter sg_iter;

        pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
                dma_addr_t page_addr;

                page_addr = sg_page_iter_dma_address(&sg_iter);
                pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
                                                      cache_level);
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                        kunmap_atomic(pt_vaddr);
                        act_pt++;
                        pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
                        act_pte = 0;
                }
        }
        kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
        int i;

        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
                        pci_unmap_page(ppgtt->dev->pdev,
                                       ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }

        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                __free_page(ppgtt->pt_pages[i]);
        kfree(ppgtt->pt_pages);
        kfree(ppgtt);
}

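/* Sizing: 512 page directory entries, each pointing at a page table of
 * 1024 32-bit PTEs (PAGE_SIZE / sizeof(gen6_gtt_pte_t)), cover
 * 512 * 1024 * 4KiB = 2GiB of PPGTT address space, matching the
 * PP_DIR_DCLV_2G value programmed in gen6_ppgtt_enable above.
 */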
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
        struct drm_device *dev = ppgtt->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned first_pd_entry_in_global_pt;
        int i;
        int ret = -ENOMEM;

        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
        first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

        if (IS_HASWELL(dev)) {
                ppgtt->pte_encode = hsw_pte_encode;
        } else if (IS_VALLEYVIEW(dev)) {
                ppgtt->pte_encode = byt_pte_encode;
        } else {
                ppgtt->pte_encode = gen6_pte_encode;
        }
        ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
        ppgtt->enable = gen6_ppgtt_enable;
        ppgtt->clear_range = gen6_ppgtt_clear_range;
        ppgtt->insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->cleanup = gen6_ppgtt_cleanup;
        ppgtt->pt_pages = kzalloc(sizeof(struct page *) * ppgtt->num_pd_entries,
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
                return -ENOMEM;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
                if (!ppgtt->pt_pages[i])
                        goto err_pt_alloc;
        }

        ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
                                     GFP_KERNEL);
        if (!ppgtt->pt_dma_addr)
                goto err_pt_alloc;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;

                pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
                                       PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
                        ret = -EIO;
                        goto err_pd_pin;
                }
                ppgtt->pt_dma_addr[i] = pt_addr;
        }

        ppgtt->clear_range(ppgtt, 0,
                           ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);

        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

        return 0;

err_pd_pin:
        if (ppgtt->pt_dma_addr) {
                for (i--; i >= 0; i--)
                        pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
err_pt_alloc:
        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                if (ppgtt->pt_pages[i])
                        __free_page(ppgtt->pt_pages[i]);
        }
        kfree(ppgtt->pt_pages);

        return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt;
        int ret;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return -ENOMEM;

        ppgtt->dev = dev;
        ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

        if (INTEL_INFO(dev)->gen < 8)
                ret = gen6_ppgtt_init(ppgtt);
        else
                BUG();

        if (ret)
                kfree(ppgtt);
        else
                dev_priv->mm.aliasing_ppgtt = ppgtt;

        return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

        if (!ppgtt)
                return;

        ppgtt->cleanup(ppgtt);
        dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
{
        ppgtt->insert_entries(ppgtt, obj->pages,
                              obj->gtt_space->start >> PAGE_SHIFT,
                              cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
{
        ppgtt->clear_range(ppgtt,
                           obj->gtt_space->start >> PAGE_SHIFT,
                           obj->base.size >> PAGE_SHIFT);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
        if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
                return true;
#endif
        return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
        bool ret = dev_priv->mm.interruptible;

        if (unlikely(dev_priv->gtt.do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
        }

        return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
        if (unlikely(dev_priv->gtt.do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
}

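/* Rewrite every GTT entry when its contents may be stale, e.g. coming out
 * of suspend: point the whole range at scratch pages first, then rebind
 * each object still on the bound list.
 */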
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
                                      dev_priv->gtt.total / PAGE_SIZE);

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                i915_gem_clflush_object(obj);
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }

        i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
        if (obj->has_dma_mapping)
                return 0;

        if (!dma_map_sg(&obj->base.dev->pdev->dev,
                        obj->pages->sgl, obj->pages->nents,
                        PCI_DMA_BIDIRECTIONAL))
                return -ENOSPC;

        return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
                                     struct sg_table *st,
                                     unsigned int first_entry,
                                     enum i915_cache_level level)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        gen6_gtt_pte_t __iomem *gtt_entries =
                (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
        struct sg_page_iter sg_iter;
        dma_addr_t addr;

        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                addr = sg_page_iter_dma_address(&sg_iter);
                iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
                          &gtt_entries[i]);
                i++;
        }

        /* XXX: This serves as a posting read to make sure that the PTE has
         * actually been updated. There is some concern that even though
         * registers and PTEs are within the same BAR, they may be subject to
         * different (NUMA-like) access patterns. Therefore, even with the way
         * we assume hardware should work, we must keep this posting read for
         * paranoia.
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1])
                        != dev_priv->gtt.pte_encode(dev, addr, level));

        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen6_ggtt_clear_range(struct drm_device *dev,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
        int i;

        if (WARN(num_entries > max_entries,
                 "First entry = %d; Num entries = %d (max=%d)\n",
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;

        scratch_pte = dev_priv->gtt.pte_encode(dev,
                                               dev_priv->gtt.scratch_page_dma,
                                               I915_CACHE_LLC);
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
}

static void i915_ggtt_insert_entries(struct drm_device *dev,
                                     struct sg_table *st,
                                     unsigned int pg_start,
                                     enum i915_cache_level cache_level)
{
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

        intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct drm_device *dev,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
{
        intel_gtt_clear_range(first_entry, num_entries);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
                                         obj->gtt_space->start >> PAGE_SHIFT,
                                         cache_level);

        obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->gtt.gtt_clear_range(obj->base.dev,
                                      obj->gtt_space->start >> PAGE_SHIFT,
                                      obj->base.size >> PAGE_SHIFT);

        obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;

        interruptible = do_idling(dev_priv);

        if (!obj->has_dma_mapping)
                dma_unmap_sg(&dev->pdev->dev,
                             obj->pages->sgl, obj->pages->nents,
                             PCI_DMA_BIDIRECTIONAL);

        undo_idling(dev_priv, interruptible);
}

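/* Presumably this keeps the prefetcher from wandering across a boundary
 * between differently-cached objects on non-LLC platforms: nodes whose
 * cache level ("color") differs from their neighbour are padded so that a
 * one-page gap separates them (hooked up below only when !HAS_LLC).
 */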
static void i915_gtt_color_adjust(struct drm_mm_node *node,
                                  unsigned long color,
                                  unsigned long *start,
                                  unsigned long *end)
{
        if (node->color != color)
                *start += 4096;

        if (!list_empty(&node->node_list)) {
                node = list_entry(node->node_list.next,
                                  struct drm_mm_node,
                                  node_list);
                if (node->allocated && node->color != color)
                        *end -= 4096;
        }
}

void i915_gem_setup_global_gtt(struct drm_device *dev,
                               unsigned long start,
                               unsigned long mappable_end,
                               unsigned long end)
{
        /* Let GEM Manage all of the aperture.
         *
         * However, leave one page at the end still bound to the scratch page.
         * There are a number of places where the hardware apparently prefetches
         * past the end of the object, and we've seen multiple hangs with the
         * GPU head pointer stuck in a batchbuffer bound at the last page of the
         * aperture. One page should be enough to keep any prefetching inside
         * of the aperture.
         */
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_mm_node *entry;
        struct drm_i915_gem_object *obj;
        unsigned long hole_start, hole_end;

        BUG_ON(mappable_end > end);

        /* Subtract the guard page ... */
        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
                dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

        /* Mark any preallocated objects as occupied */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
                              obj->gtt_offset, obj->base.size);

                BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
                obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
                                                     obj->gtt_offset,
                                                     obj->base.size,
                                                     false);
                obj->has_global_gtt_mapping = 1;
        }

        dev_priv->gtt.start = start;
        dev_priv->gtt.total = end - start;

        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
                             hole_start, hole_end) {
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
                dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
                                              (hole_end - hole_start) / PAGE_SIZE);
        }

        /* And finally clear the reserved guard page */
        dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
        if (i915_enable_ppgtt >= 0)
                return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long gtt_size, mappable_size;

        gtt_size = dev_priv->gtt.total;
        mappable_size = dev_priv->gtt.mappable_end;

        if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
                int ret;

                if (INTEL_INFO(dev)->gen <= 7) {
                        /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                         * aperture accordingly when using aliasing ppgtt. */
                        gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
                }

                i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

                ret = i915_gem_init_aliasing_ppgtt(dev);
                if (!ret)
                        return;

                DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
                drm_mm_takedown(&dev_priv->mm.gtt_space);
                gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
        }
        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

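/* The scratch page backs every GTT entry with no real page behind it, so
 * stray GPU accesses and prefetches hit a harmless uncached page instead
 * of faulting. GFP_DMA32 presumably keeps the page below 4GiB for
 * platforms that cannot address more than 32 bits.
 */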
static int setup_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct page *page;
        dma_addr_t dma_addr;

        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
        get_page(page);
        set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
        dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
                                PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dma_addr))
                return -EINVAL;
#else
        dma_addr = page_to_phys(page);
#endif
        dev_priv->gtt.scratch_page = page;
        dev_priv->gtt.scratch_page_dma = dma_addr;

        return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        set_pages_wb(dev_priv->gtt.scratch_page, 1);
        pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        put_page(dev_priv->gtt.scratch_page);
        __free_page(dev_priv->gtt.scratch_page);
}

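/* The two helpers below decode the SNB GMCH control word: GGMS holds the
 * GTT size in MiB units (hence << 20) and GMS the stolen memory size in
 * 32MB units (hence << 25), e.g. GMS == 2 means 64MB of stolen memory.
 */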
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
        return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
        return snb_gmch_ctl << 25; /* 32 MB units */
}

static int gen6_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
                           unsigned long *mappable_end)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        phys_addr_t gtt_bus_addr;
        unsigned int gtt_size;
        u16 snb_gmch_ctl;
        int ret;

        *mappable_base = pci_resource_start(dev->pdev, 2);
        *mappable_end = pci_resource_len(dev->pdev, 2);

        /* 64/512MB is the current min/max we actually know of, but this is just
         * a coarse sanity check.
         */
        if (*mappable_end < (64<<20) || *mappable_end > (512<<20)) {
                DRM_ERROR("Unknown GMADR size (%lx)\n",
                          dev_priv->gtt.mappable_end);
                return -ENXIO;
        }

        if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

        *stolen = gen6_get_stolen_size(snb_gmch_ctl);
        *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

        /* For Modern GENs the PTEs and register space are split in the BAR */
        gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
                (pci_resource_len(dev->pdev, 0) / 2);

        dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
        if (!dev_priv->gtt.gsm) {
                DRM_ERROR("Failed to map the gtt page table\n");
                return -ENOMEM;
        }

        ret = setup_scratch_page(dev);
        if (ret)
                DRM_ERROR("Scratch setup failed\n");

        dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
        dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

        return ret;
}

static void gen6_gmch_remove(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        iounmap(dev_priv->gtt.gsm);
        teardown_scratch_page(dev_priv->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
                           unsigned long *mappable_end)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
        if (!ret) {
                DRM_ERROR("failed to set up gmch\n");
                return -EIO;
        }

        intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

        dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
        dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
        dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

        return 0;
}

static void i915_gmch_remove(struct drm_device *dev)
{
        intel_gmch_remove();
}

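/* Probe dispatch: gen5 and earlier still go through the old intel-gtt
 * layer (i915_gmch_*), while gen6+ map and drive the GTT directly
 * (gen6_gmch_*) and additionally pick the PTE encoder matching the
 * platform's cache-control bits.
 */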
int i915_gem_gtt_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_gtt *gtt = &dev_priv->gtt;
        int ret;

        if (INTEL_INFO(dev)->gen <= 5) {
                dev_priv->gtt.gtt_probe = i915_gmch_probe;
                dev_priv->gtt.gtt_remove = i915_gmch_remove;
        } else {
                dev_priv->gtt.gtt_probe = gen6_gmch_probe;
                dev_priv->gtt.gtt_remove = gen6_gmch_remove;
                if (IS_HASWELL(dev)) {
                        dev_priv->gtt.pte_encode = hsw_pte_encode;
                } else if (IS_VALLEYVIEW(dev)) {
                        dev_priv->gtt.pte_encode = byt_pte_encode;
                } else {
                        dev_priv->gtt.pte_encode = gen6_pte_encode;
                }
        }

        ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
                                      &dev_priv->gtt.stolen_size,
                                      &gtt->mappable_base,
                                      &gtt->mappable_end);
        if (ret)
                return ret;

        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %zdM\n",
                 dev_priv->gtt.total >> 20);
        DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
                         dev_priv->gtt.mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
                         dev_priv->gtt.stolen_size >> 20);

        return 0;
}