/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))

/* PPGTT (per-process GTT) PTE/PDE encodings */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/* Cacheability Control is a 4-bit value. The low three bits are stored in
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)

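/* Encode a DMA address and cache level into a gen6-style PTE: valid bit,
 * scattered address bits, and the cache-control field.  An unknown cache
 * level is a programming error and hits BUG().
 */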
static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}

#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

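/* Valleyview/Baytrail PTEs carry a snoop bit instead of a cache-level
 * field; cacheable mappings are snooped by the CPU caches.
 */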
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable.  Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

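/* Haswell uses its own address encoding and expresses cacheability via
 * the 4-bit Cacheability Control field (here: write-back in LLC, age 0).
 */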
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE0;

	return pte;
}

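/* Variant for Haswell parts with eDRAM (eLLC) -- presumably the "Iris"
 * SKUs, hence the name: cacheable pages are write-back in eLLC and LLC.
 */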
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_ELLC_LLC_AGE0;

	return pte;
}

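/* Write the PPGTT page directory into the region of the global GTT that
 * was stolen for it, one valid PDE per page table.
 */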
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}

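/* Enable the aliasing PPGTT in hardware: publish the page directory, set
 * the gen-specific ECO/ECOCHK workaround bits and flip GFX_PPGTT_ENABLE
 * (globally on gen6, per ring on gen7+).
 */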
static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	BUG_ON(ppgtt->pd_offset & 0x3f);

	gen6_write_pdes(ppgtt);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
		} else {
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		}
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   unsigned first_entry,
				   unsigned num_entries)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

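/* Write a PTE for every page of the sg_table, kmapping one page table at
 * a time and advancing to the next one when the current table is full.
 */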
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}

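/* Tear down a PPGTT: take down its drm_mm, unmap and free the page
 * tables and finally free the ppgtt structure itself.
 */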
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	int i;

	drm_mm_takedown(&ppgtt->base.mm);

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

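/* Allocate and DMA-map the page tables for a gen6 PPGTT, hook up the
 * gen-specific vfuncs, scrub every entry to scratch and record where in
 * the global GTT the page directory will live.
 */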
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

	if (IS_HASWELL(dev)) {
		ppgtt->base.pte_encode = hsw_pte_encode;
	} else if (IS_VALLEYVIEW(dev)) {
		ppgtt->base.pte_encode = byt_pte_encode;
	} else {
		ppgtt->base.pte_encode = gen6_pte_encode;
	}
	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *) * ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;
		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}

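/* Allocate the single aliasing PPGTT (gen8+ is not handled yet, hence the
 * BUG()) and initialize its address space manager on success.
 */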
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->base.dev = dev;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else {
		dev_priv->mm.aliasing_ppgtt = ppgtt;
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
			    ppgtt->base.total);
	}

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->base.cleanup(&ppgtt->base);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
				   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				   cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->base.clear_range(&ppgtt->base,
				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				obj->base.size >> PAGE_SHIFT);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The
 * object will be accessible to the GPU via commands whose operands reference
 * offsets within the global GTT as well as accessible by the GPU through the
 * GMADR mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) !=
			vm->pte_encode(addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

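/* Pre-gen6 fallbacks: route GGTT updates through the intel-gtt helper
 * library instead of writing the PTEs directly.
 */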
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
					  entry,
					  cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       entry,
				       obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
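
/* Carve up the global GTT: reserve a guard page at the end, mark
 * preallocated objects as occupied and point all remaining holes (plus
 * the guard page) at scratch.
 */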
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->gtt.base.mm, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
		int ret;
		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
		if (ret)
			DRM_DEBUG_KMS("Reservation failed\n");
		obj->has_global_gtt_mapping = 1;
		list_add(&vma->vma_link, &obj->vma_list);
	}

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &dev_priv->gtt.base.mm,
			     hole_start, hole_end) {
		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
					       hole_start / PAGE_SIZE,
					       count);
	}

	/* And finally clear the reserved guard page */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       end / PAGE_SIZE - 1, 1);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink
			 * the aperture accordingly when using aliasing ppgtt. */
			gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->gtt.base.mm);
		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

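/* Allocate the scratch page that all unused GTT entries point at.  With
 * an IOMMU it must be DMA-mapped; otherwise its physical address is used
 * directly.
 */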
static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.base.scratch.page = page;
	dev_priv->gtt.base.scratch.addr = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(page);
	__free_page(page);
}

static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

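/* Probe the gen6+ GMCH: read GMADR and GTT sizes from PCI config space,
 * ioremap the GTT page table (the "GSM", in the upper half of BAR 0) and
 * install the gen6 GGTT vfuncs plus the scratch page.
 */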
static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if (*mappable_end < (64<<20) || *mappable_end > (512<<20)) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
	iounmap(gtt->gsm);
	teardown_scratch_page(vm->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

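/* Top-level GTT setup: pick the gen-specific probe/cleanup/pte_encode
 * vfuncs, run the probe and report the resulting GGTT geometry.
 */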
int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else
			gtt->base.pte_encode = gen6_pte_encode;
	}

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	gtt->base.dev = dev;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);

	return 0;
}