/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)      ((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr)       ((addr) | (((addr) >> 28) & 0x7f0))

#define GEN6_PDE_VALID                  (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID                  (1 << 0)
#define GEN6_PTE_UNCACHED               (1 << 1)
#define HSW_PTE_UNCACHED                (0)
#define GEN6_PTE_CACHE_LLC              (2 << 1)
#define GEN7_PTE_CACHE_L3_LLC           (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr)       HSW_GTT_ADDR_ENCODE(addr)

/* Cacheability Control is a 4-bit value. The low three bits are stored in
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)  ((((bits) & 0x7) << 1) | \
                                         (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3                 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0                 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0            HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE0            HSW_CACHEABILITY_CONTROL(0x6)

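/* Per-platform PTE encoders: pack a DMA address plus the generation's
 * cacheability bits into the 32-bit GTT/PPGTT PTE layout. The valid bit is
 * only set when @valid is true, so callers can also build scratch PTEs.
 */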
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level,
                                     bool valid)
{
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_L3_LLC:
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                WARN_ON(1);
        }

        return pte;
}

static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level,
                                     bool valid)
{
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_L3_LLC:
                pte |= GEN7_PTE_CACHE_L3_LLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                WARN_ON(1);
        }

        return pte;
}

#define BYT_PTE_WRITEABLE               (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES   (1 << 2)

static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level,
                                     bool valid)
{
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        /* Mark the page as writeable. Other platforms don't have a
         * setting for read-only/writable, so this matches that behavior.
         */
        pte |= BYT_PTE_WRITEABLE;

        if (level != I915_CACHE_NONE)
                pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

        return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level,
                                     bool valid)
{
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);

        if (level != I915_CACHE_NONE)
                pte |= HSW_WB_LLC_AGE3;

        return pte;
}

static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
                                      enum i915_cache_level level,
                                      bool valid)
{
        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_NONE:
                break;
        case I915_CACHE_WT:
                pte |= HSW_WT_ELLC_LLC_AGE0;
                break;
        default:
                pte |= HSW_WB_ELLC_LLC_AGE0;
                break;
        }

        return pte;
}

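/* Write the page directory for an aliasing PPGTT: the PDEs live in the PTE
 * region stolen from the end of the global GTT (see gen6_ppgtt_init), and
 * each one points at a DMA-mapped page table page.
 */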
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
        struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
        gen6_gtt_pte_t __iomem *pd_addr;
        uint32_t pd_entry;
        int i;

        WARN_ON(ppgtt->pd_offset & 0x3f);
        pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
                ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;

                pt_addr = ppgtt->pt_dma_addr[i];
                pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
                pd_entry |= GEN6_PDE_VALID;

                writel(pd_entry, pd_addr + i);
        }
        readl(pd_addr);
}

static int gen6_ppgtt_enable(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t pd_offset;
        struct intel_ring_buffer *ring;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        int i;

        BUG_ON(ppgtt->pd_offset & 0x3f);

        gen6_write_pdes(ppgtt);

        pd_offset = ppgtt->pd_offset;
        pd_offset /= 64; /* in cachelines, */
        pd_offset <<= 16;

        if (INTEL_INFO(dev)->gen == 6) {
                uint32_t ecochk, gab_ctl, ecobits;

                ecobits = I915_READ(GAC_ECO_BITS);
                I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
                                         ECOBITS_PPGTT_CACHE64B);

                gab_ctl = I915_READ(GAB_CTL);
                I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

                ecochk = I915_READ(GAM_ECOCHK);
                I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
                                       ECOCHK_PPGTT_CACHE64B);
                I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
        } else if (INTEL_INFO(dev)->gen >= 7) {
                uint32_t ecochk, ecobits;

                ecobits = I915_READ(GAC_ECO_BITS);
                I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

                ecochk = I915_READ(GAM_ECOCHK);
                if (IS_HASWELL(dev)) {
                        ecochk |= ECOCHK_PPGTT_WB_HSW;
                } else {
                        ecochk |= ECOCHK_PPGTT_LLC_IVB;
                        ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
                }
                I915_WRITE(GAM_ECOCHK, ecochk);
                /* GFX_MODE is per-ring on gen7+ */
        }

        for_each_ring(ring, dev_priv, i) {
                if (INTEL_INFO(dev)->gen >= 7)
                        I915_WRITE(RING_MODE_GEN7(ring),
                                   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

                I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
                I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
        }
        return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                   unsigned first_entry,
                                   unsigned num_entries,
                                   bool use_scratch)
{
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr, scratch_pte;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;

        scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;

                kunmap_atomic(pt_vaddr);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pt++;
        }
}

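/* Map a scatterlist of backing pages into the PPGTT, kmapping one page table
 * at a time and advancing to the next table every I915_PPGTT_PT_ENTRIES PTEs.
 */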
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
                                      unsigned first_entry,
                                      enum i915_cache_level cache_level)
{
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        struct sg_page_iter sg_iter;

        pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
                dma_addr_t page_addr;

                page_addr = sg_page_iter_dma_address(&sg_iter);
                pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                        kunmap_atomic(pt_vaddr);
                        act_pt++;
                        pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
                        act_pte = 0;
                }
        }
        kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        int i;

        drm_mm_takedown(&ppgtt->base.mm);

        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
                        pci_unmap_page(ppgtt->base.dev->pdev,
                                       ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }

        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                __free_page(ppgtt->pt_pages[i]);
        kfree(ppgtt->pt_pages);
        kfree(ppgtt);
}

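/* Allocate and DMA-map one page table page per PDE, wire up the address-space
 * vfuncs, scrub the new address space, and point pd_offset at the PDE range
 * stolen from the end of the global GTT.
 */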
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned first_pd_entry_in_global_pt;
        int i;
        int ret = -ENOMEM;

        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
        first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

        ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
        ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
        ppgtt->enable = gen6_ppgtt_enable;
        ppgtt->base.clear_range = gen6_ppgtt_clear_range;
        ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->base.cleanup = gen6_ppgtt_cleanup;
        ppgtt->base.scratch = dev_priv->gtt.base.scratch;
        ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
                return -ENOMEM;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
                if (!ppgtt->pt_pages[i])
                        goto err_pt_alloc;
        }

        ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
                                     GFP_KERNEL);
        if (!ppgtt->pt_dma_addr)
                goto err_pt_alloc;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;

                pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
                                       PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
                        ret = -EIO;
                        goto err_pd_pin;
                }
                ppgtt->pt_dma_addr[i] = pt_addr;
        }

        ppgtt->base.clear_range(&ppgtt->base, 0,
                                ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);

        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

        return 0;

err_pd_pin:
        if (ppgtt->pt_dma_addr) {
                for (i--; i >= 0; i--)
                        pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
err_pt_alloc:
        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                if (ppgtt->pt_pages[i])
                        __free_page(ppgtt->pt_pages[i]);
        }
        kfree(ppgtt->pt_pages);

        return ret;
}

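/* Set up the single aliasing PPGTT that shadows the global GTT address range.
 * At this point gen8 support was not wired up yet, hence the -ENOSYS.
 */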
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt;
        int ret;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return -ENOMEM;

        ppgtt->base.dev = dev;

        if (INTEL_INFO(dev)->gen < 8)
                ret = gen6_ppgtt_init(ppgtt);
        else if (IS_GEN8(dev))
                ret = -ENOSYS;
        else
                BUG();

        if (ret)
                kfree(ppgtt);
        else {
                dev_priv->mm.aliasing_ppgtt = ppgtt;
                drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
                            ppgtt->base.total);
        }

        return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

        if (!ppgtt)
                return;

        ppgtt->base.cleanup(&ppgtt->base);
        dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
{
        ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
                                   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
                                   cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
{
        ppgtt->base.clear_range(&ppgtt->base,
                                i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
                                obj->base.size >> PAGE_SHIFT,
                                true);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
        if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
                return true;
#endif
        return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
        bool ret = dev_priv->mm.interruptible;

        if (unlikely(dev_priv->gtt.do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
        }

        return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
        if (unlikely(dev_priv->gtt.do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
}

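/* Scan each ring's fault register for a pending fault, log its details, and
 * clear the valid bit so stale faults don't linger across suspend/resume.
 */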
void i915_check_and_clear_faults(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int i;

        if (INTEL_INFO(dev)->gen < 6)
                return;

        for_each_ring(ring, dev_priv, i) {
                u32 fault_reg;
                fault_reg = I915_READ(RING_FAULT_REG(ring));
                if (fault_reg & RING_FAULT_VALID) {
                        DRM_DEBUG_DRIVER("Unexpected fault\n"
                                         "\tAddr: 0x%08lx\n"
                                         "\tAddress space: %s\n"
                                         "\tSource ID: %d\n"
                                         "\tType: %d\n",
                                         fault_reg & PAGE_MASK,
                                         fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
                                         RING_FAULT_SRCID(fault_reg),
                                         RING_FAULT_FAULT_TYPE(fault_reg));
                        I915_WRITE(RING_FAULT_REG(ring),
                                   fault_reg & ~RING_FAULT_VALID);
                }
        }
        POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}

void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* Don't bother messing with faults pre GEN6 as we have little
         * documentation supporting that it's a good idea.
         */
        if (INTEL_INFO(dev)->gen < 6)
                return;

        i915_check_and_clear_faults(dev);

        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       dev_priv->gtt.base.start / PAGE_SIZE,
                                       dev_priv->gtt.base.total / PAGE_SIZE,
                                       false);
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        i915_check_and_clear_faults(dev);

        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       dev_priv->gtt.base.start / PAGE_SIZE,
                                       dev_priv->gtt.base.total / PAGE_SIZE,
                                       true);

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                i915_gem_clflush_object(obj, obj->pin_display);
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }

        i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
        if (obj->has_dma_mapping)
                return 0;

        if (!dma_map_sg(&obj->base.dev->pdev->dev,
                        obj->pages->sgl, obj->pages->nents,
                        PCI_DMA_BIDIRECTIONAL))
                return -ENOSPC;

        return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     unsigned int first_entry,
                                     enum i915_cache_level level)
{
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
        gen6_gtt_pte_t __iomem *gtt_entries =
                (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
        struct sg_page_iter sg_iter;
        dma_addr_t addr;

        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                addr = sg_page_iter_dma_address(&sg_iter);
                iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
                i++;
        }

        /* XXX: This serves as a posting read to make sure that the PTE has
         * actually been updated. There is some concern that even though
         * registers and PTEs are within the same BAR that they are potentially
         * of NUMA access patterns. Therefore, even with the way we assume
         * hardware should work, we must keep this posting read for paranoia.
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1]) !=
                        vm->pte_encode(addr, level, true));

        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

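/* Point a range of GGTT PTEs at the scratch page (valid when use_scratch,
 * invalid otherwise), clamping the range against the end of the GTT.
 */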
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
                                  unsigned int num_entries,
                                  bool use_scratch)
{
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
        int i;

        if (WARN(num_entries > max_entries,
                 "First entry = %d; Num entries = %d (max=%d)\n",
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;

        scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);

        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
}

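/* Pre-gen6 parts route GGTT updates through the intel-gtt helper module
 * instead of writing PTEs directly; only a cached vs. uncached distinction
 * can be expressed there via the AGP flags.
 */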
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     unsigned int pg_start,
                                     enum i915_cache_level cache_level)
{
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

        intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
                                  unsigned int num_entries,
                                  bool unused)
{
        intel_gtt_clear_range(first_entry, num_entries);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

        dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
                                          entry,
                                          cache_level);

        obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       entry,
                                       obj->base.size >> PAGE_SHIFT,
                                       true);

        obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;

        interruptible = do_idling(dev_priv);

        if (!obj->has_dma_mapping)
                dma_unmap_sg(&dev->pdev->dev,
                             obj->pages->sgl, obj->pages->nents,
                             PCI_DMA_BIDIRECTIONAL);

        undo_idling(dev_priv, interruptible);
}

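/* drm_mm color callback: keep one empty page between neighbouring GTT nodes
 * of different cache colors. Only installed on non-LLC platforms (see
 * i915_gem_setup_global_gtt), where differently-cached pages should not sit
 * directly adjacent.
 */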
static void i915_gtt_color_adjust(struct drm_mm_node *node,
                                  unsigned long color,
                                  unsigned long *start,
                                  unsigned long *end)
{
        if (node->color != color)
                *start += 4096;

        if (!list_empty(&node->node_list)) {
                node = list_entry(node->node_list.next,
                                  struct drm_mm_node,
                                  node_list);
                if (node->allocated && node->color != color)
                        *end -= 4096;
        }
}

void i915_gem_setup_global_gtt(struct drm_device *dev,
                               unsigned long start,
                               unsigned long mappable_end,
                               unsigned long end)
{
        /* Let GEM Manage all of the aperture.
         *
         * However, leave one page at the end still bound to the scratch page.
         * There are a number of places where the hardware apparently prefetches
         * past the end of the object, and we've seen multiple hangs with the
         * GPU head pointer stuck in a batchbuffer bound at the last page of the
         * aperture. One page should be enough to keep any prefetching inside
         * of the aperture.
         */
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
        struct drm_mm_node *entry;
        struct drm_i915_gem_object *obj;
        unsigned long hole_start, hole_end;

        BUG_ON(mappable_end > end);

        /* Subtract the guard page ... */
        drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
                dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

        /* Mark any preallocated objects as occupied */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
                int ret;
                DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
                              i915_gem_obj_ggtt_offset(obj), obj->base.size);

                WARN_ON(i915_gem_obj_ggtt_bound(obj));
                ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
                if (ret)
                        DRM_DEBUG_KMS("Reservation failed\n");
                obj->has_global_gtt_mapping = 1;
                list_add(&vma->vma_link, &obj->vma_list);
        }

        dev_priv->gtt.base.start = start;
        dev_priv->gtt.base.total = end - start;

        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
                const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
                ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
        }

        /* And finally clear the reserved guard page */
        ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
}

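/* A non-negative i915_enable_ppgtt module option forces the decision;
 * otherwise PPGTT defaults to on, except on SNB with VT-d active.
 */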
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
        if (i915_enable_ppgtt >= 0)
                return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long gtt_size, mappable_size;

        gtt_size = dev_priv->gtt.base.total;
        mappable_size = dev_priv->gtt.mappable_end;

        if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
                int ret;

                if (INTEL_INFO(dev)->gen <= 7) {
                        /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                         * aperture accordingly when using aliasing ppgtt. */
                        gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
                }

                i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

                ret = i915_gem_init_aliasing_ppgtt(dev);
                if (!ret)
                        return;

                DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
                drm_mm_takedown(&dev_priv->gtt.base.mm);
                gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
        }
        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

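/* Allocate the single zeroed scratch page that unused GTT entries point at.
 * When built with CONFIG_INTEL_IOMMU it must be mapped through the DMA API;
 * otherwise its physical address is used directly.
 */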
static int setup_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct page *page;
        dma_addr_t dma_addr;

        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
        get_page(page);
        set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
        dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
                                PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dma_addr))
                return -EINVAL;
#else
        dma_addr = page_to_phys(page);
#endif
        dev_priv->gtt.base.scratch.page = page;
        dev_priv->gtt.base.scratch.addr = dma_addr;

        return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct page *page = dev_priv->gtt.base.scratch.page;

        set_pages_wb(page, 1);
        pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        put_page(page);
        __free_page(page);
}

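/* The GMCH control word in PCI config space encodes both the GTT size (GGMS)
 * and the stolen memory size (GMS); the field layouts differ between gen6/7
 * and gen8, hence the separate decoders below.
 */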
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
        return snb_gmch_ctl << 20;
}

static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
        bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
        bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
        if (bdw_gmch_ctl)
                bdw_gmch_ctl = 1 << bdw_gmch_ctl;
        return bdw_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
        return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
        bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
        bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
        return bdw_gmch_ctl << 25; /* 32 MB units */
}

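/* Map the upper half of BAR 0, which holds the GTT page table on modern gens,
 * and set up the scratch page. Shared by the gen6 and gen8 probe paths.
 */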
static int ggtt_probe_common(struct drm_device *dev,
                             size_t gtt_size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        phys_addr_t gtt_bus_addr;
        int ret;

        /* For Modern GENs the PTEs and register space are split in the BAR */
        gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
                (pci_resource_len(dev->pdev, 0) / 2);

        dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
        if (!dev_priv->gtt.gsm) {
                DRM_ERROR("Failed to map the gtt page table\n");
                return -ENOMEM;
        }

        ret = setup_scratch_page(dev);
        if (ret) {
                DRM_ERROR("Scratch setup failed\n");
                /* iounmap will also get called at remove, but meh */
                iounmap(dev_priv->gtt.gsm);
        }

        return ret;
}

static int gen8_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
                           unsigned long *mappable_end)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int gtt_size;
        u16 snb_gmch_ctl;
        int ret;

        /* TODO: We're not aware of mappable constraints on gen8 yet */
        *mappable_base = pci_resource_start(dev->pdev, 2);
        *mappable_end = pci_resource_len(dev->pdev, 2);

        if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

        *stolen = gen8_get_stolen_size(snb_gmch_ctl);

        gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
        *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;

        ret = ggtt_probe_common(dev, gtt_size);

        dev_priv->gtt.base.clear_range = NULL;
        dev_priv->gtt.base.insert_entries = NULL;

        return ret;
}

static int gen6_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
                           unsigned long *mappable_end)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int gtt_size;
        u16 snb_gmch_ctl;
        int ret;

        *mappable_base = pci_resource_start(dev->pdev, 2);
        *mappable_end = pci_resource_len(dev->pdev, 2);

        /* 64/512MB is the current min/max we actually know of, but this is just
         * a coarse sanity check.
         */
        if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
                DRM_ERROR("Unknown GMADR size (%lx)\n",
                          dev_priv->gtt.mappable_end);
                return -ENXIO;
        }

        if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

        *stolen = gen6_get_stolen_size(snb_gmch_ctl);

        gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
        *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

        ret = ggtt_probe_common(dev, gtt_size);

        dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
        dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

        return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
        struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

        iounmap(gtt->gsm);
        teardown_scratch_page(vm->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
                           phys_addr_t *mappable_base,
                           unsigned long *mappable_end)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
        if (!ret) {
                DRM_ERROR("failed to set up gmch\n");
                return -EIO;
        }

        intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

        dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
        dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
        dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;

        return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
        intel_gmch_remove();
}

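/* Top-level GTT probe: pick the probe/cleanup/PTE-encode implementations for
 * this generation, then delegate to the chosen probe to size and map the GTT.
 */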
int i915_gem_gtt_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_gtt *gtt = &dev_priv->gtt;
        int ret;

        if (INTEL_INFO(dev)->gen <= 5) {
                gtt->gtt_probe = i915_gmch_probe;
                gtt->base.cleanup = i915_gmch_remove;
        } else if (INTEL_INFO(dev)->gen < 8) {
                gtt->gtt_probe = gen6_gmch_probe;
                gtt->base.cleanup = gen6_gmch_remove;
                if (IS_HASWELL(dev) && dev_priv->ellc_size)
                        gtt->base.pte_encode = iris_pte_encode;
                else if (IS_HASWELL(dev))
                        gtt->base.pte_encode = hsw_pte_encode;
                else if (IS_VALLEYVIEW(dev))
                        gtt->base.pte_encode = byt_pte_encode;
                else if (INTEL_INFO(dev)->gen >= 7)
                        gtt->base.pte_encode = ivb_pte_encode;
                else
                        gtt->base.pte_encode = snb_pte_encode;
        } else {
                dev_priv->gtt.gtt_probe = gen8_gmch_probe;
                dev_priv->gtt.base.cleanup = gen6_gmch_remove;
        }

        ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
                             &gtt->mappable_base, &gtt->mappable_end);
        if (ret)
                return ret;

        gtt->base.dev = dev;

        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %zdM\n",
                 gtt->base.total >> 20);
        DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);

        return 0;
}