/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;

#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

/* gen6-hsw has bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
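
/* Worked example (illustrative only, not from the hardware spec): for a
 * page-aligned 40-bit physical address such as addr = 0x1234567000,
 * (addr >> 28) & 0xff0 == 0x120, so physical address bits 39:32 (0x12) land
 * in PTE bits 11:4, alongside the low 32 bits already present in addr.
 */
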
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)

#define I915_PTES(pte_len)		(PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
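
/* Worked example (illustrative only): HSW_CACHEABILITY_CONTROL(0x2) expands to
 * (0x2 << 1) | 0 == 0x4, i.e. PTE bits 3:1 hold 0b010 (HSW_WB_LLC_AGE3), while
 * HSW_CACHEABILITY_CONTROL(0x8) expands to 0 | (0x8 << 8) == 0x800, i.e. only
 * PTE bit 11 is set (HSW_WB_ELLC_LLC_AGE3).
 */
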
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/* GEN8 legacy style addresses are defined as a 3-level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference from a normal x86 3-level page table is that the PDPEs are
 * programmed via register.
 */
#define GEN8_PDPE_SHIFT			30
#define GEN8_PDPE_MASK			0x3
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_LEGACY_PDPES		4
#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))
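
/* Worked example (illustrative only): for the GPU virtual address 0xd2345678,
 * gen8_pdpe_index() == (0xd2345678 >> 30) & 0x3   == 3,
 * gen8_pde_index()  == (0xd2345678 >> 21) & 0x1ff == 0x91,
 * gen8_pte_index()  == (0xd2345678 >> 12) & 0x1ff == 0x145,
 * and the remaining low 12 bits (0x678) are the offset within the 4K page.
 */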

/* FIXME: Next patch will use dev */
#define I915_PDPES_PER_PDP(dev)		GEN8_LEGACY_PDPES

#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((uint64_t)(x) << ((i) * 8))

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
	unsigned int height;
	unsigned int pitch;
	uint32_t pixel_format;
	uint64_t fb_modifier;
	unsigned int width_pages, height_pages;
	uint64_t size;
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			unsigned long offset;
			unsigned int size;
		} partial;
	} params;

	struct sg_table *pages;

	union {
		struct intel_rotation_info rotation_info;
	};
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

enum i915_cache_level;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding the object into, or after
 * unbinding it from, the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND	(1<<0)
#define LOCAL_BIND	(1<<1)
	unsigned int bound : 4;

	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default value of zero (I915_GGTT_VIEW_NORMAL) is also what is
	 * assumed by GEM functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		uint32_t ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_scratch {
	struct i915_page_dma base;
};

struct i915_page_table {
	struct i915_page_dma base;

	unsigned long *used_ptes;
};

struct i915_page_directory {
	struct i915_page_dma base;

	unsigned long *used_pdes;
	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer {
	struct i915_page_dma base;

	unsigned long *used_pdpes;
	struct i915_page_directory **page_directory;
};

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	u64 start;		/* Start offset always 0 for dri2 */
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */

	struct i915_page_scratch *scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY	(1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_gtt {
	struct i915_address_space base;

	size_t stolen_size;		/* Total size of stolen memory */
	u64 mappable_end;		/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 u64 *mappable_end);
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_page_directory_pointer pdp;
		struct i915_page_directory pd;
	};

	struct drm_i915_file_private *file_priv;

	gen6_pte_t __iomem *pd_addr;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/* gen6_for_each_pde() iterates over every pde between start and start + length.
 * If start and start + length are not perfectly divisible, the macro rounds
 * down and up as needed. The macro modifies the iterator, start, and length;
 * temp is scratch storage. On gen6/7, start = 0 and length = 2GB effectively
 * iterates over every PDE in the system.
 *
 * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
 */
#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
	for (iter = gen6_pde_index(start); \
	     pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
	     temp = min_t(unsigned, temp, length), \
	     start += temp, length -= temp)
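
/* Usage sketch (illustrative only; 'ppgtt', 'start' and 'length' are
 * placeholders for whatever the caller has in scope):
 *
 *	struct i915_page_table *pt;
 *	uint32_t pde, temp;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde)
 *		bitmap_set(pt->used_ptes, gen6_pte_index(start),
 *			   gen6_pte_count(start, length));
 *
 * The macro advances start and length between iterations, so inside the body
 * 'start' points into the current page table and 'length' is the amount of
 * the range still remaining.
 */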

#define gen6_for_all_pdes(pt, ppgtt, iter) \
	for (iter = 0; \
	     pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
	     iter++)

static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1 << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
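
/* Worked example (illustrative only): with pde_shift == GEN6_PDE_SHIFT (22)
 * and 4K pages, a single page table covers 4MB. For addr = 0x1000 and
 * length = 0x3000 no 4MB boundary is crossed, so the helper returns
 * i915_pte_index(0x4000) - i915_pte_index(0x1000) == 4 - 1 == 3 PTEs.
 * If the range did cross a boundary, only the PTEs up to that boundary
 * would be counted.
 */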

static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

/* Equivalent to the gen6 version: iterates over every pde between start and
 * start + length. On gen8+ it simply iterates over every page directory entry
 * in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
	for (iter = gen8_pde_index(start); \
	     pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
	     temp = min(temp, length), \
	     start += temp, length -= temp)

#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
	for (iter = gen8_pdpe_index(start); \
	     pd = (pdp)->page_directory[iter], \
	     length > 0 && (iter < I915_PDPES_PER_PDP(dev)); \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
	     temp = min(temp, length), \
	     start += temp, length -= temp)
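
/* Usage sketch (illustrative only; 'ppgtt', 'dev', 'start', 'length' and
 * do_something_with() are placeholders for the caller's variables and code).
 * The loop condition expands to I915_PDPES_PER_PDP(dev), so a variable named
 * 'dev' must be in scope at the call site:
 *
 *	struct i915_page_directory *pd;
 *	uint64_t temp;
 *	uint32_t pdpe;
 *
 *	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe)
 *		do_something_with(pd, start, length);
 *
 * Within the body, pd is the page directory covering the 1GB slice that
 * contains the current 'start'.
 */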

static inline uint32_t gen8_pte_index(uint64_t address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pde_index(uint64_t address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pdpe_index(uint64_t address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline uint32_t gen8_pml4e_index(uint64_t address)
{
	WARN_ON(1); /* For 64-bit (4-level) page tables; not implemented yet */
	return 0;
}
472
Michel Thierry33c88192015-04-08 12:13:33 +0100473static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
474{
475 return i915_pte_count(address, length, GEN8_PDE_SHIFT);
476}
477
Mika Kuoppalad852c7b2015-06-25 18:35:06 +0300478static inline dma_addr_t
479i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
480{
481 return test_bit(n, ppgtt->pdp.used_pdpes) ?
Mika Kuoppala567047b2015-06-25 18:35:12 +0300482 px_dma(ppgtt->pdp.page_directory[n]) :
Mika Kuoppala79ab9372015-06-25 18:35:17 +0300483 px_dma(ppgtt->base.scratch_pd);
Mika Kuoppalad852c7b2015-06-25 18:35:06 +0300484}
485
Ben Widawsky0260c422014-03-22 22:47:21 -0700486int i915_gem_gtt_init(struct drm_device *dev);
487void i915_gem_init_global_gtt(struct drm_device *dev);
Daniel Vetter90d0a0e2014-08-06 15:04:56 +0200488void i915_global_gtt_cleanup(struct drm_device *dev);
Ben Widawsky0260c422014-03-22 22:47:21 -0700489
Daniel Vetteree960be2014-08-06 15:04:45 +0200490
491int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
Daniel Vetter82460d92014-08-06 20:19:53 +0200492int i915_ppgtt_init_hw(struct drm_device *dev);
John Harrisonb3dd6b92015-05-29 17:43:40 +0100493int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
Daniel Vetteree960be2014-08-06 15:04:45 +0200494void i915_ppgtt_release(struct kref *kref);
Daniel Vetter4d884702014-08-06 15:04:47 +0200495struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
496 struct drm_i915_file_private *fpriv);
Daniel Vetteree960be2014-08-06 15:04:45 +0200497static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
498{
499 if (ppgtt)
500 kref_get(&ppgtt->ref);
501}
502static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
503{
504 if (ppgtt)
505 kref_put(&ppgtt->ref, i915_ppgtt_release);
506}
Ben Widawsky0260c422014-03-22 22:47:21 -0700507
508void i915_check_and_clear_faults(struct drm_device *dev);
509void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
510void i915_gem_restore_gtt_mappings(struct drm_device *dev);
511
512int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
513void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
514
Joonas Lahtinen9abc4642015-03-27 13:09:22 +0200515static inline bool
516i915_ggtt_view_equal(const struct i915_ggtt_view *a,
517 const struct i915_ggtt_view *b)
518{
519 if (WARN_ON(!a || !b))
520 return false;
521
Joonas Lahtinen8bd7ef12015-05-06 14:35:38 +0300522 if (a->type != b->type)
523 return false;
524 if (a->type == I915_GGTT_VIEW_PARTIAL)
525 return !memcmp(&a->params, &b->params, sizeof(a->params));
526 return true;
Joonas Lahtinen9abc4642015-03-27 13:09:22 +0200527}
528
Joonas Lahtinen91e67112015-05-06 14:33:58 +0300529size_t
530i915_ggtt_view_size(struct drm_i915_gem_object *obj,
531 const struct i915_ggtt_view *view);
532
Ben Widawsky0260c422014-03-22 22:47:21 -0700533#endif