/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

#include <linux/io-mapping.h>

#include "i915_gem_request.h"

struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;

#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)

/* gen6-hsw has bits 11:4 for physical addr bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
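
/*
 * Worked example (illustrative address, not taken from bspec): encoding
 * the 40-bit physical address 0xAB12345000 gives
 * (addr >> 28) & 0xff0 = 0xAB0, so the value stored in the 32-bit PTE is
 * 0x12345ab0 | flags: bits 31:12 carry addr bits 31:12 and bits 11:4
 * carry addr bits 39:32.
 */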
58
Michel Thierry07749ef2015-03-16 16:00:54 +000059#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
60#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
61#define I915_PDES 512
62#define I915_PDE_MASK (I915_PDES - 1)
Ben Widawsky678d96f2015-03-16 16:00:56 +000063#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
Michel Thierry07749ef2015-03-16 16:00:54 +000064
65#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
66#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
Ben Widawsky0260c422014-03-22 22:47:21 -070067#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
Ben Widawsky678d96f2015-03-16 16:00:56 +000068#define GEN6_PDE_SHIFT 22
Ben Widawsky0260c422014-03-22 22:47:21 -070069#define GEN6_PDE_VALID (1 << 0)
70
71#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
72
73#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
74#define BYT_PTE_WRITEABLE (1 << 1)
75
76/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
77 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
78 */
79#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
80 (((bits) & 0x8) << (11 - 3)))
81#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
82#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
83#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
84#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
85#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
86#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
87#define HSW_PTE_UNCACHED (0)
88#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
89#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
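
/*
 * For example, HSW_WB_ELLC_LLC_AGE3 is HSW_CACHEABILITY_CONTROL(0x8):
 * the low three bits of 0x8 are zero, and the fourth bit is shifted up
 * into PTE bit 11, giving an encoding of 0x800.
 */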

/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b legacy style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
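/*
 * Worked example (illustrative, 48b mode): the GPU virtual address
 * (1ull << 39) | (2ull << 30) | (3ull << 21) | (4ull << 12) | 0x5
 * walks PML4E 1, then PDPE 2, then PDE 3, then PTE 4, at page offset 0x5.
 */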
#define GEN8_PML4ES_PER_PML4		512
#define GEN8_PML4E_SHIFT		39
#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT			30
/* NB: GEN8_PDPE_MASK is too wide for 32b platforms, which have only 4 PDPEs,
 * but masking with it is harmless there since the index never exceeds 3.
 */
#define GEN8_PDPE_MASK			0x1ff
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_LEGACY_PDPES		4
#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))

#define I915_PDPES_PER_PDP(dev)	(USES_FULL_48BIT_PPGTT(dev) ?\
				 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)

#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))
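
/*
 * The PPAT is programmed as a packed array of eight 8-bit entries; a
 * sketch of building the first two entries (the entry values here are
 * illustrative, not a recommended configuration):
 *
 *	uint64_t pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
 *		       GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
 */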

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
	unsigned int uv_offset;
	uint32_t pixel_format;
	unsigned int uv_start_page;
	struct {
		/* tiles */
		unsigned int width, height;
	} plane[2];
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			u64 offset;
			unsigned int size;
		} partial;
		struct intel_rotation_info rotated;
	} params;

	struct sg_table *pages;
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;
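
/*
 * Sketch of building a partial view, as the GTT fault handler does when
 * an object is too large to map in full ('first_page' and 'nr_pages' are
 * hypothetical names; both fields are in units of pages):
 *
 *	struct i915_ggtt_view view;
 *
 *	view.type = I915_GGTT_VIEW_PARTIAL;
 *	view.params.partial.offset = first_page;
 *	view.params.partial.size = nr_pages;
 */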

enum i915_cache_level;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	void __iomem *iomap;

	unsigned int active;
	struct i915_gem_active last_read[I915_NUM_ENGINES];

	/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND	(1<<0)
#define LOCAL_BIND	(1<<1)
	unsigned int bound : 4;
	bool is_ggtt : 1;
	bool closed : 1;

	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default of zero (I915_GGTT_VIEW_NORMAL) is assumed in GEM
	 * functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head vm_link;

	struct list_head obj_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits.
	 */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};

static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
	return vma->active;
}

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return i915_vma_get_active(vma);
}

static inline void i915_vma_set_active(struct i915_vma *vma,
				       unsigned int engine)
{
	vma->active |= BIT(engine);
}

static inline void i915_vma_clear_active(struct i915_vma *vma,
					 unsigned int engine)
{
	vma->active &= ~BIT(engine);
}

static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
					      unsigned int engine)
{
	return vma->active & BIT(engine);
}
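
/*
 * Usage sketch (hypothetical caller): mark the VMA busy on an engine when
 * a request is queued against it, drop the bit when that request retires,
 * and treat the VMA as idle once no engine bits remain set:
 *
 *	i915_vma_set_active(vma, engine->id);
 *	...
 *	i915_vma_clear_active(vma, engine->id);
 *	if (!i915_vma_is_active(vma))
 *		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 */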

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		uint32_t ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_scratch {
	struct i915_page_dma base;
};

struct i915_page_table {
	struct i915_page_dma base;

	unsigned long *used_ptes;
};

struct i915_page_directory {
	struct i915_page_dma base;

	unsigned long *used_pdes;
	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer {
	struct i915_page_dma base;

	unsigned long *used_pdpes;
	struct i915_page_directory **page_directory;
};

struct i915_pml4 {
	struct i915_page_dma base;

	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	/* Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	struct list_head global_link;
	u64 start;	/* Start offset always 0 for dri2 */
	u64 total;	/* size addr space maps (ex. 2GB for ggtt) */

	struct i915_page_scratch *scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY	(1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    uint64_t offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
};

#define i915_is_ggtt(V) (!(V)->file)

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space base;

	size_t stolen_size;		/* Total size of stolen memory */
	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
	size_t stolen_reserved_base;
	size_t stolen_reserved_size;
	u64 mappable_end;		/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
		struct i915_page_directory_pointer pdp;	/* GEN8+ */
		struct i915_page_directory pd;		/* GEN6-7 */
	};

	gen6_pte_t __iomem *pd_addr;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible, the macro will round
 * down and up as needed. Start=0 and length=2G effectively iterates over
 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
 * so each of the other parameters should preferably be a simple variable, or
 * at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen6_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);		\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)
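
/*
 * Usage sketch, modeled on the gen6 PPGTT allocation path ('ppgtt',
 * 'start' and 'length' are assumed to be in scope; within the body,
 * start/length describe the segment covered by the current PDE):
 *
 *	struct i915_page_table *pt;
 *	uint32_t pde;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
 *		bitmap_set(pt->used_ptes, gen6_pte_index(start),
 *			   gen6_pte_count(start, length));
 */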

#define gen6_for_all_pdes(pt, pd, iter)				\
	for (iter = 0;						\
	     iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);		\
	     ++iter)

static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1ULL << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr|length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
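
/*
 * Worked example (gen6, pde_shift = 22, so 1024 PTEs per table): for
 * addr = 0x3ff000 and length = 0x2000 the range crosses the 4M table
 * boundary at 0x400000, so only the single PTE left in the first table
 * (index 1023) is counted; the remainder belongs to the next PDE.
 */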

static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

/* Equivalent to the gen6 version: iterates over every pde from start until
 * start + length. On gen8+ it simply iterates over every page directory
 * entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen8_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT);		\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pdpe(pd, pdp, start, length, iter)		\
	for (iter = gen8_pdpe_index(start);				\
	     length > 0 && iter < I915_PDPES_PER_PDP(dev) &&		\
		(pd = (pdp)->page_directory[iter], true);		\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT);	\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter)		\
	for (iter = gen8_pml4e_index(start);				\
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 &&		\
		(pdp = (pml4)->pdps[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT);	\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

static inline uint32_t gen8_pte_index(uint64_t address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pde_index(uint64_t address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pdpe_index(uint64_t address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline uint32_t gen8_pml4e_index(uint64_t address)
{
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}

static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
	return test_bit(n, ppgtt->pdp.used_pdpes) ?
		px_dma(ppgtt->pdp.page_directory[n]) :
		px_dma(ppgtt->base.scratch_pd);
}

int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);

int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
					struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);

int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);

static inline bool
i915_ggtt_view_equal(const struct i915_ggtt_view *a,
		     const struct i915_ggtt_view *b)
{
	if (WARN_ON(!a || !b))
		return false;

	if (a->type != b->type)
		return false;
	if (a->type != I915_GGTT_VIEW_NORMAL)
		return !memcmp(&a->params, &b->params, sizeof(a->params));
	return true;
}

size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view);

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed in VMA has to be pinned in the global GTT mappable region.
 * An extra pinning of the VMA is acquired for the return iomapping,
 * the caller must call i915_vma_unpin_iomap to relinquish the pinning
 * after the iomapping is no longer required.
 *
 * Callers must hold the struct_mutex.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))

/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
 *
 * Callers must hold the struct_mutex. This function is only valid to be
 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
 */
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->dev->struct_mutex);
	GEM_BUG_ON(vma->pin_count == 0);
	GEM_BUG_ON(vma->iomap == NULL);
	vma->pin_count--;
}
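
/*
 * Usage sketch (error handling abbreviated; struct_mutex held and the VMA
 * already pinned in the mappable GGTT; 'value' and 'offset' are
 * hypothetical):
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 */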

#endif