/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;

#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)


/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
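
/*
 * Worked example (illustrative only, hypothetical address): for a page-aligned
 * physical address 0x23_45678000, bits 39:32 are 0x23. GEN6_GTT_ADDR_ENCODE()
 * folds them into PTE bits 11:4:
 *   ((0x2345678000 >> 28) & 0xff0) == 0x230
 * so the 32-bit PTE address field becomes 0x45678230 before the valid/cache
 * bits above are ORed in.
 */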

#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)
#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
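
/*
 * Sanity-check example (illustrative, assuming 4KiB pages): with
 * sizeof(gen6_pte_t) == 4, I915_PTES(4) == 1024 entries per page table, and
 * NUM_PTE(GEN6_PDE_SHIFT) == 1 << (22 - 12) == 1024 as well (GEN6_PDE_SHIFT is
 * defined just below), i.e. both ways of counting the PTEs covered by one gen6
 * PDE agree.
 */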

#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID (1 << 0)

#define GEN7_PTE_CACHE_L3_LLC (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
#define BYT_PTE_WRITEABLE (1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
					(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
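
/*
 * Worked example (illustrative): HSW_WB_ELLC_LLC_AGE3 passes 0x8 through
 * HSW_CACHEABILITY_CONTROL(). The low three bits (0x8 & 0x7 == 0) land in PTE
 * bits 3:1, and the fourth bit (0x8 & 0x8) is shifted up to PTE bit 11, so the
 * macro expands to 0x800.
 */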

/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference compared to a normal x86 3 level page table is that the PDPEs
 * are programmed via register.
 */
#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK 0x3
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
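
/*
 * Illustrative decode of a hypothetical GPU virtual address, 0x80643000
 * (example values, assuming 4KiB pages):
 *   PDPE index  = (0x80643000 >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK = 2
 *   PDE index   = (0x80643000 >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK   = 3
 *   PTE index   = (0x80643000 >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK   = 0x43
 *   page offset = 0x80643000 & (PAGE_SIZE - 1)                     = 0
 */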

#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP (1<<6)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED
};

struct intel_rotation_info {
	unsigned int height;
	unsigned int pitch;
	uint32_t pixel_format;
	uint64_t fb_modifier;
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	struct sg_table *pages;

	union {
		struct intel_rotation_info rotation_info;
	};
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

enum i915_cache_level;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
#define PTE_READ_ONLY (1<<2)
	unsigned int bound : 4;

	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
	 * functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits.
	 */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags);
};

struct i915_page_table_entry {
	struct page *page;
	dma_addr_t daddr;

	unsigned long *used_ptes;
};

struct i915_page_directory_entry {
	struct page *page; /* NULL for GEN6-GEN7 */
	union {
		uint32_t pd_offset;
		dma_addr_t daddr;
	};

	struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer_entry {
	/* struct page *page; */
	struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
};

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start; /* Start offset always 0 for dri2 */
	size_t total; /* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags); /* Create a valid PTE */
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size; /* Total size of stolen memory */

	unsigned long mappable_end; /* End offset that we can CPU map */
	struct io_mapping *mappable; /* Mapping to our CPU mappable region */
	phys_addr_t mappable_base; /* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	unsigned num_pd_entries;
	unsigned num_pd_pages; /* gen8+ */
	union {
		struct i915_page_directory_pointer_entry pdp;
		struct i915_page_directory_entry pd;
	};

	struct i915_page_table_entry *scratch_pt;

	struct drm_i915_file_private *file_priv;

	gen6_pte_t __iomem *pd_addr;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct intel_engine_cs *ring);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/* gen6_for_each_pde() iterates over every pde between start and start + length.
 * If start and start + length are not perfectly divisible, the macro will round
 * down and up as needed. The macro modifies pde, start, and length. Temp is
 * temp. On gen6/7, start = 0 and length = 2G effectively iterates over every
 * PDE in the system. See the usage sketch after the macro.
 *
 * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
 */
#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
	for (iter = gen6_pde_index(start); \
	     pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
	     temp = min_t(unsigned, temp, length), \
	     start += temp, length -= temp)
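
/*
 * Usage sketch (illustrative only; mark_ptes_used() is a hypothetical callback,
 * not a helper provided by this driver):
 *
 *	struct i915_page_table_entry *pt;
 *	uint32_t start = ..., length = ..., pde, temp;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde)
 *		mark_ptes_used(pt, gen6_pte_index(start),
 *			       gen6_pte_count(start, length));
 *
 * Inside the body, start is the current offset and length the remaining size;
 * the gen6_pte_index()/gen6_pte_count() helpers below clamp them to the PDE
 * currently being visited.
 */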

static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1 << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
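
/*
 * Worked examples (illustrative, assuming 4KiB pages and GEN6_PDE_SHIFT == 22):
 *   i915_pte_count(0x1000, 0x3000, GEN6_PDE_SHIFT) == 3, since the range stays
 *   within one PDE; i915_pte_count(0x3ff000, 0x2000, GEN6_PDE_SHIFT) == 1,
 *   since the range crosses a 4MiB PDE boundary and the count is clamped to
 *   the PTEs left in the first page table.
 */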

static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);


int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
					struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}

void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);

int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);

static inline bool
i915_ggtt_view_equal(const struct i915_ggtt_view *a,
		     const struct i915_ggtt_view *b)
{
	if (WARN_ON(!a || !b))
		return false;

	return a->type == b->type;
}

#endif