/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;

#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

/* gen6-hsw has bits 11:4 of the PTE for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
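
/*
 * Illustrative sketch, not part of the driver: building a valid, LLC-cached
 * gen6 PTE for a 40-bit physical address with the encode macros above. The
 * fold works because the address shifted right by 28 puts address bits 39:32
 * exactly into PTE bits 11:4 (the 0xff0 mask).
 */
static inline gen6_pte_t gen6_pte_encode_example(void)
{
	const dma_addr_t addr = 0x1234567000ULL;	/* page-aligned, 40-bit */
	gen6_pte_t pte = GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	/* addr >> 28 is 0x123; & 0xff0 keeps 0x120, i.e. address bits 39:32
	 * relocated to PTE bits 11:4. The low 32 bits carry bits 31:12. */
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	return pte;	/* 0x34567125 for this address */
}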

#define I915_PTES(pte_len)		(PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)
#define NUM_PTE(pde_shift)		(1 << ((pde_shift) - PAGE_SHIFT))

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
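
/*
 * Illustrative sketch, not part of the driver: how a 4-bit cacheability
 * index splits across a Haswell PTE. For index 0x8 (eLLC write-back, age 3)
 * the low three bits (all zero) land in PTE bits 3:1 and the fourth bit
 * lands in PTE bit 11, so the whole control ends up as 0x800.
 */
static inline gen6_pte_t hsw_cacheability_example(void)
{
	/* (0x8 & 0x7) << 1 == 0x000; (0x8 & 0x8) << (11 - 3) == 0x800 */
	BUILD_BUG_ON(HSW_CACHEABILITY_CONTROL(0x8) != 0x800);

	return HSW_WB_ELLC_LLC_AGE3 | GEN6_PTE_VALID;
}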

/* A GEN8 legacy-style address is translated through a 3-level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference compared to a normal x86 3-level page table is that the
 * PDPEs are programmed via register.
 */
#define GEN8_PDPE_SHIFT			30
#define GEN8_PDPE_MASK			0x3
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_LEGACY_PDPES		4
#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))
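
/*
 * Illustrative sketch, not part of the driver: splitting a 32-bit GPU
 * virtual address into its three table indices plus page offset with the
 * shifts and masks above.
 */
static inline void gen8_addr_split_example(void)
{
	const uint64_t address = 0x8a5f3123ULL;

	WARN_ON(((address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK) != 0x2);
	WARN_ON(((address >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK) != 0x52);
	WARN_ON(((address >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK) != 0x1f3);
	WARN_ON((address & (PAGE_SIZE - 1)) != 0x123);	/* byte offset */
}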

#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((uint64_t)(x) << ((i) * 8))
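
/*
 * Illustrative sketch, not part of the driver: GEN8_PPAT() shifts one 8-bit
 * PPAT entry into its per-index byte of the 64-bit PPAT register, so a full
 * table is built by OR-ing one term per index. The entries below are a
 * plausible table, not the driver's actual programming.
 */
static inline uint64_t gen8_ppat_example(void)
{
	return GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* byte 0 */
	       GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* byte 1 */
	       GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* byte 2 */
	       GEN8_PPAT(3, GEN8_PPAT_UC);			/* byte 3 */
}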

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
	unsigned int height;
	unsigned int pitch;
	uint32_t pixel_format;
	uint64_t fb_modifier;
	unsigned int width_pages, height_pages;
	uint64_t size;
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			unsigned long offset;
			unsigned int size;
		} partial;
	} params;

	struct sg_table *pages;

	union {
		struct intel_rotation_info rotation_info;
	};
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

enum i915_cache_level;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before the object is bound into, or
 * after it is unbound from, the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND	(1<<0)
#define LOCAL_BIND	(1<<1)
	unsigned int bound : 4;

	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default of zero (I915_GGTT_VIEW_NORMAL) is also what is assumed
	 * in GEM functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits.
	 */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin.
		 */
		uint32_t ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_scratch {
	struct i915_page_dma base;
};

struct i915_page_table {
	struct i915_page_dma base;

	unsigned long *used_ptes;
};

struct i915_page_directory {
	struct i915_page_dma base;

	unsigned long *used_pdes;
	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer {
	/* struct page *page; */
	DECLARE_BITMAP(used_pdpes, GEN8_LEGACY_PDPES);
	struct i915_page_directory *page_directory[GEN8_LEGACY_PDPES];
};

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	u64 start;	/* Start offset, always 0 for dri2 */
	u64 total;	/* size addr space maps (ex. 2GB for ggtt) */

	struct i915_page_scratch *scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY	(1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;

	size_t stolen_size;		/* Total size of stolen memory */
	u64 mappable_end;		/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 u64 *mappable_end);
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_page_directory_pointer pdp;
		struct i915_page_directory pd;
	};

	struct drm_i915_file_private *file_priv;

	gen6_pte_t __iomem *pd_addr;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/* Iterates over every pde from start until start + length. If start and
 * start + length are not perfectly divisible, the macro will round down and
 * up as needed. The macro modifies iter, start, and length. Temp is temp. On
 * gen6/7, start = 0 and length = 2G effectively iterates over every PDE in
 * the system.
 *
 * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
 */
#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
	for (iter = gen6_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
		(pt = (pd)->page_table[iter], true); \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
	     temp = min_t(unsigned, temp, length), \
	     start += temp, length -= temp)
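
/*
 * Illustrative sketch, not part of the driver: i915_pte_count() never
 * counts past a page table boundary. With the gen6 shift (one table maps
 * 4 MiB), a 16-page range wholly inside one table counts 16 PTEs, while
 * the same length starting 4 pages short of a table boundary counts 4.
 */
static inline void i915_pte_count_example(void)
{
	const uint64_t pt_span = 1 << GEN6_PDE_SHIFT;	/* 4 MiB */

	WARN_ON(i915_pte_count(0, 16 * PAGE_SIZE, GEN6_PDE_SHIFT) != 16);
	WARN_ON(i915_pte_count(pt_span - 4 * PAGE_SIZE,
			       16 * PAGE_SIZE, GEN6_PDE_SHIFT) != 4);
}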

#define gen6_for_all_pdes(pt, ppgtt, iter) \
	for (iter = 0; \
	     iter < I915_PDES && \
		(pt = ppgtt->pd.page_table[iter], true); \
	     iter++)

static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1 << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}

static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
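
/*
 * Illustrative sketch, not part of the driver: walking the page tables
 * backing a 4 MiB range of a gen6 PPGTT with gen6_for_each_pde(). Each
 * pass yields one page table, with start/length trimmed so that
 * gen6_pte_index(start) and gen6_pte_count(start, length) cover only the
 * part of the range inside that table.
 */
static inline void gen6_pde_walk_example(struct i915_page_directory *pd)
{
	struct i915_page_table *pt;
	uint32_t start = 4 << 20, length = 4 << 20;	/* [4 MiB, 8 MiB) */
	uint32_t temp, pde;

	gen6_for_each_pde(pt, pd, start, length, temp, pde) {
		/* one PDE spans 1 << GEN6_PDE_SHIFT == 4 MiB, so this
		 * aligned range visits exactly one table, at pde == 1 */
		(void)pt;	/* a real caller would fill or free pt here */
	}
}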

/* Equivalent to the gen6 version: iterates over every pde from start until
 * start + length. On gen8+ it simply iterates over every page directory
 * entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
	for (iter = gen8_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
		(pt = (pd)->page_table[iter], true); \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
	     temp = min(temp, length), \
	     start += temp, length -= temp)

#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
	for (iter = gen8_pdpe_index(start); \
	     length > 0 && iter < GEN8_LEGACY_PDPES && \
		(pd = (pdp)->page_directory[iter], true); \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
	     temp = min(temp, length), \
	     start += temp, length -= temp)

/* Clamp length to the next page_directory boundary */
static inline uint64_t gen8_clamp_pd(uint64_t start, uint64_t length)
{
	uint64_t next_pd = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT);

	if (next_pd > (start + length))
		return length;

	return next_pd - start;
}
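
/*
 * Illustrative sketch, not part of the driver: gen8_clamp_pd() trims a
 * length to the end of the current 1 GiB page directory. A range starting
 * 16 MiB below a PD boundary is clamped to those 16 MiB; a range that fits
 * inside one PD comes back unchanged.
 */
static inline void gen8_clamp_pd_example(void)
{
	const uint64_t pd_span = 1ULL << GEN8_PDPE_SHIFT;	/* 1 GiB */

	WARN_ON(gen8_clamp_pd(pd_span - SZ_16M, SZ_64M) != SZ_16M);
	WARN_ON(gen8_clamp_pd(0, SZ_16M) != SZ_16M);
}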

static inline uint32_t gen8_pte_index(uint64_t address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pde_index(uint64_t address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pdpe_index(uint64_t address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline uint32_t gen8_pml4e_index(uint64_t address)
{
	WARN_ON(1); /* For 64B */
	return 0;
}

static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
	return test_bit(n, ppgtt->pdp.used_pdpes) ?
		px_dma(ppgtt->pdp.page_directory[n]) :
		px_dma(ppgtt->base.scratch_pd);
}

int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);

int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
					struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}

void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);

int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);

static inline bool
i915_ggtt_view_equal(const struct i915_ggtt_view *a,
		     const struct i915_ggtt_view *b)
{
	if (WARN_ON(!a || !b))
		return false;

	if (a->type != b->type)
		return false;
	if (a->type == I915_GGTT_VIEW_PARTIAL)
		return !memcmp(&a->params, &b->params, sizeof(a->params));
	return true;
}

size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view);

#endif