/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

#include <linux/io-mapping.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
#include "i915_selftest.h"

#define I915_GTT_PAGE_SIZE 4096UL
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_file_private;
struct drm_i915_fence_reg;

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;

#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
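/* e.g. with 4KiB pages, a 4GiB GGTT yields 4GiB >> PAGE_SHIFT == 1048576 entries */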

/* gen6-hsw has bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
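/*
 * Illustrative encode: for a page-aligned physical address with bit 32 set,
 * say 0x100000000,
 *	GEN6_PTE_ADDR_ENCODE(0x100000000) == 0x100000000 | 0x010 == 0x100000010
 * i.e. physical address bit 32 lands in PTE bit 4, per the bits 11:4 <->
 * bits 39:32 scheme above.  A PTE encoder would then typically OR in
 * GEN6_PTE_VALID and a cache mode such as GEN6_PTE_CACHE_LLC.
 */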

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
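/*
 * Illustrative expansions of the cacheability encoding above:
 *	HSW_CACHEABILITY_CONTROL(0x7) == 0xe	(PTE bits 3:1 = 0b111, bit 11 = 0)
 *	HSW_CACHEABILITY_CONTROL(0x8) == 0x800	(PTE bits 3:1 = 0b000, bit 11 = 1)
 * i.e. the low three bits of the 4-bit value land in PTE bits 3:1 and the
 * fourth bit lands in PTE bit 11.
 */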

/* GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 */
#define GEN8_3LVL_PDPES			4
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))
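/*
 * Illustrative decode of a 32b GPU address, 0x80403123, under the layout
 * above:
 *	PDPE   = bits 31:30 = 2
 *	PDE    = bits 29:21 = 2
 *	PTE    = bits 20:12 = 3
 *	offset = bits 11:0  = 0x123
 * matching what gen8_pdpe_index(), gen8_pde_index() and gen8_pte_index()
 * below return for that address.
 */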

/* GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_PML4ES_PER_PML4		512
#define GEN8_PML4E_SHIFT		39
#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT			30
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on
 * 32b page tables.
 */
#define GEN8_PDPE_MASK			0x1ff
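/*
 * Illustrative decode of a 48b GPU address, 0x8080403123 (the 32b example
 * above with bit 39 set), under the 4 level layout:
 *	PML4E  = bits 47:39 = 1
 *	PDPE   = bits 38:30 = 2
 *	PDE    = bits 29:21 = 2
 *	PTE    = bits 20:12 = 3
 *	offset = bits 11:0  = 0x123
 */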

#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

struct sg_table;

struct intel_rotation_info {
	struct intel_rotation_plane_info {
		/* tiles */
		unsigned int width, height, stride, offset;
	} plane[2];
} __packed;

static inline void assert_intel_rotation_info_is_packed(void)
{
	BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
}

struct intel_partial_info {
	u64 offset;
	unsigned int size;
} __packed;

static inline void assert_intel_partial_info_is_packed(void)
{
	BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
}

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
	I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
};

static inline void assert_i915_ggtt_view_type_is_unique(void)
{
	/* As we encode the size of each branch inside the union into its type,
	 * we have to be careful that each branch has a unique size.
	 */
	switch ((enum i915_ggtt_view_type)0) {
	case I915_GGTT_VIEW_NORMAL:
	case I915_GGTT_VIEW_PARTIAL:
	case I915_GGTT_VIEW_ROTATED:
		/* gcc complains if these are identical cases */
		break;
	}
}

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;
	union {
		/* Members need to contain no holes/padding */
		struct intel_partial_info partial;
		struct intel_rotation_info rotated;
	};
};
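/*
 * Because the view type doubles as the size in bytes of the matching union
 * member, two views can be compared generically.  A minimal sketch (helper
 * name hypothetical; memcmp per <linux/string.h>):
 *
 *	static inline bool i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 *						const struct i915_ggtt_view *b)
 *	{
 *		if (a->type != b->type)
 *			return false;
 *		// a->type bytes cover exactly the active union member
 *		return !memcmp(&a->partial, &b->partial, a->type);
 *	}
 */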

enum i915_cache_level;

struct i915_vma;

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		u32 ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_table {
	struct i915_page_dma base;
	unsigned int used_ptes;
};

struct i915_page_directory {
	struct i915_page_dma base;

	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
	unsigned int used_pdes;
};

struct i915_page_directory_pointer {
	struct i915_page_dma base;
	struct i915_page_directory **page_directory;
	unsigned int used_pdpes;
};

struct i915_pml4 {
	struct i915_page_dma base;
	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};

struct i915_address_space {
	struct drm_mm mm;
	struct i915_gem_timeline timeline;
	struct drm_i915_private *i915;
	struct device *dma;
	/* Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	struct list_head global_link;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */

	bool closed;

	struct i915_page_dma scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/**
	 * List of vma that have been unbound.
	 *
	 * A reference is not held on the buffer while on this list.
	 */
	struct list_head unbound_list;

	struct pagevec free_pages;
	bool pt_kmap_wc;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY	(1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       u64 start,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
};

#define i915_is_ggtt(V) (!(V)->file)

static inline bool
i915_vm_is_48bit(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}
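/*
 * e.g. a 4GiB address space gives (4GiB - 1) >> 32 == 0 (not 48b addressing),
 * while a full 48b per-process space gives a non-zero result.
 */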

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_ggtt {
	struct i915_address_space base;
	struct io_mapping mappable;	/* Mapping to our CPU mappable region */

	phys_addr_t mappable_base;	/* PA of our GMADR */
	u64 mappable_end;		/* End offset that we can CPU map */

	/* Stolen memory is segmented in hardware with different portions
	 * off-limits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	u32 stolen_size;		/* Total size of stolen memory */
	u32 stolen_usable_size;		/* Total size minus reserved ranges */
	u32 stolen_reserved_base;
	u32 stolen_reserved_size;

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct drm_i915_private *dev_priv);

	bool do_idle_maps;

	int mtrr;

	struct drm_mm_node error_capture;
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_pml4 pml4;			/* GEN8+ & 48b PPGTT */
		struct i915_page_directory_pointer pdp;	/* GEN8+ */
		struct i915_page_directory pd;		/* GEN6-7 */
	};

	gen6_pte_t __iomem *pd_addr;

	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible, the macro will round
 * down and up as needed. Start=0 and length=2G effectively iterates over
 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
 * so each of the other parameters should preferably be a simple variable, or
 * at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen6_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);		\
		temp = min(temp - start, length);			\
		start += temp, length -= temp; }), ++iter)

#define gen6_for_all_pdes(pt, pd, iter)					\
	for (iter = 0;							\
	     iter < I915_PDES &&					\
		(pt = (pd)->page_table[iter], true);			\
	     ++iter)
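/*
 * A minimal usage sketch for gen6_for_each_pde() (local variable names are
 * illustrative only):
 *
 *	struct i915_page_table *pt;
 *	u64 start = from, length = len;	// both are clobbered by the macro
 *	u32 pde;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
 *		// pt == ppgtt->pd.page_table[pde]; gen6_pte_count(start, length)
 *		// tells how many of its PTEs fall within the remaining range
 *	}
 */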

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr|length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
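/*
 * Worked examples with GEN6_PDE_SHIFT == 22 (1024 PTEs per page table):
 *	i915_pte_count(0x1000, 0x3000, 22) == 3	   (pages 1..3 of the first PT)
 *	i915_pte_count(0x3ff000, 0x2000, 22) == 1  (clamped at the PT boundary,
 *						    leaving the remainder for
 *						    the next page table)
 */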

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline u32 gen6_pte_index(u32 addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pte_count(u32 addr, u32 length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pde_index(u32 addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

static inline unsigned int
i915_pdpes_per_pdp(const struct i915_address_space *vm)
{
	if (i915_vm_is_48bit(vm))
		return GEN8_PML4ES_PER_PML4;

	return GEN8_3LVL_PDPES;
}

/* Equivalent to the gen6 version: iterates over every pde between start and
 * start + length. On gen8+ it simply iterates over every page directory
 * entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen8_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT);		\
		temp = min(temp - start, length);			\
		start += temp, length -= temp; }), ++iter)

/* NB: relies on a variable named 'vm' (the i915_address_space) being in
 * scope at the call site.
 */
#define gen8_for_each_pdpe(pd, pdp, start, length, iter)		\
	for (iter = gen8_pdpe_index(start);				\
	     length > 0 && iter < i915_pdpes_per_pdp(vm) &&		\
		(pd = (pdp)->page_directory[iter], true);		\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT);	\
		temp = min(temp - start, length);			\
		start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter)		\
	for (iter = gen8_pml4e_index(start);				\
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 &&		\
		(pdp = (pml4)->pdps[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT);	\
		temp = min(temp - start, length);			\
		start += temp, length -= temp; }), ++iter)

static inline u32 gen8_pte_index(u64 address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline u32 gen8_pde_index(u64 address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline u32 gen8_pdpe_index(u64 address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline u32 gen8_pml4e_index(u64 address)
{
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}

static inline u64 gen8_pte_count(u64 address, u64 length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
	return px_dma(ppgtt->pdp.page_directory[n]);
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, base);
}

int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);

int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
void i915_ggtt_enable_guc(struct drm_i915_private *i915);
void i915_ggtt_disable_guc(struct drm_i915_private *i915);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);

int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
					struct drm_i915_file_private *fpriv,
					const char *name);
void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);

int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
					    struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages);

int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags);

int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags);

/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK		BIT(0)
#define PIN_MAPPABLE		BIT(1)
#define PIN_ZONE_4G		BIT(2)
#define PIN_NONFAULT		BIT(3)

#define PIN_MBZ			BIT(5) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL		BIT(6) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER		BIT(7) /* I915_VMA_LOCAL_BIND */
#define PIN_UPDATE		BIT(8)

#define PIN_HIGH		BIT(9)
#define PIN_OFFSET_BIAS		BIT(10)
#define PIN_OFFSET_FIXED	BIT(11)
#define PIN_OFFSET_MASK		(-I915_GTT_PAGE_SIZE)
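/*
 * The PIN_* flags are ORed together when pinning; PIN_OFFSET_BIAS and
 * PIN_OFFSET_FIXED are accompanied by a page-aligned GTT address carried in
 * the bits covered by PIN_OFFSET_MASK.  A usage sketch (assuming the
 * i915_vma_pin() helper declared in i915_vma.h):
 *
 *	err = i915_vma_pin(vma, 0, 0,
 *			   PIN_GLOBAL | PIN_MAPPABLE |
 *			   PIN_OFFSET_BIAS | bias);
 */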

#endif