/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

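/*
 * Allocate a range from the stolen memory drm_mm manager. All access to the
 * allocator is serialised by dev_priv->mm.stolen_lock; -ENODEV is returned
 * if stolen memory was never successfully set up for this device.
 */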
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0,
						    dev_priv->gtt.stolen_usable_size);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

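/*
 * Work out the physical base address of stolen memory and reserve it in the
 * resource tree so that nothing else can claim it. Returns 0 if the base
 * cannot be determined or an unresolvable conflict is detected.
 */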
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However gen2 requires an unavailable symbol.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		base &= ~((1<<20) - 1);
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
		};
		u64 gtt_start, gtt_end;

		gtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			gtt_start &= PGTBL_ADDRESS_LO_MASK;
		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
			stolen[0].end = gtt_start;
		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
			stolen[1].start = gtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long) gtt_start,
				      (unsigned long long) gtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    dev_priv->gtt.stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
			base = 0;
		}
	}

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

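/*
 * The *_get_stolen_reserved() helpers decode the base and size of the chunk
 * of stolen memory that the BIOS/firmware keeps for its own use, as reported
 * by the generation-specific *_STOLEN_RESERVED registers.
 */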
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	unsigned long stolen_top = dev_priv->mm.stolen_base +
				   dev_priv->gtt.stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	unsigned long stolen_top;

	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

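/*
 * Probe the layout of stolen memory: find its physical base, work out how
 * much of the top is reserved by the BIOS/firmware, and initialise the
 * drm_mm allocator over the remaining usable range. Note that all failure
 * paths return 0 so that the driver simply runs without stolen memory.
 */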
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long reserved_total, reserved_base = 0, reserved_size;
	unsigned long stolen_top;

	mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev))
			g4x_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	default:
		if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
			bdw_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		else
			gen8_get_stolen_reserved(dev_priv, &reserved_base,
						 &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
			      reserved_base, reserved_base + reserved_size,
			      dev_priv->mm.stolen_base, stolen_top);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
		      dev_priv->gtt.stolen_size >> 10,
		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);

	dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
					   reserved_total;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);

	return 0;
}

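/*
 * Stolen memory has no struct page backing, so the contiguous physical range
 * is described by a single-entry sg_table carrying only a dma address.
 */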
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > dev_priv->gtt.stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	if (obj->stolen) {
		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

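/*
 * Wrap an already-reserved stolen drm_mm node in a GEM object. On failure
 * the caller remains responsible for releasing the node.
 */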
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

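/*
 * Create a GEM object backed by a freshly allocated, page-aligned range of
 * stolen memory. Returns NULL if stolen memory is unavailable or the
 * allocation fails.
 */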
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

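/*
 * Create a GEM object around a range of stolen memory that firmware has
 * already populated (typically the BIOS framebuffer we reuse for fbcon),
 * at a fixed stolen offset and, optionally, a fixed global GTT offset.
 */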
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
	    WARN_ON(stolen_offset & 4095))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_out;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err_vma;
		}
	}

	vma->bound |= GLOBAL_BIND;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err_vma:
	i915_gem_vma_destroy(vma);
err_out:
	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	drm_gem_object_unreference(&obj->base);
	return NULL;
}