/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

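/*
 * Allocation helpers: the functions below wrap the drm_mm range manager that
 * backs dev_priv->mm.stolen. Every insertion and removal is serialised by
 * mm.stolen_lock, and insertion fails with -ENODEV if stolen memory was
 * never set up.
 */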
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                                         struct drm_mm_node *node, u64 size,
                                         unsigned alignment, u64 start, u64 end)
{
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        /* See the comment at the drm_mm_init() call for more about this check.
         * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
         */
        if (start < 4096 && (IS_GEN8(dev_priv) ||
                             IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
                start = 4096;

        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
                                          alignment, start, end,
                                          DRM_MM_SEARCH_DEFAULT);
        mutex_unlock(&dev_priv->mm.stolen_lock);

        return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;

        return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
                                                    alignment, 0,
                                                    ggtt->stolen_usable_size);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
                                 struct drm_mm_node *node)
{
        mutex_lock(&dev_priv->mm.stolen_lock);
        drm_mm_remove_node(node);
        mutex_unlock(&dev_priv->mm.stolen_lock);
}

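/*
 * i915_stolen_to_physical - locate the physical base address of stolen memory
 *
 * Returns the base address on success, or 0 if it cannot be determined or if
 * the region conflicts with another resource reservation.
 */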
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct resource *r;
        u32 base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at register BSM (0x5c) in the igfx configuration space. On a few
         * (desktop) machines this is also mirrored in the bridge device at
         * different locations, or in the MCHBAR.
         *
         * On 865 we just check the TOUD register.
         *
         * On 830/845/85x the stolen memory base isn't available in any
         * register. We need to calculate it as TOM - TSEG_SIZE - stolen_size.
         *
         */
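        /*
         * A purely illustrative example of that calculation (the numbers are
         * assumptions, not taken from real hardware): DRB3 = 0x10 gives
         * TOM = 0x10 * 32MB = 512MB; with a 1MB TSEG and 8MB of stolen
         * memory, base = 512MB - 1MB - 8MB = 503MB (0x1f700000).
         */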
        base = 0;
        if (INTEL_INFO(dev)->gen >= 3) {
                u32 bsm;

                pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);

                base = bsm & INTEL_BSM_MASK;
        } else if (IS_I865G(dev)) {
                u16 toud = 0;

                /*
                 * FIXME is the graphics stolen memory region
                 * always at TOUD? Ie. is it always the last
                 * one to be allocated by the BIOS?
                 */
                pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
                                         I865_TOUD, &toud);

                base = toud << 16;
        } else if (IS_I85X(dev)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;

                pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
                                         I85X_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE)
                        tseg_size = MB(1);

                pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
                                         I85X_DRB3, &tmp);
                tom = tmp * MB(32);

                base = tom - tseg_size - ggtt->stolen_size;
        } else if (IS_845G(dev)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;

                pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
                                         I845_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE) {
                        switch (tmp & I845_TSEG_SIZE_MASK) {
                        case I845_TSEG_SIZE_512K:
                                tseg_size = KB(512);
                                break;
                        case I845_TSEG_SIZE_1M:
                                tseg_size = MB(1);
                                break;
                        }
                }

                pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);

                base = tom - tseg_size - ggtt->stolen_size;
        } else if (IS_I830(dev)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;

                pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
                                         I830_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE) {
                        if (tmp & I830_TSEG_SIZE_1M)
                                tseg_size = MB(1);
                        else
                                tseg_size = KB(512);
                }

                pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);

                base = tom - tseg_size - ggtt->stolen_size;
        }

        if (base == 0)
                return 0;

        /* make sure we don't clobber the GTT if it's within stolen memory */
        if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
                struct {
                        u32 start, end;
                } stolen[2] = {
                        { .start = base, .end = base + ggtt->stolen_size, },
                        { .start = base, .end = base + ggtt->stolen_size, },
                };
                u64 ggtt_start, ggtt_end;

                ggtt_start = I915_READ(PGTBL_CTL);
                if (IS_GEN4(dev))
                        ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
                                     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        ggtt_start &= PGTBL_ADDRESS_LO_MASK;
                ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

                if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
                        stolen[0].end = ggtt_start;
                if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
                        stolen[1].start = ggtt_end;

                /* pick the larger of the two chunks */
                if (stolen[0].end - stolen[0].start >
                    stolen[1].end - stolen[1].start) {
                        base = stolen[0].start;
                        ggtt->stolen_size = stolen[0].end - stolen[0].start;
                } else {
                        base = stolen[1].start;
                        ggtt->stolen_size = stolen[1].end - stolen[1].start;
                }

                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
                                      (unsigned long long)ggtt_start,
                                      (unsigned long long)ggtt_end - 1);
                        DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
                                      base, base + (u32)ggtt->stolen_size - 1);
                }
        }

        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * Make one more attempt, this time requesting the region
                 * from base + 1, as we have seen that this resolves the
                 * region conflict with the PCI bus.
                 * This is a BIOS w/a: some BIOSes wrap stolen memory in the
                 * root PCI bus resource, but with an off-by-one error. Hence
                 * retry the reservation starting at base + 1 instead of base.
                 */
                r = devm_request_mem_region(dev->dev, base + 1,
                                            ggtt->stolen_size - 1,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
                if (r == NULL && !IS_GEN3(dev)) {
                        DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
                                  base, base + (uint32_t)ggtt->stolen_size);
                        base = 0;
                }
        }

        return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        drm_mm_takedown(&dev_priv->mm.stolen);
}

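/*
 * The *_get_stolen_reserved() helpers below decode the per-platform
 * STOLEN_RESERVED register into the base and size of the reserved portion
 * of stolen memory, which i915_gem_init_stolen() then excludes from the
 * usable range handed to the allocator.
 */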
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    unsigned long *base, unsigned long *size)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
                                     CTG_STOLEN_RESERVED :
                                     ELK_STOLEN_RESERVED);
        unsigned long stolen_top = dev_priv->mm.stolen_base +
                                   ggtt->stolen_size;

        *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

        WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

        /* On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and there's
         * nothing reserved. */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
        case GEN6_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_512K:
                *size = 512 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_128K:
                *size = 128 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN7_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
        case GEN8_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_2M:
                *size = 2 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_4M:
                *size = 4 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_8M:
                *size = 8 * 1024 * 1024;
                break;
        default:
                *size = 8 * 1024 * 1024;
                MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    unsigned long *base, unsigned long *size)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
        unsigned long stolen_top;

        stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        /* On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and there's
         * nothing reserved. */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}

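/*
 * i915_gem_init_stolen - set up the allocator for stolen memory
 *
 * Finds the physical base of stolen memory, works out how much of it is
 * reserved (and therefore unusable), and initialises dev_priv->mm.stolen to
 * manage the remainder. Always returns 0, so that driver load continues even
 * when stolen memory cannot be used.
 */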
int i915_gem_init_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned long reserved_total, reserved_base = 0, reserved_size;
        unsigned long stolen_top;

        mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
                DRM_INFO("DMAR active, disabling use of stolen memory\n");
                return 0;
        }
#endif

        if (ggtt->stolen_size == 0)
                return 0;

        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

        switch (INTEL_INFO(dev_priv)->gen) {
        case 2:
        case 3:
                break;
        case 4:
                if (IS_G4X(dev))
                        g4x_get_stolen_reserved(dev_priv, &reserved_base,
                                                &reserved_size);
                break;
        case 5:
                /* Assume the gen6 maximum for the older platforms. */
                reserved_size = 1024 * 1024;
                reserved_base = stolen_top - reserved_size;
                break;
        case 6:
                gen6_get_stolen_reserved(dev_priv, &reserved_base,
                                         &reserved_size);
                break;
        case 7:
                gen7_get_stolen_reserved(dev_priv, &reserved_base,
                                         &reserved_size);
                break;
        default:
                if (IS_BROADWELL(dev_priv) ||
                    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
                        bdw_get_stolen_reserved(dev_priv, &reserved_base,
                                                &reserved_size);
                else
                        gen8_get_stolen_reserved(dev_priv, &reserved_base,
                                                 &reserved_size);
                break;
        }

        /* It is possible for the reserved base to be zero, but the register
         * field for size doesn't have a zero option. */
        if (reserved_base == 0) {
                reserved_size = 0;
                reserved_base = stolen_top;
        }

        if (reserved_base < dev_priv->mm.stolen_base ||
            reserved_base + reserved_size > stolen_top) {
                DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
                              reserved_base, reserved_base + reserved_size,
                              dev_priv->mm.stolen_base, stolen_top);
                return 0;
        }

        ggtt->stolen_reserved_base = reserved_base;
        ggtt->stolen_reserved_size = reserved_size;

        /* It is possible for the reserved area to end before the end of stolen
         * memory, so just consider the start. */
        reserved_total = stolen_top - reserved_base;

        DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
                      ggtt->stolen_size >> 10,
                      (ggtt->stolen_size - reserved_total) >> 10);

        ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;

        /*
         * Basic memrange allocator for stolen space.
         *
         * TODO: Notice that some platforms require us to not use the first page
         * of the stolen memory but their BIOSes may still put the framebuffer
         * on the first page. So we don't reserve this page for now because of
         * that. Our current solution is to just prevent new nodes from being
         * inserted on the first page - see the check we have at
         * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
         * problem later.
         */
        drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);

        return 0;
}

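/*
 * i915_pages_create_for_stolen - build a fake sg_table for a stolen range
 *
 * Builds a single-entry sg_table whose dma address points directly at the
 * given physical range of stolen memory. Returns NULL on allocation failure.
 */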
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct sg_table *st;
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
        BUG_ON(offset > ggtt->stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return NULL;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}

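/*
 * The backing storage of a stolen object is created and pinned when the
 * object is created, so get_pages should never be reached, and put_pages
 * only runs when the object is finally freed.
 */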
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        BUG();
        return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
        /* Should only be called during free */
        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

        if (obj->stolen) {
                i915_gem_stolen_remove_node(dev_priv, obj->stolen);
                kfree(obj->stolen);
                obj->stolen = NULL;
        }
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};

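/*
 * _i915_gem_object_create_stolen - wrap a reserved stolen node in a GEM object
 *
 * On success the object takes ownership of @stolen, the fake sg table is
 * created and the pages are pinned immediately. On failure NULL is returned
 * and the caller keeps ownership of the node.
 */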
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->pages = i915_pages_create_for_stolen(dev,
                                                  stolen->start, stolen->size);
        if (obj->pages == NULL)
                goto cleanup;

        obj->get_page.sg = obj->pages->sgl;
        obj->get_page.last = 0;

        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;

        obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}

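/*
 * i915_gem_object_create_stolen - allocate an object backed by stolen memory
 *
 * Carves @size bytes (4KiB aligned) out of the stolen allocator and wraps the
 * allocation in a GEM object. Returns NULL if stolen memory is unavailable or
 * the allocation fails.
 */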
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
        if (size == 0)
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
        if (ret) {
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;

        i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
        return NULL;
}

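/*
 * i915_gem_object_create_stolen_for_preallocated - wrap a fixed stolen range
 *
 * Used for regions whose placement was decided before the driver loaded, for
 * example a framebuffer that the BIOS set up in stolen memory: the stolen
 * offset and, unless gtt_offset is I915_GTT_OFFSET_NONE, the GGTT offset are
 * reserved exactly as given rather than allocated. The caller must hold
 * struct_mutex.
 */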
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        lockdep_assert_held(&dev->struct_mutex);

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                      stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
            WARN_ON(stolen_offset & 4095))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        mutex_unlock(&dev_priv->mm.stolen_lock);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                i915_gem_stolen_remove_node(dev_priv, stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
        }

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;

        ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                goto err;
        }

        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
        list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
        obj->bind_count++;

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        i915_gem_object_pin_pages(obj);

        return obj;

err:
        i915_gem_object_put(obj);
        return NULL;
}