/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
| 28 | |
David Howells | 760285e | 2012-10-02 18:01:07 +0100 | [diff] [blame] | 29 | #include <drm/drmP.h> |
| 30 | #include <drm/i915_drm.h> |
Chris Wilson | 9797fbf | 2012-04-24 15:47:39 +0100 | [diff] [blame] | 31 | #include "i915_drv.h" |
| 32 | |
/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
| 44 | |
/* Discover the physical base address of the stolen memory region and
 * claim it as a device-managed resource so nothing else in the kernel
 * can map over it.
 *
 * Returns the physical base address, or 0 if it could not be determined
 * or the region conflicts with an already-claimed resource.
 */
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However gen2 requires an unavailable symbol.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		/* Low 20 bits are flags/reserved; the base is 1MiB aligned. */
		base &= ~((1<<20) - 1);
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	/* Gen2 path disabled above, or the register read produced nothing. */
	if (base == 0)
		return 0;

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
			  base, base + (uint32_t)dev_priv->gtt.stolen_size);
		/* Treat a conflicting region as "no usable stolen memory". */
		base = 0;
	}

	return base;
}
| 92 | |
/* Carve the FBC compressed framebuffer (and, on pre-GM45 hardware, the
 * compressed line buffer) out of stolen memory and program the hardware
 * base registers to point at it.
 *
 * Returns 0 on success, -ENOSPC if stolen memory could not satisfy the
 * request (allocation failures are folded into the same error).
 */
static int i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	int ret;

	compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
	if (!compressed_fb)
		goto err_llb;

	/* Try to over-allocate to reduce reallocations and fragmentation.
	 * NOTE: the "size <<= 1" / "size >>= 1" arguments deliberately
	 * modify @size in place, so the value recorded in fbc.size below
	 * reflects whichever attempt actually succeeded.
	 */
	ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
					 size >>= 1, 4096,
					 DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto err_llb;

	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		/* Older hardware additionally needs a compressed line buffer. */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
					 4096, 4096, DRM_MM_SEARCH_DEFAULT);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		/* These registers take physical addresses, hence the
		 * stolen_base offset (unlike the ILK/GM45 registers above,
		 * which take offsets into stolen memory).
		 */
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + compressed_fb->start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	dev_priv->fbc.compressed_fb = compressed_fb;
	dev_priv->fbc.size = size;

	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
		      size);

	return 0;

err_fb:
	/* compressed_llb is NULL here if its kzalloc failed; kfree(NULL) is a no-op. */
	kfree(compressed_llb);
	drm_mm_remove_node(compressed_fb);
err_llb:
	kfree(compressed_fb);
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
| 151 | |
Chris Wilson | 11be49e | 2012-11-15 11:32:20 +0000 | [diff] [blame] | 152 | int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) |
Chris Wilson | 9797fbf | 2012-04-24 15:47:39 +0100 | [diff] [blame] | 153 | { |
| 154 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 155 | |
Daniel Vetter | 446f8d8 | 2013-07-02 10:48:31 +0200 | [diff] [blame] | 156 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
Chris Wilson | 11be49e | 2012-11-15 11:32:20 +0000 | [diff] [blame] | 157 | return -ENODEV; |
| 158 | |
Ben Widawsky | 5c3fe8b | 2013-06-27 16:30:21 -0700 | [diff] [blame] | 159 | if (size < dev_priv->fbc.size) |
Chris Wilson | 11be49e | 2012-11-15 11:32:20 +0000 | [diff] [blame] | 160 | return 0; |
| 161 | |
| 162 | /* Release any current block */ |
| 163 | i915_gem_stolen_cleanup_compression(dev); |
| 164 | |
| 165 | return i915_setup_compression(dev, size); |
| 166 | } |
| 167 | |
| 168 | void i915_gem_stolen_cleanup_compression(struct drm_device *dev) |
| 169 | { |
| 170 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 171 | |
Ben Widawsky | 5c3fe8b | 2013-06-27 16:30:21 -0700 | [diff] [blame] | 172 | if (dev_priv->fbc.size == 0) |
Chris Wilson | 11be49e | 2012-11-15 11:32:20 +0000 | [diff] [blame] | 173 | return; |
| 174 | |
David Herrmann | 06e78ed | 2013-07-27 16:21:27 +0200 | [diff] [blame] | 175 | if (dev_priv->fbc.compressed_fb) { |
| 176 | drm_mm_remove_node(dev_priv->fbc.compressed_fb); |
| 177 | kfree(dev_priv->fbc.compressed_fb); |
| 178 | } |
Chris Wilson | 11be49e | 2012-11-15 11:32:20 +0000 | [diff] [blame] | 179 | |
David Herrmann | 06e78ed | 2013-07-27 16:21:27 +0200 | [diff] [blame] | 180 | if (dev_priv->fbc.compressed_llb) { |
| 181 | drm_mm_remove_node(dev_priv->fbc.compressed_llb); |
| 182 | kfree(dev_priv->fbc.compressed_llb); |
| 183 | } |
Chris Wilson | 11be49e | 2012-11-15 11:32:20 +0000 | [diff] [blame] | 184 | |
Ben Widawsky | 5c3fe8b | 2013-06-27 16:30:21 -0700 | [diff] [blame] | 185 | dev_priv->fbc.size = 0; |
Chris Wilson | 9797fbf | 2012-04-24 15:47:39 +0100 | [diff] [blame] | 186 | } |
| 187 | |
/* Tear down the stolen-memory allocator on driver unload.
 *
 * The FBC compression buffers live inside the stolen range, so they
 * must be released before the allocator itself is taken down.
 */
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Init never ran (no usable stolen memory) — nothing to do. */
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	i915_gem_stolen_cleanup_compression(dev);
	drm_mm_takedown(&dev_priv->mm.stolen);
}
| 198 | |
| 199 | int i915_gem_init_stolen(struct drm_device *dev) |
| 200 | { |
| 201 | struct drm_i915_private *dev_priv = dev->dev_private; |
Jesse Barnes | c9cddff | 2013-05-08 10:45:13 -0700 | [diff] [blame] | 202 | int bios_reserved = 0; |
Chris Wilson | 9797fbf | 2012-04-24 15:47:39 +0100 | [diff] [blame] | 203 | |
Chris Wilson | 6644a4e | 2013-09-05 13:40:25 +0100 | [diff] [blame] | 204 | if (dev_priv->gtt.stolen_size == 0) |
| 205 | return 0; |
| 206 | |
Chris Wilson | e12a2d5 | 2012-11-15 11:32:18 +0000 | [diff] [blame] | 207 | dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); |
| 208 | if (dev_priv->mm.stolen_base == 0) |
| 209 | return 0; |
| 210 | |
Ben Widawsky | a54c0c2 | 2013-01-24 14:45:00 -0800 | [diff] [blame] | 211 | DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n", |
| 212 | dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base); |
Chris Wilson | e12a2d5 | 2012-11-15 11:32:18 +0000 | [diff] [blame] | 213 | |
Jesse Barnes | c9cddff | 2013-05-08 10:45:13 -0700 | [diff] [blame] | 214 | if (IS_VALLEYVIEW(dev)) |
| 215 | bios_reserved = 1024*1024; /* top 1M on VLV/BYT */ |
| 216 | |
Daniel Vetter | 897f9ed | 2013-07-09 14:44:27 +0200 | [diff] [blame] | 217 | if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size)) |
| 218 | return 0; |
| 219 | |
Chris Wilson | 9797fbf | 2012-04-24 15:47:39 +0100 | [diff] [blame] | 220 | /* Basic memrange allocator for stolen space */ |
Jesse Barnes | c9cddff | 2013-05-08 10:45:13 -0700 | [diff] [blame] | 221 | drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size - |
| 222 | bios_reserved); |
Chris Wilson | 9797fbf | 2012-04-24 15:47:39 +0100 | [diff] [blame] | 223 | |
| 224 | return 0; |
| 225 | } |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 226 | |
| 227 | static struct sg_table * |
| 228 | i915_pages_create_for_stolen(struct drm_device *dev, |
| 229 | u32 offset, u32 size) |
| 230 | { |
| 231 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 232 | struct sg_table *st; |
| 233 | struct scatterlist *sg; |
| 234 | |
| 235 | DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); |
Ben Widawsky | a54c0c2 | 2013-01-24 14:45:00 -0800 | [diff] [blame] | 236 | BUG_ON(offset > dev_priv->gtt.stolen_size - size); |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 237 | |
| 238 | /* We hide that we have no struct page backing our stolen object |
| 239 | * by wrapping the contiguous physical allocation with a fake |
| 240 | * dma mapping in a single scatterlist. |
| 241 | */ |
| 242 | |
| 243 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
| 244 | if (st == NULL) |
| 245 | return NULL; |
| 246 | |
| 247 | if (sg_alloc_table(st, 1, GFP_KERNEL)) { |
| 248 | kfree(st); |
| 249 | return NULL; |
| 250 | } |
| 251 | |
| 252 | sg = st->sgl; |
Imre Deak | ed23abd | 2013-03-26 15:14:19 +0200 | [diff] [blame] | 253 | sg->offset = offset; |
| 254 | sg->length = size; |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 255 | |
| 256 | sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; |
| 257 | sg_dma_len(sg) = size; |
| 258 | |
| 259 | return st; |
| 260 | } |
| 261 | |
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Stolen objects have their pages created and pinned at object
	 * creation time (see _i915_gem_object_create_stolen), so this
	 * callback must be unreachable.
	 */
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

/* Page-management callbacks for GEM objects backed by stolen memory. */
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
};
| 279 | |
/* Wrap an already-reserved stolen-memory node @stolen in a GEM object.
 *
 * On success the object takes ownership of @stolen (stored in
 * obj->stolen) and its pages are created and pinned. Returns NULL on
 * failure, in which case ownership of @stolen stays with the caller.
 */
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	/* Private init: no shmem backing store for stolen memory. */
	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->has_dma_mapping = true;
	/* Pin up front — the stolen get_pages callback is a BUG() stub. */
	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}
| 311 | |
| 312 | struct drm_i915_gem_object * |
| 313 | i915_gem_object_create_stolen(struct drm_device *dev, u32 size) |
| 314 | { |
| 315 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 316 | struct drm_i915_gem_object *obj; |
| 317 | struct drm_mm_node *stolen; |
David Herrmann | 06e78ed | 2013-07-27 16:21:27 +0200 | [diff] [blame] | 318 | int ret; |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 319 | |
Daniel Vetter | 446f8d8 | 2013-07-02 10:48:31 +0200 | [diff] [blame] | 320 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 321 | return NULL; |
| 322 | |
| 323 | DRM_DEBUG_KMS("creating stolen object: size=%x\n", size); |
| 324 | if (size == 0) |
| 325 | return NULL; |
| 326 | |
David Herrmann | 06e78ed | 2013-07-27 16:21:27 +0200 | [diff] [blame] | 327 | stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); |
| 328 | if (!stolen) |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 329 | return NULL; |
| 330 | |
David Herrmann | 06e78ed | 2013-07-27 16:21:27 +0200 | [diff] [blame] | 331 | ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size, |
| 332 | 4096, DRM_MM_SEARCH_DEFAULT); |
| 333 | if (ret) { |
| 334 | kfree(stolen); |
| 335 | return NULL; |
| 336 | } |
| 337 | |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 338 | obj = _i915_gem_object_create_stolen(dev, stolen); |
| 339 | if (obj) |
| 340 | return obj; |
| 341 | |
David Herrmann | 06e78ed | 2013-07-27 16:21:27 +0200 | [diff] [blame] | 342 | drm_mm_remove_node(stolen); |
| 343 | kfree(stolen); |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 344 | return NULL; |
| 345 | } |
| 346 | |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 347 | struct drm_i915_gem_object * |
| 348 | i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, |
| 349 | u32 stolen_offset, |
| 350 | u32 gtt_offset, |
| 351 | u32 size) |
| 352 | { |
| 353 | struct drm_i915_private *dev_priv = dev->dev_private; |
Ben Widawsky | 40d74980 | 2013-07-31 16:59:59 -0700 | [diff] [blame] | 354 | struct i915_address_space *ggtt = &dev_priv->gtt.base; |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 355 | struct drm_i915_gem_object *obj; |
| 356 | struct drm_mm_node *stolen; |
Ben Widawsky | 2f63315 | 2013-07-17 12:19:03 -0700 | [diff] [blame] | 357 | struct i915_vma *vma; |
Ben Widawsky | b3a070c | 2013-07-05 14:41:02 -0700 | [diff] [blame] | 358 | int ret; |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 359 | |
Daniel Vetter | 446f8d8 | 2013-07-02 10:48:31 +0200 | [diff] [blame] | 360 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 361 | return NULL; |
| 362 | |
| 363 | DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", |
| 364 | stolen_offset, gtt_offset, size); |
| 365 | |
| 366 | /* KISS and expect everything to be page-aligned */ |
| 367 | BUG_ON(stolen_offset & 4095); |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 368 | BUG_ON(size & 4095); |
| 369 | |
| 370 | if (WARN_ON(size == 0)) |
| 371 | return NULL; |
| 372 | |
Ben Widawsky | b3a070c | 2013-07-05 14:41:02 -0700 | [diff] [blame] | 373 | stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); |
| 374 | if (!stolen) |
| 375 | return NULL; |
| 376 | |
Ben Widawsky | 338710e | 2013-07-05 14:41:03 -0700 | [diff] [blame] | 377 | stolen->start = stolen_offset; |
| 378 | stolen->size = size; |
| 379 | ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); |
Ben Widawsky | b3a070c | 2013-07-05 14:41:02 -0700 | [diff] [blame] | 380 | if (ret) { |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 381 | DRM_DEBUG_KMS("failed to allocate stolen space\n"); |
Ben Widawsky | b3a070c | 2013-07-05 14:41:02 -0700 | [diff] [blame] | 382 | kfree(stolen); |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 383 | return NULL; |
| 384 | } |
| 385 | |
| 386 | obj = _i915_gem_object_create_stolen(dev, stolen); |
| 387 | if (obj == NULL) { |
| 388 | DRM_DEBUG_KMS("failed to allocate stolen object\n"); |
David Herrmann | 06e78ed | 2013-07-27 16:21:27 +0200 | [diff] [blame] | 389 | drm_mm_remove_node(stolen); |
| 390 | kfree(stolen); |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 391 | return NULL; |
| 392 | } |
| 393 | |
Jesse Barnes | 3727d55 | 2013-05-08 10:45:14 -0700 | [diff] [blame] | 394 | /* Some objects just need physical mem from stolen space */ |
Daniel Vetter | 190d6cd | 2013-07-04 13:06:28 +0200 | [diff] [blame] | 395 | if (gtt_offset == I915_GTT_OFFSET_NONE) |
Jesse Barnes | 3727d55 | 2013-05-08 10:45:14 -0700 | [diff] [blame] | 396 | return obj; |
| 397 | |
Daniel Vetter | e656a6c | 2013-08-14 14:14:04 +0200 | [diff] [blame] | 398 | vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt); |
Dan Carpenter | db473b3 | 2013-07-19 08:45:46 +0300 | [diff] [blame] | 399 | if (IS_ERR(vma)) { |
| 400 | ret = PTR_ERR(vma); |
Ben Widawsky | 2f63315 | 2013-07-17 12:19:03 -0700 | [diff] [blame] | 401 | goto err_out; |
| 402 | } |
| 403 | |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 404 | /* To simplify the initialisation sequence between KMS and GTT, |
| 405 | * we allow construction of the stolen object prior to |
| 406 | * setting up the GTT space. The actual reservation will occur |
| 407 | * later. |
| 408 | */ |
Ben Widawsky | 2f63315 | 2013-07-17 12:19:03 -0700 | [diff] [blame] | 409 | vma->node.start = gtt_offset; |
| 410 | vma->node.size = size; |
Ben Widawsky | 40d74980 | 2013-07-31 16:59:59 -0700 | [diff] [blame] | 411 | if (drm_mm_initialized(&ggtt->mm)) { |
| 412 | ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); |
Ben Widawsky | b3a070c | 2013-07-05 14:41:02 -0700 | [diff] [blame] | 413 | if (ret) { |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 414 | DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); |
Daniel Vetter | 4a025e2 | 2013-08-14 10:01:32 +0200 | [diff] [blame] | 415 | goto err_vma; |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 416 | } |
Ben Widawsky | edd41a8 | 2013-07-05 14:41:05 -0700 | [diff] [blame] | 417 | } |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 418 | |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 419 | obj->has_global_gtt_mapping = 1; |
| 420 | |
Ben Widawsky | 35c20a6 | 2013-05-31 11:28:48 -0700 | [diff] [blame] | 421 | list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); |
Ben Widawsky | ca191b1 | 2013-07-31 17:00:14 -0700 | [diff] [blame] | 422 | list_add_tail(&vma->mm_list, &ggtt->inactive_list); |
Daniel Vetter | d8ccba8 | 2013-12-17 23:42:11 +0100 | [diff] [blame^] | 423 | i915_gem_object_pin_pages(obj); |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 424 | |
| 425 | return obj; |
Ben Widawsky | b3a070c | 2013-07-05 14:41:02 -0700 | [diff] [blame] | 426 | |
Daniel Vetter | 4a025e2 | 2013-08-14 10:01:32 +0200 | [diff] [blame] | 427 | err_vma: |
| 428 | i915_gem_vma_destroy(vma); |
Ben Widawsky | f7f1818 | 2013-07-17 12:19:02 -0700 | [diff] [blame] | 429 | err_out: |
Dave Airlie | 32c913e | 2013-08-07 18:09:03 +1000 | [diff] [blame] | 430 | drm_mm_remove_node(stolen); |
| 431 | kfree(stolen); |
Ben Widawsky | b3a070c | 2013-07-05 14:41:02 -0700 | [diff] [blame] | 432 | drm_gem_object_unreference(&obj->base); |
| 433 | return NULL; |
Chris Wilson | 866d12b | 2013-02-19 13:31:37 -0800 | [diff] [blame] | 434 | } |
| 435 | |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 436 | void |
| 437 | i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) |
| 438 | { |
| 439 | if (obj->stolen) { |
David Herrmann | 06e78ed | 2013-07-27 16:21:27 +0200 | [diff] [blame] | 440 | drm_mm_remove_node(obj->stolen); |
| 441 | kfree(obj->stolen); |
Chris Wilson | 0104fdb | 2012-11-15 11:32:26 +0000 | [diff] [blame] | 442 | obj->stolen = NULL; |
| 443 | } |
| 444 | } |