blob: 559f75450d35b58aed2faae6765d0b8598bb6175 [file] [log] [blame]
/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
28
David Howells760285e2012-10-02 18:01:07 +010029#include <drm/drmP.h>
30#include <drm/i915_drm.h>
Chris Wilson9797fbf2012-04-24 15:47:39 +010031#include "i915_drv.h"
32
/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than was physically installed. We refer to this memory as
 * "stolen".
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
44
Chris Wilsone12a2d52012-11-15 11:32:18 +000045static unsigned long i915_stolen_to_physical(struct drm_device *dev)
Chris Wilson9797fbf2012-04-24 15:47:39 +010046{
47 struct drm_i915_private *dev_priv = dev->dev_private;
48 struct pci_dev *pdev = dev_priv->bridge_dev;
49 u32 base;
50
Chris Wilson9797fbf2012-04-24 15:47:39 +010051 /* On the machines I have tested the Graphics Base of Stolen Memory
Chris Wilsone12a2d52012-11-15 11:32:18 +000052 * is unreliable, so on those compute the base by subtracting the
53 * stolen memory from the Top of Low Usable DRAM which is where the
54 * BIOS places the graphics stolen memory.
55 *
56 * On gen2, the layout is slightly different with the Graphics Segment
57 * immediately following Top of Memory (or Top of Usable DRAM). Note
58 * it appears that TOUD is only reported by 865g, so we just use the
59 * top of memory as determined by the e820 probe.
60 *
61 * XXX gen2 requires an unavailable symbol and 945gm fails with
62 * its value of TOLUD.
Chris Wilson9797fbf2012-04-24 15:47:39 +010063 */
Chris Wilsone12a2d52012-11-15 11:32:18 +000064 base = 0;
Jesse Barnesc9cddff2013-05-08 10:45:13 -070065 if (IS_VALLEYVIEW(dev)) {
66 pci_read_config_dword(dev->pdev, 0x5c, &base);
67 base &= ~((1<<20) - 1);
68 } else if (INTEL_INFO(dev)->gen >= 6) {
Chris Wilsone12a2d52012-11-15 11:32:18 +000069 /* Read Base Data of Stolen Memory Register (BDSM) directly.
70 * Note that there is also a MCHBAR miror at 0x1080c0 or
71 * we could use device 2:0x5c instead.
72 */
73 pci_read_config_dword(pdev, 0xB0, &base);
74 base &= ~4095; /* lower bits used for locking register */
75 } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
76 /* Read Graphics Base of Stolen Memory directly */
Chris Wilson9797fbf2012-04-24 15:47:39 +010077 pci_read_config_dword(pdev, 0xA4, &base);
Chris Wilsone12a2d52012-11-15 11:32:18 +000078#if 0
79 } else if (IS_GEN3(dev)) {
Chris Wilson9797fbf2012-04-24 15:47:39 +010080 u8 val;
Chris Wilsone12a2d52012-11-15 11:32:18 +000081 /* Stolen is immediately below Top of Low Usable DRAM */
Chris Wilson9797fbf2012-04-24 15:47:39 +010082 pci_read_config_byte(pdev, 0x9c, &val);
83 base = val >> 3 << 27;
Chris Wilsone12a2d52012-11-15 11:32:18 +000084 base -= dev_priv->mm.gtt->stolen_size;
85 } else {
86 /* Stolen is immediately above Top of Memory */
87 base = max_low_pfn_mapped << PAGE_SHIFT;
Chris Wilson9797fbf2012-04-24 15:47:39 +010088#endif
Chris Wilsone12a2d52012-11-15 11:32:18 +000089 }
Chris Wilson9797fbf2012-04-24 15:47:39 +010090
Chris Wilsone12a2d52012-11-15 11:32:18 +000091 return base;
Chris Wilson9797fbf2012-04-24 15:47:39 +010092}
93
Chris Wilson11be49e2012-11-15 11:32:20 +000094static int i915_setup_compression(struct drm_device *dev, int size)
Chris Wilson9797fbf2012-04-24 15:47:39 +010095{
96 struct drm_i915_private *dev_priv = dev->dev_private;
97 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
Chris Wilson9797fbf2012-04-24 15:47:39 +010098
Chris Wilson11be49e2012-11-15 11:32:20 +000099 /* Try to over-allocate to reduce reallocations and fragmentation */
100 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
101 size <<= 1, 4096, 0);
102 if (!compressed_fb)
103 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
104 size >>= 1, 4096, 0);
Chris Wilson9797fbf2012-04-24 15:47:39 +0100105 if (compressed_fb)
106 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
107 if (!compressed_fb)
108 goto err;
109
Chris Wilson11be49e2012-11-15 11:32:20 +0000110 if (HAS_PCH_SPLIT(dev))
111 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
112 else if (IS_GM45(dev)) {
113 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
114 } else {
Chris Wilson9797fbf2012-04-24 15:47:39 +0100115 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
116 4096, 4096, 0);
117 if (compressed_llb)
118 compressed_llb = drm_mm_get_block(compressed_llb,
119 4096, 4096);
120 if (!compressed_llb)
121 goto err_fb;
122
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700123 dev_priv->fbc.compressed_llb = compressed_llb;
Chris Wilson9797fbf2012-04-24 15:47:39 +0100124
Chris Wilson11be49e2012-11-15 11:32:20 +0000125 I915_WRITE(FBC_CFB_BASE,
126 dev_priv->mm.stolen_base + compressed_fb->start);
127 I915_WRITE(FBC_LL_BASE,
128 dev_priv->mm.stolen_base + compressed_llb->start);
129 }
Chris Wilson9797fbf2012-04-24 15:47:39 +0100130
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700131 dev_priv->fbc.compressed_fb = compressed_fb;
132 dev_priv->fbc.size = size;
Chris Wilson9797fbf2012-04-24 15:47:39 +0100133
Chris Wilson11be49e2012-11-15 11:32:20 +0000134 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
135 size);
Chris Wilson9797fbf2012-04-24 15:47:39 +0100136
Chris Wilson11be49e2012-11-15 11:32:20 +0000137 return 0;
138
Chris Wilson9797fbf2012-04-24 15:47:39 +0100139err_fb:
140 drm_mm_put_block(compressed_fb);
141err:
Chris Wilsond8241782013-04-27 12:44:16 +0100142 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
Chris Wilson11be49e2012-11-15 11:32:20 +0000143 return -ENOSPC;
Chris Wilson9797fbf2012-04-24 15:47:39 +0100144}
145
Chris Wilson11be49e2012-11-15 11:32:20 +0000146int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
Chris Wilson9797fbf2012-04-24 15:47:39 +0100147{
148 struct drm_i915_private *dev_priv = dev->dev_private;
149
Chris Wilson11be49e2012-11-15 11:32:20 +0000150 if (dev_priv->mm.stolen_base == 0)
151 return -ENODEV;
152
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700153 if (size < dev_priv->fbc.size)
Chris Wilson11be49e2012-11-15 11:32:20 +0000154 return 0;
155
156 /* Release any current block */
157 i915_gem_stolen_cleanup_compression(dev);
158
159 return i915_setup_compression(dev, size);
160}
161
162void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
163{
164 struct drm_i915_private *dev_priv = dev->dev_private;
165
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700166 if (dev_priv->fbc.size == 0)
Chris Wilson11be49e2012-11-15 11:32:20 +0000167 return;
168
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700169 if (dev_priv->fbc.compressed_fb)
170 drm_mm_put_block(dev_priv->fbc.compressed_fb);
Chris Wilson11be49e2012-11-15 11:32:20 +0000171
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700172 if (dev_priv->fbc.compressed_llb)
173 drm_mm_put_block(dev_priv->fbc.compressed_llb);
Chris Wilson11be49e2012-11-15 11:32:20 +0000174
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700175 dev_priv->fbc.size = 0;
Chris Wilson9797fbf2012-04-24 15:47:39 +0100176}
177
178void i915_gem_cleanup_stolen(struct drm_device *dev)
179{
Daniel Vetter4d7bb012012-12-18 15:24:37 +0100180 struct drm_i915_private *dev_priv = dev->dev_private;
181
Chris Wilson11be49e2012-11-15 11:32:20 +0000182 i915_gem_stolen_cleanup_compression(dev);
Daniel Vetter4d7bb012012-12-18 15:24:37 +0100183 drm_mm_takedown(&dev_priv->mm.stolen);
Chris Wilson9797fbf2012-04-24 15:47:39 +0100184}
185
186int i915_gem_init_stolen(struct drm_device *dev)
187{
188 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesc9cddff2013-05-08 10:45:13 -0700189 int bios_reserved = 0;
Chris Wilson9797fbf2012-04-24 15:47:39 +0100190
Chris Wilsone12a2d52012-11-15 11:32:18 +0000191 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
192 if (dev_priv->mm.stolen_base == 0)
193 return 0;
194
Ben Widawskya54c0c22013-01-24 14:45:00 -0800195 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
196 dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
Chris Wilsone12a2d52012-11-15 11:32:18 +0000197
Jesse Barnesc9cddff2013-05-08 10:45:13 -0700198 if (IS_VALLEYVIEW(dev))
199 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
200
Chris Wilson9797fbf2012-04-24 15:47:39 +0100201 /* Basic memrange allocator for stolen space */
Jesse Barnesc9cddff2013-05-08 10:45:13 -0700202 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
203 bios_reserved);
Chris Wilson9797fbf2012-04-24 15:47:39 +0100204
205 return 0;
206}
Chris Wilson0104fdb2012-11-15 11:32:26 +0000207
208static struct sg_table *
209i915_pages_create_for_stolen(struct drm_device *dev,
210 u32 offset, u32 size)
211{
212 struct drm_i915_private *dev_priv = dev->dev_private;
213 struct sg_table *st;
214 struct scatterlist *sg;
215
216 DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
Ben Widawskya54c0c22013-01-24 14:45:00 -0800217 BUG_ON(offset > dev_priv->gtt.stolen_size - size);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000218
219 /* We hide that we have no struct page backing our stolen object
220 * by wrapping the contiguous physical allocation with a fake
221 * dma mapping in a single scatterlist.
222 */
223
224 st = kmalloc(sizeof(*st), GFP_KERNEL);
225 if (st == NULL)
226 return NULL;
227
228 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
229 kfree(st);
230 return NULL;
231 }
232
233 sg = st->sgl;
Imre Deaked23abd2013-03-26 15:14:19 +0200234 sg->offset = offset;
235 sg->length = size;
Chris Wilson0104fdb2012-11-15 11:32:26 +0000236
237 sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
238 sg_dma_len(sg) = size;
239
240 return st;
241}
242
243static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
244{
245 BUG();
246 return -EINVAL;
247}
248
249static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
250{
251 /* Should only be called during free */
252 sg_free_table(obj->pages);
253 kfree(obj->pages);
254}
255
256static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
257 .get_pages = i915_gem_object_get_pages_stolen,
258 .put_pages = i915_gem_object_put_pages_stolen,
259};
260
261static struct drm_i915_gem_object *
262_i915_gem_object_create_stolen(struct drm_device *dev,
263 struct drm_mm_node *stolen)
264{
265 struct drm_i915_gem_object *obj;
266
Chris Wilson42dcedd2012-11-15 11:32:30 +0000267 obj = i915_gem_object_alloc(dev);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000268 if (obj == NULL)
269 return NULL;
270
271 if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
272 goto cleanup;
273
274 i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
275
276 obj->pages = i915_pages_create_for_stolen(dev,
277 stolen->start, stolen->size);
278 if (obj->pages == NULL)
279 goto cleanup;
280
281 obj->has_dma_mapping = true;
Ben Widawskydd53e1b2013-05-31 14:46:19 -0700282 i915_gem_object_pin_pages(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000283 obj->stolen = stolen;
284
285 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
286 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
287 obj->cache_level = I915_CACHE_NONE;
288
289 return obj;
290
291cleanup:
Chris Wilson42dcedd2012-11-15 11:32:30 +0000292 i915_gem_object_free(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000293 return NULL;
294}
295
296struct drm_i915_gem_object *
297i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
298{
299 struct drm_i915_private *dev_priv = dev->dev_private;
300 struct drm_i915_gem_object *obj;
301 struct drm_mm_node *stolen;
302
303 if (dev_priv->mm.stolen_base == 0)
304 return NULL;
305
306 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
307 if (size == 0)
308 return NULL;
309
310 stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
311 if (stolen)
312 stolen = drm_mm_get_block(stolen, size, 4096);
313 if (stolen == NULL)
314 return NULL;
315
316 obj = _i915_gem_object_create_stolen(dev, stolen);
317 if (obj)
318 return obj;
319
320 drm_mm_put_block(stolen);
321 return NULL;
322}
323
Chris Wilson866d12b2013-02-19 13:31:37 -0800324struct drm_i915_gem_object *
325i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
326 u32 stolen_offset,
327 u32 gtt_offset,
328 u32 size)
329{
330 struct drm_i915_private *dev_priv = dev->dev_private;
331 struct drm_i915_gem_object *obj;
332 struct drm_mm_node *stolen;
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700333 int ret;
Chris Wilson866d12b2013-02-19 13:31:37 -0800334
335 if (dev_priv->mm.stolen_base == 0)
336 return NULL;
337
338 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
339 stolen_offset, gtt_offset, size);
340
341 /* KISS and expect everything to be page-aligned */
342 BUG_ON(stolen_offset & 4095);
Chris Wilson866d12b2013-02-19 13:31:37 -0800343 BUG_ON(size & 4095);
344
345 if (WARN_ON(size == 0))
346 return NULL;
347
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700348 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
349 if (!stolen)
350 return NULL;
351
Ben Widawsky338710e2013-07-05 14:41:03 -0700352 stolen->start = stolen_offset;
353 stolen->size = size;
354 ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700355 if (ret) {
Chris Wilson866d12b2013-02-19 13:31:37 -0800356 DRM_DEBUG_KMS("failed to allocate stolen space\n");
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700357 kfree(stolen);
Chris Wilson866d12b2013-02-19 13:31:37 -0800358 return NULL;
359 }
360
361 obj = _i915_gem_object_create_stolen(dev, stolen);
362 if (obj == NULL) {
363 DRM_DEBUG_KMS("failed to allocate stolen object\n");
364 drm_mm_put_block(stolen);
365 return NULL;
366 }
367
Jesse Barnes3727d552013-05-08 10:45:14 -0700368 /* Some objects just need physical mem from stolen space */
Daniel Vetter190d6cd2013-07-04 13:06:28 +0200369 if (gtt_offset == I915_GTT_OFFSET_NONE)
Jesse Barnes3727d552013-05-08 10:45:14 -0700370 return obj;
371
Chris Wilson866d12b2013-02-19 13:31:37 -0800372 /* To simplify the initialisation sequence between KMS and GTT,
373 * we allow construction of the stolen object prior to
374 * setting up the GTT space. The actual reservation will occur
375 * later.
376 */
377 if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700378 obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
379 if (!obj->gtt_space) {
380 DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
381 goto unref_out;
382 }
383
Ben Widawsky338710e2013-07-05 14:41:03 -0700384 obj->gtt_space->start = gtt_offset;
385 obj->gtt_space->size = size;
386 ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space,
387 obj->gtt_space);
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700388 if (ret) {
Chris Wilson866d12b2013-02-19 13:31:37 -0800389 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700390 goto free_out;
Chris Wilson866d12b2013-02-19 13:31:37 -0800391 }
Ben Widawskyedd41a82013-07-05 14:41:05 -0700392 } else {
393 if (WARN_ON(gtt_offset & ~PAGE_MASK))
394 DRM_DEBUG_KMS("Cannot preserve non page aligned offset\n");
395 obj->gtt_space =
396 (struct drm_mm_node *)((uintptr_t)(I915_GTT_RESERVED | gtt_offset));
397 }
Chris Wilson866d12b2013-02-19 13:31:37 -0800398
Chris Wilson866d12b2013-02-19 13:31:37 -0800399 obj->has_global_gtt_mapping = 1;
400
Ben Widawsky35c20a62013-05-31 11:28:48 -0700401 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
Chris Wilson866d12b2013-02-19 13:31:37 -0800402 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
403
404 return obj;
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700405
406free_out:
407 kfree(obj->gtt_space);
408 obj->gtt_space = NULL;
409unref_out:
410 drm_gem_object_unreference(&obj->base);
411 return NULL;
Chris Wilson866d12b2013-02-19 13:31:37 -0800412}
413
Chris Wilson0104fdb2012-11-15 11:32:26 +0000414void
415i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
416{
417 if (obj->stolen) {
418 drm_mm_put_block(obj->stolen);
419 obj->stolen = NULL;
420 }
421}