blob: d811b148817f698d8decbf7d5b23e9d59b7e4466 [file] [log] [blame]
Chris Wilson9797fbf2012-04-24 15:47:39 +01001/*
2 * Copyright © 2008-2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
David Howells760285e2012-10-02 18:01:07 +010029#include <drm/drmP.h>
30#include <drm/i915_drm.h>
Chris Wilson9797fbf2012-04-24 15:47:39 +010031#include "i915_drv.h"
32
33/*
34 * The BIOS typically reserves some of the system's memory for the exclusive
35 * use of the integrated graphics. This memory is no longer available for
36 * use by the OS and so the user finds that his system has less memory
37 * available than he put in. We refer to this memory as stolen.
38 *
39 * The BIOS will allocate its framebuffer from the stolen memory. Our
40 * goal is try to reuse that object for our own fbcon which must always
41 * be available for panics. Anything else we can reuse the stolen memory
42 * for is a boon.
43 */
44
Paulo Zanonid713fd42015-07-02 19:25:07 -030045int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
46 struct drm_mm_node *node, u64 size,
47 unsigned alignment)
48{
49 if (!drm_mm_initialized(&dev_priv->mm.stolen))
50 return -ENODEV;
51
52 return drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
53 DRM_MM_SEARCH_DEFAULT);
54}
55
/**
 * i915_gem_stolen_remove_node - return a range to the stolen-memory pool
 * @dev_priv: i915 device private (unused here; kept for API symmetry with
 *            i915_gem_stolen_insert_node)
 * @node: previously inserted drm_mm node; the node memory itself is not
 *        freed — the caller owns it
 */
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
61
/**
 * i915_stolen_to_physical - locate the physical base of stolen memory
 * @dev: drm device
 *
 * Reads the Graphics Base of Stolen Memory from igfx config space (gen3+),
 * trims the region if the GTT happens to live inside it (some gen<=4 parts),
 * and reserves the range so nothing else in the kernel claims it.
 *
 * Returns the physical base address, or 0 if stolen memory could not be
 * located or reserved. May shrink dev_priv->gtt.stolen_size as a side effect
 * when carving out an embedded GTT.
 */
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However gen2 requires an unavailable symbol.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		base &= ~((1<<20) - 1);	/* low 20 bits are not address bits */
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		/* Two candidate windows, both starting as the full stolen
		 * range; each is trimmed against one end of the GTT and the
		 * larger survivor wins.
		 */
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
		};
		u64 gtt_start, gtt_end;

		gtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			/* gen4 splits the GTT base across lo/hi fields */
			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			gtt_start &= PGTBL_ADDRESS_LO_MASK;
		/* 4 bytes per GTT entry */
		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

		/* Trim chunk 0 to end before the GTT, chunk 1 to start after */
		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
			stolen[0].end = gtt_start;
		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
			stolen[1].start = gtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long) gtt_start,
				      (unsigned long long) gtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
		}
	}


	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    dev_priv->gtt.stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
			base = 0;
		}
	}

	return base;
}
170
Ben Widawskyedc0fdb2014-06-19 12:06:11 -0700171static int find_compression_threshold(struct drm_device *dev,
172 struct drm_mm_node *node,
Ben Widawsky5e59f712014-06-30 10:41:24 -0700173 int size,
174 int fb_cpp)
Ben Widawskyedc0fdb2014-06-19 12:06:11 -0700175{
176 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky5e59f712014-06-30 10:41:24 -0700177 int compression_threshold = 1;
Ben Widawskyedc0fdb2014-06-19 12:06:11 -0700178 int ret;
179
Ben Widawsky5e59f712014-06-30 10:41:24 -0700180 /* HACK: This code depends on what we will do in *_enable_fbc. If that
181 * code changes, this code needs to change as well.
182 *
183 * The enable_fbc code will attempt to use one of our 2 compression
184 * thresholds, therefore, in that case, we only have 1 resort.
185 */
186
187 /* Try to over-allocate to reduce reallocations and fragmentation. */
Paulo Zanonid713fd42015-07-02 19:25:07 -0300188 ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
Ben Widawsky5e59f712014-06-30 10:41:24 -0700189 if (ret == 0)
Ben Widawskyedc0fdb2014-06-19 12:06:11 -0700190 return compression_threshold;
Ben Widawsky5e59f712014-06-30 10:41:24 -0700191
192again:
193 /* HW's ability to limit the CFB is 1:4 */
194 if (compression_threshold > 4 ||
195 (fb_cpp == 2 && compression_threshold == 2))
196 return 0;
197
Paulo Zanonid713fd42015-07-02 19:25:07 -0300198 ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
Ben Widawsky5e59f712014-06-30 10:41:24 -0700199 if (ret && INTEL_INFO(dev)->gen <= 4) {
200 return 0;
201 } else if (ret) {
202 compression_threshold <<= 1;
203 goto again;
204 } else {
205 return compression_threshold;
206 }
Ben Widawskyedc0fdb2014-06-19 12:06:11 -0700207}
208
/**
 * i915_setup_compression - allocate and program the FBC compressed buffers
 * @dev: drm device
 * @size: uncompressed framebuffer size to cover
 * @fb_cpp: framebuffer bytes per pixel
 *
 * Reserves the compressed framebuffer (CFB) in stolen memory via
 * find_compression_threshold(), then programs the hardware base registers.
 * Gen5+ and GM45 only need the CFB base; older parts additionally need a
 * compressed line-length buffer (LLB) allocated from stolen memory.
 *
 * Returns 0 on success, -ENOSPC if stolen space could not be reserved.
 */
static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Only assigned on the pre-GM45 path; err_fb is unreachable before
	 * the assignment, hence uninitialized_var() to quiet the compiler. */
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int ret;

	ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;	/* no stolen space at all */
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	dev_priv->fbc.threshold = ret;

	if (INTEL_INFO(dev_priv)->gen >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	} else {
		/* Older parts also need a compressed line-length buffer. */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		/* These registers take physical, not stolen-relative, addresses. */
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	dev_priv->fbc.uncompressed_size = size;

	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
		      size);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
err_llb:
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
262
Ben Widawsky5e59f712014-06-30 10:41:24 -0700263int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
Chris Wilson9797fbf2012-04-24 15:47:39 +0100264{
265 struct drm_i915_private *dev_priv = dev->dev_private;
266
Daniel Vetter446f8d82013-07-02 10:48:31 +0200267 if (!drm_mm_initialized(&dev_priv->mm.stolen))
Chris Wilson11be49e2012-11-15 11:32:20 +0000268 return -ENODEV;
269
Paulo Zanonicb0a08c2015-02-13 17:23:47 -0200270 if (size <= dev_priv->fbc.uncompressed_size)
Chris Wilson11be49e2012-11-15 11:32:20 +0000271 return 0;
272
273 /* Release any current block */
274 i915_gem_stolen_cleanup_compression(dev);
275
Ben Widawsky5e59f712014-06-30 10:41:24 -0700276 return i915_setup_compression(dev, size, fb_cpp);
Chris Wilson11be49e2012-11-15 11:32:20 +0000277}
278
279void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
280{
281 struct drm_i915_private *dev_priv = dev->dev_private;
282
Jani Nikula60ee5cd2015-02-05 12:04:27 +0200283 if (dev_priv->fbc.uncompressed_size == 0)
Chris Wilson11be49e2012-11-15 11:32:20 +0000284 return;
285
Paulo Zanonid713fd42015-07-02 19:25:07 -0300286 i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
Chris Wilson11be49e2012-11-15 11:32:20 +0000287
David Herrmann06e78ed2013-07-27 16:21:27 +0200288 if (dev_priv->fbc.compressed_llb) {
Paulo Zanonid713fd42015-07-02 19:25:07 -0300289 i915_gem_stolen_remove_node(dev_priv,
290 dev_priv->fbc.compressed_llb);
David Herrmann06e78ed2013-07-27 16:21:27 +0200291 kfree(dev_priv->fbc.compressed_llb);
292 }
Chris Wilson11be49e2012-11-15 11:32:20 +0000293
Jani Nikula60ee5cd2015-02-05 12:04:27 +0200294 dev_priv->fbc.uncompressed_size = 0;
Chris Wilson9797fbf2012-04-24 15:47:39 +0100295}
296
297void i915_gem_cleanup_stolen(struct drm_device *dev)
298{
Daniel Vetter4d7bb012012-12-18 15:24:37 +0100299 struct drm_i915_private *dev_priv = dev->dev_private;
300
Daniel Vetter446f8d82013-07-02 10:48:31 +0200301 if (!drm_mm_initialized(&dev_priv->mm.stolen))
302 return;
303
Chris Wilson11be49e2012-11-15 11:32:20 +0000304 i915_gem_stolen_cleanup_compression(dev);
Daniel Vetter4d7bb012012-12-18 15:24:37 +0100305 drm_mm_takedown(&dev_priv->mm.stolen);
Chris Wilson9797fbf2012-04-24 15:47:39 +0100306}
307
/**
 * i915_gem_init_stolen - discover stolen memory and set up its allocator
 * @dev: drm device
 *
 * Locates the physical base of stolen memory, deducts the BIOS-reserved
 * tail (decoded from GEN7_BIOS_RESERVED on gen7/gen8+), and initialises
 * the drm_mm range allocator over the remainder.
 *
 * Always returns 0; failure modes (DMAR active, no stolen, base not found,
 * implausible BIOS reservation) simply leave stolen memory disabled.
 */
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;
	int bios_reserved = 0;

#ifdef CONFIG_INTEL_IOMMU
	/* DMAR and stolen memory don't mix on pre-gen8 hardware. */
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

	/* The BIOS keeps the top of stolen for itself; decode how much. */
	if (INTEL_INFO(dev)->gen >= 8) {
		tmp = I915_READ(GEN7_BIOS_RESERVED);
		tmp >>= GEN8_BIOS_RESERVED_SHIFT;
		tmp &= GEN8_BIOS_RESERVED_MASK;
		bios_reserved = (1024*1024) << tmp;	/* 1MiB << field */
	} else if (IS_GEN7(dev)) {
		tmp = I915_READ(GEN7_BIOS_RESERVED);
		bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
			256*1024 : 1024*1024;
	}

	/* A reservation larger than stolen itself means the decode is bogus. */
	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
		return 0;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
		    bios_reserved);

	return 0;
}
Chris Wilson0104fdb2012-11-15 11:32:26 +0000351
352static struct sg_table *
353i915_pages_create_for_stolen(struct drm_device *dev,
354 u32 offset, u32 size)
355{
356 struct drm_i915_private *dev_priv = dev->dev_private;
357 struct sg_table *st;
358 struct scatterlist *sg;
359
360 DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
Ben Widawskya54c0c22013-01-24 14:45:00 -0800361 BUG_ON(offset > dev_priv->gtt.stolen_size - size);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000362
363 /* We hide that we have no struct page backing our stolen object
364 * by wrapping the contiguous physical allocation with a fake
365 * dma mapping in a single scatterlist.
366 */
367
368 st = kmalloc(sizeof(*st), GFP_KERNEL);
369 if (st == NULL)
370 return NULL;
371
372 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
373 kfree(st);
374 return NULL;
375 }
376
377 sg = st->sgl;
Akash Goelec14ba42014-01-13 16:24:45 +0530378 sg->offset = 0;
Imre Deaked23abd2013-03-26 15:14:19 +0200379 sg->length = size;
Chris Wilson0104fdb2012-11-15 11:32:26 +0000380
381 sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
382 sg_dma_len(sg) = size;
383
384 return st;
385}
386
/* Stolen objects are created with their pages already in place
 * (see _i915_gem_object_create_stolen), so get_pages must never run. */
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}
392
/* Tear down the fake single-entry scatterlist built by
 * i915_pages_create_for_stolen(). */
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}
399
Chris Wilsonef0cf272014-06-06 10:22:54 +0100400
401static void
402i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
403{
Paulo Zanonid713fd42015-07-02 19:25:07 -0300404 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
405
Chris Wilsonef0cf272014-06-06 10:22:54 +0100406 if (obj->stolen) {
Paulo Zanonid713fd42015-07-02 19:25:07 -0300407 i915_gem_stolen_remove_node(dev_priv, obj->stolen);
Chris Wilsonef0cf272014-06-06 10:22:54 +0100408 kfree(obj->stolen);
409 obj->stolen = NULL;
410 }
411}
/* GEM object vfuncs for stolen-backed objects: pages are pre-created and
 * pinned at object creation, and the stolen range is released with the
 * last reference. */
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
417
418static struct drm_i915_gem_object *
419_i915_gem_object_create_stolen(struct drm_device *dev,
420 struct drm_mm_node *stolen)
421{
422 struct drm_i915_gem_object *obj;
423
Chris Wilson42dcedd2012-11-15 11:32:30 +0000424 obj = i915_gem_object_alloc(dev);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000425 if (obj == NULL)
426 return NULL;
427
David Herrmann89c82332013-07-11 11:56:32 +0200428 drm_gem_private_object_init(dev, &obj->base, stolen->size);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000429 i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
430
431 obj->pages = i915_pages_create_for_stolen(dev,
432 stolen->start, stolen->size);
433 if (obj->pages == NULL)
434 goto cleanup;
435
436 obj->has_dma_mapping = true;
Ben Widawskydd53e1b2013-05-31 14:46:19 -0700437 i915_gem_object_pin_pages(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000438 obj->stolen = stolen;
439
Chris Wilsond46f1c32013-08-08 14:41:06 +0100440 obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
441 obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
Chris Wilson0104fdb2012-11-15 11:32:26 +0000442
443 return obj;
444
445cleanup:
Chris Wilson42dcedd2012-11-15 11:32:30 +0000446 i915_gem_object_free(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000447 return NULL;
448}
449
450struct drm_i915_gem_object *
451i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
452{
453 struct drm_i915_private *dev_priv = dev->dev_private;
454 struct drm_i915_gem_object *obj;
455 struct drm_mm_node *stolen;
David Herrmann06e78ed2013-07-27 16:21:27 +0200456 int ret;
Chris Wilson0104fdb2012-11-15 11:32:26 +0000457
Daniel Vetter446f8d82013-07-02 10:48:31 +0200458 if (!drm_mm_initialized(&dev_priv->mm.stolen))
Chris Wilson0104fdb2012-11-15 11:32:26 +0000459 return NULL;
460
461 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
462 if (size == 0)
463 return NULL;
464
David Herrmann06e78ed2013-07-27 16:21:27 +0200465 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
466 if (!stolen)
Chris Wilson0104fdb2012-11-15 11:32:26 +0000467 return NULL;
468
Paulo Zanonid713fd42015-07-02 19:25:07 -0300469 ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
David Herrmann06e78ed2013-07-27 16:21:27 +0200470 if (ret) {
471 kfree(stolen);
472 return NULL;
473 }
474
Chris Wilson0104fdb2012-11-15 11:32:26 +0000475 obj = _i915_gem_object_create_stolen(dev, stolen);
476 if (obj)
477 return obj;
478
Paulo Zanonid713fd42015-07-02 19:25:07 -0300479 i915_gem_stolen_remove_node(dev_priv, stolen);
David Herrmann06e78ed2013-07-27 16:21:27 +0200480 kfree(stolen);
Chris Wilson0104fdb2012-11-15 11:32:26 +0000481 return NULL;
482}
483
Chris Wilson866d12b2013-02-19 13:31:37 -0800484struct drm_i915_gem_object *
485i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
486 u32 stolen_offset,
487 u32 gtt_offset,
488 u32 size)
489{
490 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky40d749802013-07-31 16:59:59 -0700491 struct i915_address_space *ggtt = &dev_priv->gtt.base;
Chris Wilson866d12b2013-02-19 13:31:37 -0800492 struct drm_i915_gem_object *obj;
493 struct drm_mm_node *stolen;
Ben Widawsky2f633152013-07-17 12:19:03 -0700494 struct i915_vma *vma;
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700495 int ret;
Chris Wilson866d12b2013-02-19 13:31:37 -0800496
Daniel Vetter446f8d82013-07-02 10:48:31 +0200497 if (!drm_mm_initialized(&dev_priv->mm.stolen))
Chris Wilson866d12b2013-02-19 13:31:37 -0800498 return NULL;
499
500 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
501 stolen_offset, gtt_offset, size);
502
503 /* KISS and expect everything to be page-aligned */
Daniel Vetterf37b5c22015-02-10 23:12:27 +0100504 if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
505 WARN_ON(stolen_offset & 4095))
Chris Wilson866d12b2013-02-19 13:31:37 -0800506 return NULL;
507
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700508 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
509 if (!stolen)
510 return NULL;
511
Ben Widawsky338710e2013-07-05 14:41:03 -0700512 stolen->start = stolen_offset;
513 stolen->size = size;
514 ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700515 if (ret) {
Chris Wilson866d12b2013-02-19 13:31:37 -0800516 DRM_DEBUG_KMS("failed to allocate stolen space\n");
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700517 kfree(stolen);
Chris Wilson866d12b2013-02-19 13:31:37 -0800518 return NULL;
519 }
520
521 obj = _i915_gem_object_create_stolen(dev, stolen);
522 if (obj == NULL) {
523 DRM_DEBUG_KMS("failed to allocate stolen object\n");
Paulo Zanonid713fd42015-07-02 19:25:07 -0300524 i915_gem_stolen_remove_node(dev_priv, stolen);
David Herrmann06e78ed2013-07-27 16:21:27 +0200525 kfree(stolen);
Chris Wilson866d12b2013-02-19 13:31:37 -0800526 return NULL;
527 }
528
Jesse Barnes3727d552013-05-08 10:45:14 -0700529 /* Some objects just need physical mem from stolen space */
Daniel Vetter190d6cd2013-07-04 13:06:28 +0200530 if (gtt_offset == I915_GTT_OFFSET_NONE)
Jesse Barnes3727d552013-05-08 10:45:14 -0700531 return obj;
532
Daniel Vettere656a6c2013-08-14 14:14:04 +0200533 vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
Dan Carpenterdb473b32013-07-19 08:45:46 +0300534 if (IS_ERR(vma)) {
535 ret = PTR_ERR(vma);
Ben Widawsky2f633152013-07-17 12:19:03 -0700536 goto err_out;
537 }
538
Chris Wilson866d12b2013-02-19 13:31:37 -0800539 /* To simplify the initialisation sequence between KMS and GTT,
540 * we allow construction of the stolen object prior to
541 * setting up the GTT space. The actual reservation will occur
542 * later.
543 */
Ben Widawsky2f633152013-07-17 12:19:03 -0700544 vma->node.start = gtt_offset;
545 vma->node.size = size;
Ben Widawsky40d749802013-07-31 16:59:59 -0700546 if (drm_mm_initialized(&ggtt->mm)) {
547 ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700548 if (ret) {
Chris Wilson866d12b2013-02-19 13:31:37 -0800549 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
Daniel Vetter4a025e22013-08-14 10:01:32 +0200550 goto err_vma;
Chris Wilson866d12b2013-02-19 13:31:37 -0800551 }
Ben Widawskyedd41a82013-07-05 14:41:05 -0700552 }
Chris Wilson866d12b2013-02-19 13:31:37 -0800553
Tvrtko Ursulinaff43762014-10-24 12:42:33 +0100554 vma->bound |= GLOBAL_BIND;
Chris Wilson866d12b2013-02-19 13:31:37 -0800555
Ben Widawsky35c20a62013-05-31 11:28:48 -0700556 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -0700557 list_add_tail(&vma->mm_list, &ggtt->inactive_list);
Daniel Vetterd8ccba82013-12-17 23:42:11 +0100558 i915_gem_object_pin_pages(obj);
Chris Wilson866d12b2013-02-19 13:31:37 -0800559
560 return obj;
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700561
Daniel Vetter4a025e22013-08-14 10:01:32 +0200562err_vma:
563 i915_gem_vma_destroy(vma);
Ben Widawskyf7f18182013-07-17 12:19:02 -0700564err_out:
Paulo Zanonid713fd42015-07-02 19:25:07 -0300565 i915_gem_stolen_remove_node(dev_priv, stolen);
Dave Airlie32c913e2013-08-07 18:09:03 +1000566 kfree(stolen);
Ben Widawskyb3a070c2013-07-05 14:41:02 -0700567 drm_gem_object_unreference(&obj->base);
568 return NULL;
Chris Wilson866d12b2013-02-19 13:31:37 -0800569}