/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

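/* Reserve a range of stolen memory for the caller's drm_mm_node. All
 * reservations are serialised by dev_priv->mm.stolen_lock, and the call
 * fails with -ENODEV if the stolen allocator was never initialised.
 */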
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* See the comment at the drm_mm_init() call for more about this check.
	 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
	if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

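/* As above, but search the whole of the usable stolen area (everything
 * below ggtt->stolen_usable_size).
 */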
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0,
						    ggtt->stolen_usable_size);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

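/* Determine the CPU physical address of the base of stolen memory, which
 * has to be read from a platform-specific location, and check that the
 * region collides neither with the GTT nor with any resource already
 * claimed by another driver.
 */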
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		base &= ~((1<<20) - 1);
	} else if (IS_I865G(dev)) {
		u16 toud = 0;

		/*
		 * FIXME is the graphics stolen memory region
		 * always at TOUD? I.e. is it always the last
		 * one to be allocated by the BIOS?
		 */
		pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I865_TOUD, &toud);

		base = toud << 16;
	} else if (IS_I85X(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I85X_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
					 I85X_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_845G(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I830_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32)ggtt->stolen_size - 1);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    ggtt->stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)ggtt->stolen_size);
			base = 0;
		}
	}

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

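/* The *_get_stolen_reserved() helpers below decode the platform-specific
 * register that describes the portion of stolen memory reserved for
 * hardware use, returning its base and size so that the allocator can
 * stay clear of it.
 */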
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	unsigned long stolen_top = dev_priv->mm.stolen_base +
				   ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	unsigned long stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

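/* Initialise the stolen-memory allocator: locate the physical base of
 * stolen memory, subtract the reserved portion reported by the hardware,
 * and set up a drm_mm range manager over the usable remainder.
 */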
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long reserved_total, reserved_base = 0, reserved_size;
	unsigned long stolen_top;

	mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev))
			g4x_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	default:
		if (IS_BROADWELL(dev_priv) ||
		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
			bdw_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		else
			gen8_get_stolen_reserved(dev_priv, &reserved_base,
						 &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
			      reserved_base, reserved_base + reserved_size,
			      dev_priv->mm.stolen_base, stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;

	/*
	 * Basic memrange allocator for stolen space.
	 *
	 * TODO: Notice that some platforms require us to not use the first page
	 * of the stolen memory but their BIOSes may still put the framebuffer
	 * on the first page. So we don't reserve this page for now because of
	 * that. Our current solution is to just prevent new nodes from being
	 * inserted on the first page - see the check we have at
	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
	 * problem later.
	 */
	drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > ggtt->stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

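/* The backing pages for a stolen object are created up front by
 * _i915_gem_object_create_stolen() and pinned for the object's lifetime,
 * so this ->get_pages() hook should never be reached, hence the BUG().
 */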
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	if (obj->stolen) {
		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

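/* Wrap an already-reserved stolen drm_mm_node in a GEM object. The pages
 * are created and pinned immediately, since the underlying stolen memory
 * is a fixed physical allocation that cannot be swapped out.
 */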
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

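/* Allocate a fresh GEM object of @size bytes from anywhere in the usable
 * stolen area, with page (4096 byte) alignment.
 */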
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

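/* Create a GEM object for a range of stolen memory that the BIOS has
 * already populated (e.g. the initial framebuffer), reserving both the
 * given stolen range and, if requested, a fixed GTT offset so that
 * scanout can continue undisturbed.
 */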
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev->struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
	    WARN_ON(stolen_offset & 4095))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->base.mm)) {
		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err;
		}

		vma->bound |= GLOBAL_BIND;
		__i915_vma_set_map_and_fenceable(vma);
		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
	}

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err:
	drm_gem_object_unreference(&obj->base);
	return NULL;
}