/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

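/* Carve a node out of the stolen range manager, restricted to the
 * caller-supplied [start, end) window and alignment; serialised by
 * dev_priv->mm.stolen_lock.
 */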
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                                         struct drm_mm_node *node, u64 size,
                                         unsigned alignment, u64 start, u64 end)
{
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        /* See the comment at the drm_mm_init() call for more about this check.
         * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
         */
        if (start < 4096 && (IS_GEN8(dev_priv) ||
                             IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
                start = 4096;

        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
                                          alignment, start, end,
                                          DRM_MM_SEARCH_DEFAULT);
        mutex_unlock(&dev_priv->mm.stolen_lock);

        return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;

        return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
                                                    alignment, 0,
                                                    ggtt->stolen_usable_size);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
                                 struct drm_mm_node *node)
{
        mutex_lock(&dev_priv->mm.stolen_lock);
        drm_mm_remove_node(node);
        mutex_unlock(&dev_priv->mm.stolen_lock);
}

static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct resource *r;
        u32 base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at register BSM (0x5c) in the igfx configuration space. On a few
         * (desktop) machines this is also mirrored in the bridge device at
         * different locations, or in the MCHBAR.
         *
         * On 865 we just check the TOUD register.
         *
         * On 830/845/85x the stolen memory base isn't available in any
         * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
         *
         */
        base = 0;
        if (INTEL_GEN(dev_priv) >= 3) {
                u32 bsm;

                pci_read_config_dword(pdev, INTEL_BSM, &bsm);

                base = bsm & INTEL_BSM_MASK;
        } else if (IS_I865G(dev_priv)) {
                u32 tseg_size = 0;
                u16 toud = 0;
                u8 tmp;

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I845_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE) {
                        switch (tmp & I845_TSEG_SIZE_MASK) {
                        case I845_TSEG_SIZE_512K:
                                tseg_size = KB(512);
                                break;
                        case I845_TSEG_SIZE_1M:
                                tseg_size = MB(1);
                                break;
                        }
                }

                pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
                                         I865_TOUD, &toud);

                base = (toud << 16) + tseg_size;
        } else if (IS_I85X(dev_priv)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I85X_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE)
                        tseg_size = MB(1);

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
                                         I85X_DRB3, &tmp);
                tom = tmp * MB(32);

                base = tom - tseg_size - ggtt->stolen_size;
        } else if (IS_845G(dev_priv)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I845_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE) {
                        switch (tmp & I845_TSEG_SIZE_MASK) {
                        case I845_TSEG_SIZE_512K:
                                tseg_size = KB(512);
                                break;
                        case I845_TSEG_SIZE_1M:
                                tseg_size = MB(1);
                                break;
                        }
                }

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);

                base = tom - tseg_size - ggtt->stolen_size;
        } else if (IS_I830(dev_priv)) {
                u32 tseg_size = 0;
                u32 tom;
                u8 tmp;

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_ESMRAMC, &tmp);

                if (tmp & TSEG_ENABLE) {
                        if (tmp & I830_TSEG_SIZE_1M)
                                tseg_size = MB(1);
                        else
                                tseg_size = KB(512);
                }

                pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);

                base = tom - tseg_size - ggtt->stolen_size;
        }

        if (base == 0)
                return 0;

        /* make sure we don't clobber the GTT if it's within stolen memory */
        if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
            !IS_G4X(dev_priv)) {
                struct {
                        u32 start, end;
                } stolen[2] = {
                        { .start = base, .end = base + ggtt->stolen_size, },
                        { .start = base, .end = base + ggtt->stolen_size, },
                };
                u64 ggtt_start, ggtt_end;

                ggtt_start = I915_READ(PGTBL_CTL);
                if (IS_GEN4(dev_priv))
                        ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
                                     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        ggtt_start &= PGTBL_ADDRESS_LO_MASK;
                ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

                if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
                        stolen[0].end = ggtt_start;
                if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
                        stolen[1].start = ggtt_end;

                /* pick the larger of the two chunks */
                if (stolen[0].end - stolen[0].start >
                    stolen[1].end - stolen[1].start) {
                        base = stolen[0].start;
                        ggtt->stolen_size = stolen[0].end - stolen[0].start;
                } else {
                        base = stolen[1].start;
                        ggtt->stolen_size = stolen[1].end - stolen[1].start;
                }

                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
                                      (unsigned long long)ggtt_start,
                                      (unsigned long long)ggtt_end - 1);
                        DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
                                      base, base + (u32)ggtt->stolen_size - 1);
                }
        }

        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * One more attempt but this time requesting region from
                 * base + 1, as we have seen that this resolves the region
                 * conflict with the PCI Bus.
                 * This is a BIOS w/a: Some BIOS wrap stolen in the root
                 * PCI bus, but have an off-by-one error. Hence retry the
                 * reservation starting from 1 instead of 0.
                 */
                r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
                                            ggtt->stolen_size - 1,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
                if (r == NULL && !IS_GEN3(dev_priv)) {
                        DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
                                  base, base + (uint32_t)ggtt->stolen_size);
                        base = 0;
                }
        }

        return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        drm_mm_takedown(&dev_priv->mm.stolen);
}

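/* The *_get_stolen_reserved() helpers below decode, per platform, the base
 * and size of the portion of stolen memory that the BIOS has set aside for
 * its own use; i915_gem_init_stolen() subtracts that from the usable range.
 */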
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    unsigned long *base, unsigned long *size)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
                                     CTG_STOLEN_RESERVED :
                                     ELK_STOLEN_RESERVED);
        unsigned long stolen_top = dev_priv->mm.stolen_base +
                                   ggtt->stolen_size;

        *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

        WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

        /* On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and there's
         * nothing reserved. */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
        case GEN6_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_512K:
                *size = 512 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_128K:
                *size = 128 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN7_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
        case GEN8_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_2M:
                *size = 2 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_4M:
                *size = 4 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_8M:
                *size = 8 * 1024 * 1024;
                break;
        default:
                *size = 8 * 1024 * 1024;
                MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    unsigned long *base, unsigned long *size)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
        unsigned long stolen_top;

        stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        /* On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and there's
         * nothing reserved. */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}

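/* One-time setup: locate the physical base of stolen memory, work out how
 * much of it the BIOS has reserved, and initialise the drm_mm range manager
 * over the remaining usable portion.
 */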
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned long reserved_total, reserved_base = 0, reserved_size;
        unsigned long stolen_top;

        mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
                DRM_INFO("DMAR active, disabling use of stolen memory\n");
                return 0;
        }
#endif

        if (ggtt->stolen_size == 0)
                return 0;

        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

        switch (INTEL_INFO(dev_priv)->gen) {
        case 2:
        case 3:
                break;
        case 4:
                if (IS_G4X(dev_priv))
                        g4x_get_stolen_reserved(dev_priv, &reserved_base,
                                                &reserved_size);
                break;
        case 5:
                /* Assume the gen6 maximum for the older platforms. */
                reserved_size = 1024 * 1024;
                reserved_base = stolen_top - reserved_size;
                break;
        case 6:
                gen6_get_stolen_reserved(dev_priv, &reserved_base,
                                         &reserved_size);
                break;
        case 7:
                gen7_get_stolen_reserved(dev_priv, &reserved_base,
                                         &reserved_size);
                break;
        default:
                if (IS_BROADWELL(dev_priv) ||
                    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                        bdw_get_stolen_reserved(dev_priv, &reserved_base,
                                                &reserved_size);
                else
                        gen8_get_stolen_reserved(dev_priv, &reserved_base,
                                                 &reserved_size);
                break;
        }

        /* It is possible for the reserved base to be zero, but the register
         * field for size doesn't have a zero option. */
        if (reserved_base == 0) {
                reserved_size = 0;
                reserved_base = stolen_top;
        }

        if (reserved_base < dev_priv->mm.stolen_base ||
            reserved_base + reserved_size > stolen_top) {
                DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
                              reserved_base, reserved_base + reserved_size,
                              dev_priv->mm.stolen_base, stolen_top);
                return 0;
        }

        ggtt->stolen_reserved_base = reserved_base;
        ggtt->stolen_reserved_size = reserved_size;

        /* It is possible for the reserved area to end before the end of stolen
         * memory, so just consider the start. */
        reserved_total = stolen_top - reserved_base;

        DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
                      ggtt->stolen_size >> 10,
                      (ggtt->stolen_size - reserved_total) >> 10);

        ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;

        /*
         * Basic memrange allocator for stolen space.
         *
         * TODO: Notice that some platforms require us to not use the first page
         * of the stolen memory but their BIOSes may still put the framebuffer
         * on the first page. So we don't reserve this page for now because of
         * that. Our current solution is to just prevent new nodes from being
         * inserted on the first page - see the check we have at
         * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
         * problem later.
         */
        drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);

        return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct sg_table *st;
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
        BUG_ON(offset > ggtt->stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return NULL;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}

static struct sg_table *
i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        return i915_pages_create_for_stolen(obj->base.dev,
                                            obj->stolen->start,
                                            obj->stolen->size);
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        /* Should only be called during free */
        sg_free_table(pages);
        kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

        __i915_gem_object_unpin_pages(obj);

        if (obj->stolen) {
                i915_gem_stolen_remove_node(dev_priv, obj->stolen);
                kfree(obj->stolen);
                obj->stolen = NULL;
        }
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};

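/* Wrap an already-allocated stolen range in a GEM object. The object has no
 * struct page backing; its "pages" are faked up by the stolen ops above.
 */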
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->stolen = stolen;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        obj->cache_level = HAS_LLC(to_i915(dev)) ?
                           I915_CACHE_LLC : I915_CACHE_NONE;

        if (i915_gem_object_pin_pages(obj))
                goto cleanup;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
        if (size == 0)
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
        if (ret) {
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;

        i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
        return NULL;
}

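/* Create an object for a fixed, pre-existing range of stolen memory (e.g. a
 * framebuffer set up by the BIOS), optionally binding it at a fixed GTT
 * offset when gtt_offset != I915_GTT_OFFSET_NONE.
 */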
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        lockdep_assert_held(&dev->struct_mutex);

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                      stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
            WARN_ON(stolen_offset & 4095))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        mutex_unlock(&dev_priv->mm.stolen_lock);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                i915_gem_stolen_remove_node(dev_priv, stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_pages;
        }

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;

        ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                goto err_pages;
        }

        vma->pages = obj->mm.pages;
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
        list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
        list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
        obj->bind_count++;

        return obj;

err_pages:
        i915_gem_object_unpin_pages(obj);
err:
        i915_gem_object_put(obj);
        return NULL;
}