/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

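/*
 * Allocation interface for the stolen drm_mm range manager. Insertions and
 * removals are serialised by mm.stolen_lock, and on BDW/CHV and early KBL
 * steppings the first 4K page is skipped (see the workaround note below).
 */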
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* See the comment at the drm_mm_init() call for more about this check.
	 * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
	 */
	if (start < 4096 && (IS_GEN8(dev_priv) ||
			     IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0,
						    ggtt->stolen_usable_size);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

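/*
 * Determine the physical base address of stolen memory on this platform:
 * read it from the BSM register on gen3+, or derive it from TOUD/TOM and the
 * TSEG size on the oldest chipsets, then reserve the region so nothing else
 * in the kernel can claim it.
 */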
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		u32 bsm;

		pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);

		base = bsm & INTEL_BSM_MASK;
	} else if (IS_I865G(dev)) {
		u32 tseg_size = 0;
		u16 toud = 0;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I865_TOUD, &toud);

		base = (toud << 16) + tseg_size;
	} else if (IS_I85X(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I85X_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
					 I85X_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_845G(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I830_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32)ggtt->stolen_size - 1);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    ggtt->stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)ggtt->stolen_size);
			base = 0;
		}
	}

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

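/*
 * The *_get_stolen_reserved() helpers below decode the per-generation
 * "stolen reserved" registers into a base and size for the portion of
 * stolen memory that is kept back from the driver's allocator.
 */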
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	unsigned long stolen_top = dev_priv->mm.stolen_base +
				   ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	unsigned long stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

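/*
 * Set up the stolen memory allocator: find the physical base, work out how
 * much of the top of stolen is reserved by the hardware/firmware, and
 * initialise a drm_mm covering the remaining usable range.
 */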
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long reserved_total, reserved_base = 0, reserved_size;
	unsigned long stolen_top;

	mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev))
			g4x_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	default:
		if (IS_BROADWELL(dev_priv) ||
		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
			bdw_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		else
			gen8_get_stolen_reserved(dev_priv, &reserved_base,
						 &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
			      reserved_base, reserved_base + reserved_size,
			      dev_priv->mm.stolen_base, stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;

	/*
	 * Basic memrange allocator for stolen space.
	 *
	 * TODO: Notice that some platforms require us to not use the first page
	 * of the stolen memory but their BIOSes may still put the framebuffer
	 * on the first page. So we don't reserve this page for now because of
	 * that. Our current solution is to just prevent new nodes from being
	 * inserted on the first page - see the check we have at
	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
	 * problem later.
	 */
	drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);

	return 0;
}

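/*
 * Stolen memory has no struct pages, so the object is backed by a
 * single-entry sg_table whose dma address points straight at the physical
 * offset inside the stolen region.
 */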
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > ggtt->stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

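/*
 * The backing sg_table is created up front in
 * _i915_gem_object_create_stolen() and the pages stay pinned, so get_pages
 * should never be reached for a stolen object.
 */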
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (obj->stolen) {
		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

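/*
 * Wrap an already-reserved stolen drm_mm node in a GEM object. On failure
 * the caller keeps ownership of the node and must release it.
 */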
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

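/*
 * Allocate a fresh, page-aligned range from stolen memory and return it as
 * a GEM object, or NULL if stolen is unavailable or exhausted.
 */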
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

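/*
 * Create a GEM object around a range of stolen memory that is already in
 * use (for example the firmware framebuffer), optionally reserving it at a
 * fixed GGTT offset so the existing contents remain addressable.
 */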
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev->struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
	    WARN_ON(stolen_offset & 4095))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;

	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
		goto err;
	}

	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);
	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
	obj->bind_count++;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err:
	i915_gem_object_put(obj);
	return NULL;
}