/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)
/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;
	dma_addr_t base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM - TSEG_SIZE - stolen_size.
	 */
	base = 0;
	if (INTEL_GEN(dev_priv) >= 3) {
		u32 bsm;

		pci_read_config_dword(pdev, INTEL_BSM, &bsm);

		base = bsm & INTEL_BSM_MASK;
	} else if (IS_I865G(dev_priv)) {
		u32 tseg_size = 0;
		u16 toud = 0;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
					 I865_TOUD, &toud);

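		/*
		 * TOUD is reported at 64 KiB granularity, hence the shift;
		 * the assumption here is that stolen memory begins right
		 * above TOUD plus any enabled TSEG carve-out.
		 */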
		base = (toud << 16) + tseg_size;
	} else if (IS_I85X(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I85X_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
					 I85X_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I845G(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct {
			dma_addr_t start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

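		/* Each global GTT entry is 4 bytes on these generations, so
		 * the table spans ggtt_total_entries * 4 bytes from the base
		 * programmed in PGTBL_CTL (gen4 splits that base across a
		 * low and a high field). */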
		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev_priv))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			dma_addr_t end = base + ggtt->stolen_size - 1;

			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
				      &base, &end);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
					    ggtt->stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev_priv)) {
			dma_addr_t end = base + ggtt->stolen_size;

			DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
				  &base, &end);
			base = 0;
		}
	}

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and
	 * there's nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	dma_addr_t stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and
	 * there's nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	dma_addr_t reserved_base, stolen_top;
	u32 reserved_total, reserved_size;
	u32 stolen_usable_start;

	mutex_init(&dev_priv->mm.stolen_lock);

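	/* Stolen memory is not mapped through the IOMMU; on older
	 * generations, using it while DMAR is active is understood to
	 * risk faults, so simply opt out. */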
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
	reserved_base = 0;
	reserved_size = 0;

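	/* Query the portion at the top of stolen memory that the firmware
	 * keeps for its own use; where (and whether) that reservation is
	 * described varies from generation to generation. */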
	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev_priv))
			g4x_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	default:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		dma_addr_t reserved_top = reserved_base + reserved_size;

		DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
			      &reserved_base, &reserved_top,
			      &dev_priv->mm.stolen_base, &stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of
	 * stolen memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	stolen_usable_start = 0;
	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8)
		stolen_usable_start = 4096;

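	/* The allocator only hands out the slice of stolen memory that is
	 * neither reserved by the firmware at the top nor skipped by the
	 * first-page workaround at the bottom. */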
	ggtt->stolen_usable_size =
		ggtt->stolen_size - reserved_total - stolen_usable_start;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
		    ggtt->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

static struct sg_table *
i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	return i915_pages_create_for_stolen(obj->base.dev,
					    obj->stolen->start,
					    obj->stolen->size);
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
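	/* Stolen objects have no struct pages and are never swapped out,
	 * so start them in both the CPU and GTT domains, and assume they
	 * are cacheable only where the LLC snoops for us. */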
	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}
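
/*
 * Note that returning NULL here is not treated as fatal: callers are
 * expected to fall back to a regular shmem-backed object when stolen
 * space is exhausted, as e.g. the fbdev setup path does.
 */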

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

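	/* The object was placed at a fixed GTT offset chosen by the
	 * firmware, so there is no binding step left to perform: mark the
	 * vma as already globally bound and account the object as bound. */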
	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);
	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	obj->bind_count++;

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}