/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. They come from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the
 * display.
 *
 * i915 is responsible for reserving stolen memory for FBC and for configuring
 * its offset in the proper registers. The hardware takes care of all the
 * compression and decompression. However, there are many known cases where we
 * have to forcibly disable FBC to allow proper screen updates.
 */

#include "intel_drv.h"
#include "i915_drv.h"

static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return HAS_FBC(dev_priv);
}

static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
	return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
}

static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) < 4;
}

static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) <= 3;
}

/*
 * On some platforms, the CRTC's x:0/y:0 coordinates don't match the
 * frontbuffer's x:0/y:0 coordinates, so we lie to the hardware about the
 * plane's origin in order for the x and y offsets to fit the registers. As a
 * consequence, the fence doesn't really start at the display plane address we
 * program, because it starts at the real start of the buffer, so we have to
 * take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{
	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
}

/*
 * For SKL+, the plane source size used by the hardware is based on the value
 * we write to the PLANE_SIZE register. For BDW-, the hardware looks at the
 * value we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	if (width)
		*width = cache->plane.src_w;
	if (height)
		*height = cache->plane.src_h;
}

static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	if (INTEL_GEN(dev_priv) == 7)
		lines = min(lines, 2048);
	else if (INTEL_GEN(dev_priv) >= 8)
		lines = min(lines, 2560);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}

static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_wait_for_register(dev_priv,
				    FBC_STATUS, FBC_STAT_COMPRESSING, 0,
				    10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}

static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev_priv))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN4(dev_priv)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->vma->fence->id;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}

static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->vma->fence) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	}
}

static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}

static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->vma->fence) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		if (IS_GEN5(dev_priv))
			dpfc_ctl |= params->vma->fence->id;
		if (IS_GEN6(dev_priv)) {
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->vma->fence->id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN6(dev_priv)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE,
		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}

static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* Display WA #0529: skl, kbl, bxt. */
	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
		u32 val = I915_READ(CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		if (i915_gem_object_get_tiling(params->vma->obj) !=
		    I915_TILING_X)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		I915_WRITE(CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->vma->fence) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE |
			   params->vma->fence->id);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(SNB_DPFC_CTL_SA, 0);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
	}

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

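/*
 * The intel_fbc_hw_*() helpers below dispatch to the platform-specific
 * implementation: ILK and newer use the ILK/GEN7 paths, GM45 uses the G4X
 * path, and everything older falls back to the original i8xx registers.
 */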
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 5)
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}

static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}

static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = false;

	if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}

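/*
 * Deferred activation worker: scheduled by intel_fbc_schedule_activation(),
 * it waits for a vblank to pass after the last (re)scheduling before writing
 * the FBC registers, and bails out if the work was cancelled or the CRTC was
 * turned off in the meantime.
 */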
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct drm_i915_private *dev_priv =
		container_of(__work, struct drm_i915_private, fbc.work.work);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;
	struct intel_crtc *crtc = fbc->crtc;
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];

	if (drm_crtc_vblank_get(&crtc->base)) {
		/* CRTC is now off, leave FBC deactivated */
		mutex_lock(&fbc->lock);
		work->scheduled = false;
		mutex_unlock(&fbc->lock);
		return;
	}

retry:
	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 *
	 * It is also worth mentioning that since work->scheduled_vblank can be
	 * updated multiple times by the other threads, hitting the timeout is
	 * not an error condition. We'll just end up hitting the "goto retry"
	 * case below.
	 */
	wait_event_timeout(vblank->queue,
		drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
		msecs_to_jiffies(50));

	mutex_lock(&fbc->lock);

	/* Were we cancelled? */
	if (!work->scheduled)
		goto out;

	/* Were we delayed again while this function was sleeping? */
	if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
		mutex_unlock(&fbc->lock);
		goto retry;
	}

	intel_fbc_hw_activate(dev_priv);

	work->scheduled = false;

out:
	mutex_unlock(&fbc->lock);
	drm_crtc_vblank_put(&crtc->base);
}

static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	if (WARN_ON(!fbc->enabled))
		return;

	if (drm_crtc_vblank_get(&crtc->base)) {
		DRM_ERROR("vblank not available for FBC on pipe %c\n",
			  pipe_name(crtc->pipe));
		return;
	}

	/* It is useless to call intel_fbc_cancel_work() or cancel_work() in
	 * this function since we're not releasing fbc.lock, so it won't have
	 * an opportunity to grab it to discover that it was cancelled. So we
	 * just update the expected vblank count. */
	work->scheduled = true;
	work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
	drm_crtc_vblank_put(&crtc->base);

	schedule_work(&work->work);
}

static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	/* Calling cancel_work() here won't help because the work function
	 * grabs fbc->lock. Just set scheduled to false so the work function
	 * knows it was cancelled. */
	fbc->work.scheduled = false;

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	fbc->no_fbc_reason = reason;
}

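/*
 * Only gen2/gen3 cannot have FBC enabled while more than one pipe is active,
 * so this tracks which pipes currently have a visible plane and lets the
 * caller decide whether FBC has to be deactivated for this update.
 */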
static bool multiple_pipes_ok(struct intel_crtc *crtc,
			      struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	enum pipe pipe = crtc->pipe;

	/* Don't even bother tracking anything we don't need. */
	if (!no_fbc_on_multiple_pipes(dev_priv))
		return true;

	if (plane_state->base.visible)
		fbc->visible_pipes_mask |= (1 << pipe);
	else
		fbc->visible_pipes_mask &= ~(1 << pipe);

	return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
}

static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

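/*
 * Allocate the compressed framebuffer (CFB) from stolen memory and program
 * its base address. Pre-GM45 hardware additionally needs a separate line
 * length buffer allocated and programmed alongside the CFB.
 */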
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = fbc->state_cache.fb.format->cpp[0];

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (drm_mm_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);

	if (fbc->compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
		kfree(fbc->compressed_llb);
	}
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}

static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* This should have been caught earlier. */
	if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		return stride == 4096 || stride == 8192;

	if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
				  uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN2(dev_priv))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}

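/*
 * Snapshot the CRTC, plane and framebuffer state that the FBC code needs to
 * look at later. The cache is filled with fbc->lock held, so the checks in
 * intel_fbc_can_activate() work on a consistent copy instead of chasing the
 * live atomic state.
 */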
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 struct intel_crtc_state *crtc_state,
					 struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->base.fb;

	cache->vma = NULL;

	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->base.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
	cache->plane.visible = plane_state->base.visible;
	cache->plane.adjusted_x = plane_state->main.x;
	cache->plane.adjusted_y = plane_state->main.y;
	cache->plane.y = plane_state->base.src.y1 >> 16;

	if (!cache->plane.visible)
		return;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	cache->vma = plane_state->vma;
}

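/*
 * Check the cached state against the conditions FBC needs in order to be
 * activated on this CRTC: a fenced, tiled frontbuffer, a supported stride,
 * pixel format, rotation and plane size, and a CFB that is still big enough.
 * On failure, record a human-readable explanation in fbc->no_fbc_reason.
 */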
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* We don't need to use a state cache here since this information is
	 * global for all CRTCs.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	if (!cache->vma) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
	    (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 *
	 * Note that it is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 */
	if (!cache->vma->fence) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}

	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size to change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	return true;
}

static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915_modparams.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}

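/*
 * Convert the cached state into the register values that the activation
 * functions will actually program. Keeping this a plain, memset-cleared
 * struct lets intel_fbc_reg_params_equal() compare snapshots with memcmp().
 */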
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->vma = cache->vma;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
		params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
						32 * fbc->threshold) * 8;
}

static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
				       struct intel_fbc_reg_params *params2)
{
	/* We can use this since intel_fbc_get_reg_params() does a memset. */
	return memcmp(params1, params2, sizeof(*params1)) == 0;
}

void intel_fbc_pre_update(struct intel_crtc *crtc,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	const char *reason = "update pending";

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (!multiple_pipes_ok(crtc, plane_state)) {
		reason = "more than one pipe active";
		goto deactivate;
	}

	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);

deactivate:
	intel_fbc_deactivate(dev_priv, reason);
unlock:
	mutex_unlock(&fbc->lock);
}

static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_reg_params old_params;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (!fbc->enabled || fbc->crtc != crtc)
		return;

	if (!intel_fbc_can_activate(crtc)) {
		WARN_ON(fbc->active);
		return;
	}

	old_params = fbc->params;
	intel_fbc_get_reg_params(crtc, &fbc->params);

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (fbc->active &&
	    intel_fbc_reg_params_equal(&old_params, &fbc->params))
		return;

	intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
	intel_fbc_schedule_activation(crtc);
}

void intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_post_update(crtc);
	mutex_unlock(&fbc->lock);
}

static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (fbc->enabled)
		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
	else
		return fbc->possible_framebuffer_bits;
}

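/**
 * intel_fbc_invalidate - deactivate FBC for a dirtied frontbuffer
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidation
 *
 * Record the dirtied frontbuffer bits and, if they cover the plane FBC is (or
 * could be) tied to, deactivate FBC until the corresponding flush arrives.
 * Invalidations originating from GTT or flip operations are ignored here.
 */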
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv, "frontbuffer write");

	mutex_unlock(&fbc->lock);
}

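/**
 * intel_fbc_flush - act on a completed frontbuffer flush
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Clear the busy bits set by intel_fbc_invalidate() and, once the tracked
 * plane is no longer busy, either trigger a hardware recompression (if FBC is
 * still active) or re-run the post-update path to reactivate it.
 */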
1027void intel_fbc_flush(struct drm_i915_private *dev_priv,
Paulo Zanoni6f4551f2015-07-14 16:29:10 -03001028 unsigned int frontbuffer_bits, enum fb_op_origin origin)
Paulo Zanonidbef0f12015-02-13 17:23:46 -02001029{
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001030 struct intel_fbc *fbc = &dev_priv->fbc;
1031
Paulo Zanoni9f218332015-09-23 12:52:27 -03001032 if (!fbc_supported(dev_priv))
Paulo Zanoni0bf73c32015-07-03 15:40:54 -03001033 return;
1034
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001035 mutex_lock(&fbc->lock);
Paulo Zanonidbef0f12015-02-13 17:23:46 -02001036
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001037 fbc->busy_bits &= ~frontbuffer_bits;
Paulo Zanonidbef0f12015-02-13 17:23:46 -02001038
Paulo Zanoniab28a542016-04-04 18:17:15 -03001039 if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
1040 goto out;
1041
Paulo Zanoni261fe992016-01-19 11:35:40 -02001042 if (!fbc->busy_bits && fbc->enabled &&
1043 (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
Paulo Zanoni0dd81542016-01-19 11:35:39 -02001044 if (fbc->active)
Paulo Zanoniee7d6cfa2015-11-11 14:46:22 -02001045 intel_fbc_recompress(dev_priv);
Paulo Zanoni0dd81542016-01-19 11:35:39 -02001046 else
Paulo Zanoni1eb52232016-01-19 11:35:44 -02001047 __intel_fbc_post_update(fbc->crtc);
Paulo Zanoni6f4551f2015-07-14 16:29:10 -03001048 }
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001049
Paulo Zanoniab28a542016-04-04 18:17:15 -03001050out:
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001051 mutex_unlock(&fbc->lock);
Paulo Zanonidbef0f12015-02-13 17:23:46 -02001052}
1053
Rodrigo Vivi94b83952014-12-08 06:46:31 -08001054/**
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001055 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
1056 * @dev_priv: i915 device instance
1057 * @state: the atomic state structure
1058 *
1059 * This function looks at the proposed state for CRTCs and planes, then chooses
1060 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
1061 * true.
1062 *
1063 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
1064 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
1065 */
1066void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
Ville Syrjälädd576022017-11-17 21:19:14 +02001067 struct intel_atomic_state *state)
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001068{
1069 struct intel_fbc *fbc = &dev_priv->fbc;
Ville Syrjälädd576022017-11-17 21:19:14 +02001070 struct intel_plane *plane;
1071 struct intel_plane_state *plane_state;
Paulo Zanoni4f8f2252016-11-11 14:57:39 -02001072 bool crtc_chosen = false;
Paulo Zanoniba67fab2016-11-11 14:57:36 -02001073 int i;
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001074
1075 mutex_lock(&fbc->lock);
1076
Paulo Zanoni4f8f2252016-11-11 14:57:39 -02001077 /* Does this atomic commit involve the CRTC currently tied to FBC? */
1078 if (fbc->crtc &&
Ville Syrjälädd576022017-11-17 21:19:14 +02001079 !intel_atomic_get_new_crtc_state(state, fbc->crtc))
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001080 goto out;
1081
Paulo Zanoniee2be302016-11-11 14:57:37 -02001082 if (!intel_fbc_can_enable(dev_priv))
1083 goto out;
1084
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001085 /* Simply choose the first CRTC that is compatible and has a visible
1086 * plane. We could go for fancier schemes such as checking the plane
1087 * size, but this would just affect the few platforms that don't tie FBC
1088 * to pipe or plane A. */
Ville Syrjälädd576022017-11-17 21:19:14 +02001089 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1090 struct intel_crtc_state *crtc_state;
1091 struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001092
Ville Syrjälädd576022017-11-17 21:19:14 +02001093 if (!plane_state->base.visible)
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001094 continue;
1095
Paulo Zanonif7e9b002016-11-11 14:57:38 -02001096 if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
1097 continue;
1098
Ville Syrjäläb1558c72017-11-17 21:19:15 +02001099 if (fbc_on_plane_a_only(dev_priv) && plane->i9xx_plane != PLANE_A)
Paulo Zanoni03e39102016-11-11 14:57:35 -02001100 continue;
1101
Ville Syrjälädd576022017-11-17 21:19:14 +02001102 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001103
Ville Syrjälädd576022017-11-17 21:19:14 +02001104 crtc_state->enable_fbc = true;
Paulo Zanonif7e9b002016-11-11 14:57:38 -02001105 crtc_chosen = true;
Paulo Zanoniba67fab2016-11-11 14:57:36 -02001106 break;
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001107 }
1108
Paulo Zanonif7e9b002016-11-11 14:57:38 -02001109 if (!crtc_chosen)
1110 fbc->no_fbc_reason = "no suitable CRTC for FBC";
1111
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001112out:
1113 mutex_unlock(&fbc->lock);
1114}
1115
1116/**
Paulo Zanonid029bca2015-10-15 10:44:46 -03001117 * intel_fbc_enable - tries to enable FBC on the CRTC
1118 * @crtc: the CRTC
Daniel Vetter62f90b32016-07-15 21:48:07 +02001119 * @crtc_state: corresponding &intel_crtc_state for @crtc
 1120 * @plane_state: corresponding &intel_plane_state for the primary plane of @crtc
Paulo Zanonid029bca2015-10-15 10:44:46 -03001121 *
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001122 * This function checks if the given CRTC was chosen for FBC, then enables it if
Paulo Zanoni49227c42016-01-19 11:35:52 -02001123 * possible. Notice that it doesn't activate FBC. It is valid to call
1124 * intel_fbc_enable multiple times for the same pipe without an
 1125 * intel_fbc_disable in the middle, as long as FBC stays deactivated in between.
Paulo Zanonid029bca2015-10-15 10:44:46 -03001126 */
Maarten Lankhorstfaf68d92016-06-14 14:24:20 +02001127void intel_fbc_enable(struct intel_crtc *crtc,
1128 struct intel_crtc_state *crtc_state,
1129 struct intel_plane_state *plane_state)
Paulo Zanonid029bca2015-10-15 10:44:46 -03001130{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001131 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001132 struct intel_fbc *fbc = &dev_priv->fbc;
Paulo Zanonid029bca2015-10-15 10:44:46 -03001133
1134 if (!fbc_supported(dev_priv))
1135 return;
1136
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001137 mutex_lock(&fbc->lock);
Paulo Zanonid029bca2015-10-15 10:44:46 -03001138
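	/* If FBC is already enabled there is nothing left to do. When it is
	 * enabled on this very CRTC, the new state must still want FBC and
	 * FBC must still be deactivated (see the comment above). */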
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001139 if (fbc->enabled) {
Paulo Zanoni49227c42016-01-19 11:35:52 -02001140 WARN_ON(fbc->crtc == NULL);
1141 if (fbc->crtc == crtc) {
Maarten Lankhorstfaf68d92016-06-14 14:24:20 +02001142 WARN_ON(!crtc_state->enable_fbc);
Paulo Zanoni49227c42016-01-19 11:35:52 -02001143 WARN_ON(fbc->active);
1144 }
Paulo Zanonid029bca2015-10-15 10:44:46 -03001145 goto out;
1146 }
1147
Maarten Lankhorstfaf68d92016-06-14 14:24:20 +02001148 if (!crtc_state->enable_fbc)
Paulo Zanonif51be2e2016-01-19 11:35:50 -02001149 goto out;
1150
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001151 WARN_ON(fbc->active);
1152 WARN_ON(fbc->crtc != NULL);
Paulo Zanonid029bca2015-10-15 10:44:46 -03001153
Maarten Lankhorstfaf68d92016-06-14 14:24:20 +02001154 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
Paulo Zanonic5ecd462015-10-15 14:19:21 -03001155 if (intel_fbc_alloc_cfb(crtc)) {
Paulo Zanoni913a3a62016-01-19 11:35:54 -02001156 fbc->no_fbc_reason = "not enough stolen memory";
Paulo Zanonic5ecd462015-10-15 14:19:21 -03001157 goto out;
1158 }
1159
Paulo Zanonid029bca2015-10-15 10:44:46 -03001160 DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001161 fbc->no_fbc_reason = "FBC enabled but not active yet";
Paulo Zanonid029bca2015-10-15 10:44:46 -03001162
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001163 fbc->enabled = true;
1164 fbc->crtc = crtc;
Paulo Zanonid029bca2015-10-15 10:44:46 -03001165out:
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001166 mutex_unlock(&fbc->lock);
Paulo Zanonid029bca2015-10-15 10:44:46 -03001167}
1168
1169/**
1170 * __intel_fbc_disable - disable FBC
1171 * @dev_priv: i915 device instance
1172 *
1173 * This is the low level function that actually disables FBC. Callers should
1174 * grab the FBC lock.
1175 */
1176static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
1177{
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001178 struct intel_fbc *fbc = &dev_priv->fbc;
1179 struct intel_crtc *crtc = fbc->crtc;
Paulo Zanonid029bca2015-10-15 10:44:46 -03001180
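	/* Callers hold fbc->lock, FBC must have been deactivated already and
	 * the CRTC that owned it must already be shut down. */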
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001181 WARN_ON(!mutex_is_locked(&fbc->lock));
1182 WARN_ON(!fbc->enabled);
1183 WARN_ON(fbc->active);
Paulo Zanoni58f9c0b2016-01-19 11:35:51 -02001184 WARN_ON(crtc->active);
Paulo Zanonid029bca2015-10-15 10:44:46 -03001185
1186 DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
1187
Paulo Zanonic5ecd462015-10-15 14:19:21 -03001188 __intel_fbc_cleanup_cfb(dev_priv);
1189
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001190 fbc->enabled = false;
1191 fbc->crtc = NULL;
Paulo Zanonid029bca2015-10-15 10:44:46 -03001192}
1193
1194/**
Paulo Zanonic937ab3e52016-01-19 11:35:46 -02001195 * intel_fbc_disable - disable FBC if it's associated with crtc
Paulo Zanonid029bca2015-10-15 10:44:46 -03001196 * @crtc: the CRTC
1197 *
1198 * This function disables FBC if it's associated with the provided CRTC.
1199 */
Paulo Zanonic937ab3e52016-01-19 11:35:46 -02001200void intel_fbc_disable(struct intel_crtc *crtc)
Paulo Zanonid029bca2015-10-15 10:44:46 -03001201{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001202 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001203 struct intel_fbc *fbc = &dev_priv->fbc;
Paulo Zanonid029bca2015-10-15 10:44:46 -03001204
1205 if (!fbc_supported(dev_priv))
1206 return;
1207
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001208 mutex_lock(&fbc->lock);
Matthew Auld4da45612016-07-05 10:28:34 +01001209 if (fbc->crtc == crtc)
Paulo Zanonid029bca2015-10-15 10:44:46 -03001210 __intel_fbc_disable(dev_priv);
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001211 mutex_unlock(&fbc->lock);
Paulo Zanoni65c76002016-01-19 11:35:47 -02001212
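	/* The FBC activation work takes fbc->lock itself, so only cancel it
	 * synchronously after the lock has been dropped. */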
1213 cancel_work_sync(&fbc->work.work);
Paulo Zanonid029bca2015-10-15 10:44:46 -03001214}
1215
1216/**
Paulo Zanonic937ab3e52016-01-19 11:35:46 -02001217 * intel_fbc_global_disable - globally disable FBC
Paulo Zanonid029bca2015-10-15 10:44:46 -03001218 * @dev_priv: i915 device instance
1219 *
1220 * This function disables FBC regardless of which CRTC is associated with it.
1221 */
Paulo Zanonic937ab3e52016-01-19 11:35:46 -02001222void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
Paulo Zanonid029bca2015-10-15 10:44:46 -03001223{
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001224 struct intel_fbc *fbc = &dev_priv->fbc;
1225
Paulo Zanonid029bca2015-10-15 10:44:46 -03001226 if (!fbc_supported(dev_priv))
1227 return;
1228
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001229 mutex_lock(&fbc->lock);
1230 if (fbc->enabled)
Paulo Zanonid029bca2015-10-15 10:44:46 -03001231 __intel_fbc_disable(dev_priv);
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001232 mutex_unlock(&fbc->lock);
Paulo Zanoni65c76002016-01-19 11:35:47 -02001233
1234 cancel_work_sync(&fbc->work.work);
Paulo Zanonid029bca2015-10-15 10:44:46 -03001235}
1236
Paulo Zanoni61a585d2016-09-13 10:38:57 -03001237static void intel_fbc_underrun_work_fn(struct work_struct *work)
1238{
1239 struct drm_i915_private *dev_priv =
1240 container_of(work, struct drm_i915_private, fbc.underrun_work);
1241 struct intel_fbc *fbc = &dev_priv->fbc;
1242
1243 mutex_lock(&fbc->lock);
1244
1245 /* Maybe we were scheduled twice. */
Daniel Vetter2ae9e362017-08-11 09:23:27 +02001246 if (fbc->underrun_detected || !fbc->enabled)
Paulo Zanoni61a585d2016-09-13 10:38:57 -03001247 goto out;
1248
1249 DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
1250 fbc->underrun_detected = true;
1251
Chris Wilson4a3d1e02018-01-25 22:41:22 +00001252 intel_fbc_deactivate(dev_priv, "FIFO underrun");
Paulo Zanoni61a585d2016-09-13 10:38:57 -03001253out:
1254 mutex_unlock(&fbc->lock);
1255}
1256
1257/**
1258 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1259 * @dev_priv: i915 device instance
1260 *
1261 * Without FBC, most underruns are harmless and don't really cause too many
1262 * problems, except for an annoying message on dmesg. With FBC, underruns can
1263 * become black screens or even worse, especially when paired with bad
1264 * watermarks. So in order for us to be on the safe side, completely disable FBC
1265 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
1266 * already suggests that watermarks may be bad, so try to be as safe as
1267 * possible.
1268 *
1269 * This function is called from the IRQ handler.
1270 */
1271void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
1272{
1273 struct intel_fbc *fbc = &dev_priv->fbc;
1274
1275 if (!fbc_supported(dev_priv))
1276 return;
1277
1278 /* There's no guarantee that underrun_detected won't be set to true
1279 * right after this check and before the work is scheduled, but that's
1280 * not a problem since we'll check it again under the work function
1281 * while FBC is locked. This check here is just to prevent us from
1282 * unnecessarily scheduling the work, and it relies on the fact that we
 1283 * never switch underrun_detected back to false after it's true. */
1284 if (READ_ONCE(fbc->underrun_detected))
1285 return;
1286
1287 schedule_work(&fbc->underrun_work);
1288}
1289
Paulo Zanonid029bca2015-10-15 10:44:46 -03001290/**
Paulo Zanoni010cf732016-01-19 11:35:48 -02001291 * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
1292 * @dev_priv: i915 device instance
1293 *
1294 * The FBC code needs to track CRTC visibility since the older platforms can't
1295 * have FBC enabled while multiple pipes are used. This function does the
1296 * initial setup at driver load to make sure FBC is matching the real hardware.
1297 */
1298void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
1299{
1300 struct intel_crtc *crtc;
1301
 1302 /* Don't even bother tracking anything if we don't need to. */
1303 if (!no_fbc_on_multiple_pipes(dev_priv))
1304 return;
1305
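	/* Seed the mask with every pipe whose primary plane is already
	 * visible, so the visibility tracking starts out in sync with the
	 * hardware state. */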
Chris Wilson91c8a322016-07-05 10:40:23 +01001306 for_each_intel_crtc(&dev_priv->drm, crtc)
Ville Syrjälä525b9312016-10-31 22:37:02 +02001307 if (intel_crtc_active(crtc) &&
Maarten Lankhorst1d4258d2017-01-12 10:43:45 +01001308 crtc->base.primary->state->visible)
Paulo Zanoni010cf732016-01-19 11:35:48 -02001309 dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
1310}
1311
Paulo Zanoni80788a02016-04-13 16:01:09 -03001312/*
1313 * The DDX driver changes its behavior depending on the value it reads from
1314 * i915.enable_fbc, so sanitize it by translating the default value into either
1315 * 0 or 1 in order to allow it to know what's going on.
1316 *
1317 * Notice that this is done at driver initialization and we still allow user
1318 * space to change the value during runtime without sanitizing it again. IGT
1319 * relies on being able to change i915.enable_fbc at runtime.
1320 */
1321static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
1322{
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001323 if (i915_modparams.enable_fbc >= 0)
1324 return !!i915_modparams.enable_fbc;
Paulo Zanoni80788a02016-04-13 16:01:09 -03001325
Chris Wilson36dbc4d2016-08-04 08:43:53 +01001326 if (!HAS_FBC(dev_priv))
1327 return 0;
1328
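	/* Default to enabled only on BDW and gen9+; every other platform
	 * keeps FBC off unless it is explicitly requested. */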
Paulo Zanonifd7d6c52016-12-23 10:23:58 -02001329 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
Paulo Zanoni80788a02016-04-13 16:01:09 -03001330 return 1;
1331
1332 return 0;
1333}
1334
Chris Wilson36dbc4d2016-08-04 08:43:53 +01001335static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
1336{
Chris Wilson36dbc4d2016-08-04 08:43:53 +01001337 /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
Chris Wilson80debff2017-05-25 13:16:12 +01001338 if (intel_vtd_active() &&
Chris Wilson36dbc4d2016-08-04 08:43:53 +01001339 (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
1340 DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1341 return true;
1342 }
Chris Wilson36dbc4d2016-08-04 08:43:53 +01001343
1344 return false;
1345}
1346
Paulo Zanoni010cf732016-01-19 11:35:48 -02001347/**
Rodrigo Vivi94b83952014-12-08 06:46:31 -08001348 * intel_fbc_init - Initialize FBC
1349 * @dev_priv: the i915 device
1350 *
1351 * This function might be called during PM init process.
1352 */
Rodrigo Vivi7ff0ebc2014-12-08 14:09:10 -02001353void intel_fbc_init(struct drm_i915_private *dev_priv)
1354{
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001355 struct intel_fbc *fbc = &dev_priv->fbc;
Paulo Zanonidbef0f12015-02-13 17:23:46 -02001356 enum pipe pipe;
1357
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001358 INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
Paulo Zanoni61a585d2016-09-13 10:38:57 -03001359 INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001360 mutex_init(&fbc->lock);
1361 fbc->enabled = false;
1362 fbc->active = false;
1363 fbc->work.scheduled = false;
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001364
Chris Wilson36dbc4d2016-08-04 08:43:53 +01001365 if (need_fbc_vtd_wa(dev_priv))
1366 mkwrite_device_info(dev_priv)->has_fbc = false;
1367
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001368 i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
1369 DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
1370 i915_modparams.enable_fbc);
Paulo Zanoni80788a02016-04-13 16:01:09 -03001371
Rodrigo Vivi7ff0ebc2014-12-08 14:09:10 -02001372 if (!HAS_FBC(dev_priv)) {
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001373 fbc->no_fbc_reason = "unsupported by this chipset";
Rodrigo Vivi7ff0ebc2014-12-08 14:09:10 -02001374 return;
1375 }
1376
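	/* FBC only works on the primary plane, so track just the primary
	 * frontbuffer bit of each pipe that could possibly get FBC. */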
Paulo Zanonidbef0f12015-02-13 17:23:46 -02001377 for_each_pipe(dev_priv, pipe) {
Paulo Zanoniab34a7e2016-01-11 17:44:36 -02001378 fbc->possible_framebuffer_bits |=
Ville Syrjäläc19e1122018-01-23 20:33:43 +02001379 INTEL_FRONTBUFFER(pipe, PLANE_PRIMARY);
Paulo Zanonidbef0f12015-02-13 17:23:46 -02001380
Paulo Zanoni57105022015-11-04 17:10:46 -02001381 if (fbc_on_pipe_a_only(dev_priv))
Paulo Zanonidbef0f12015-02-13 17:23:46 -02001382 break;
1383 }
1384
Paulo Zanoni8c400742016-01-29 18:57:39 -02001385 /* This value was pulled out of someone's hat */
Paulo Zanoni5697d602016-11-11 14:57:41 -02001386 if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
Rodrigo Vivi7ff0ebc2014-12-08 14:09:10 -02001387 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
Rodrigo Vivi7ff0ebc2014-12-08 14:09:10 -02001388
Paulo Zanonib07ea0f2015-11-04 17:10:52 -02001389 /* We still don't have any sort of hardware state readout for FBC, so
Paulo Zanoni0e631ad2015-10-14 17:45:36 -03001390 * deactivate it in case the BIOS activated it, making sure the software
 1391 * state matches the hardware state. */
Paulo Zanoni8c400742016-01-29 18:57:39 -02001392 if (intel_fbc_hw_is_active(dev_priv))
1393 intel_fbc_hw_deactivate(dev_priv);
Rodrigo Vivi7ff0ebc2014-12-08 14:09:10 -02001394}