/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 */

/**
 * DOC: frontbuffer tracking
 *
 * Many features require us to track changes to the currently active
 * frontbuffer, especially rendering targeted at the frontbuffer.
 *
 * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
 * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
 * then called when the contents of the frontbuffer are invalidated, when
 * frontbuffer rendering has stopped again to flush out all the changes and when
 * the frontbuffer is exchanged with a flip. Subsystems interested in
 * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
 * into the relevant places and filter for the frontbuffer slots that they are
 * interested in.
 *
 * On a high level there are two types of powersaving features. The first type
 * works like a special cache (FBC and PSR) and is interested in when it should
 * stop caching and when to restart caching. This is done by placing callbacks
 * into the invalidate and the flush functions: at invalidate the caching must
 * be stopped and at flush time it can be restarted. Such a feature may also
 * need to know when the frontbuffer changes (e.g. when the hw doesn't initiate
 * an invalidate and flush on its own), which can be achieved by placing
 * callbacks into the flip functions as well. The first sketch below this
 * comment shows the rough shape of such a hook.
 *
 * The other type of display power saving feature only cares about busyness
 * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
 * busyness. There is no direct way to detect idleness. Instead an idle timer
 * in the form of delayed work should be started from the flush and flip
 * functions and cancelled again as soon as busyness is detected (see the
 * second sketch below).
 *
 * Note that there's also an older frontbuffer activity tracking scheme which
 * just tracks general activity. This is done by the various mark_busy and
 * mark_idle functions. For display power management features using these
 * functions is deprecated and should be avoided.
 */
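
/*
 * Purely illustrative sketch of the first (cache-like) consumer type
 * described above, using a hypothetical "foo" feature: the hook filters for
 * the frontbuffer slots it cares about and stops caching until the next
 * flush. The real consumers are the intel_fbc_*() and intel_psr_*() calls
 * further down in this file.
 *
 *	struct foo_cache {
 *		unsigned int tracked_bits;	// frontbuffer slots we compress
 *	};
 *
 *	static void foo_cache_invalidate(struct foo_cache *cache,
 *					 unsigned int frontbuffer_bits)
 *	{
 *		if (!(frontbuffer_bits & cache->tracked_bits))
 *			return;			// not one of our slots
 *
 *		foo_cache_disable(cache);	// re-enabled at flush time
 *	}
 */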
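
/*
 * Purely illustrative sketch of the second (busyness-only) consumer type,
 * again with hypothetical "foo" names: an idle timer built from delayed
 * work, (re)started from the flush/flip hooks and cancelled as soon as
 * busyness is seen again. mod_delayed_work(), cancel_delayed_work() and
 * msecs_to_jiffies() are the stock kernel workqueue helpers.
 *
 *	static void foo_idle_work_fn(struct work_struct *work)
 *	{
 *		struct foo_feature *foo = container_of(to_delayed_work(work),
 *						       struct foo_feature,
 *						       idle_work);
 *
 *		foo_enter_low_refresh(foo);	// idle long enough: downclock
 *	}
 *
 *	static void foo_flush(struct foo_feature *foo)
 *	{
 *		foo_leave_low_refresh(foo);	// flush/flip means busy
 *		mod_delayed_work(system_wq, &foo->idle_work,
 *				 msecs_to_jiffies(1000));
 *	}
 *
 *	static void foo_invalidate(struct foo_feature *foo)
 *	{
 *		foo_leave_low_refresh(foo);	// busy again, stop the timer
 *		cancel_delayed_work(&foo->idle_work);
 *	}
 */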

#include <drm/drmP.h>

#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "i915_drv.h"

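/*
 * __intel_fb_obj_invalidate - signal the start of frontbuffer rendering on
 * @obj to all interested consumers (PSR, DRRS, FBC). For command streamer
 * (ORIGIN_CS) rendering the object's frontbuffer bits are also recorded as
 * busy, so that the eventual flush is held back until that rendering
 * retires, and any pending flip flush for those bits is cancelled.
 */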
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
			       enum fb_op_origin origin)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (origin == ORIGIN_CS) {
		spin_lock(&dev_priv->fb_tracking.lock);
		dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
		dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
		spin_unlock(&dev_priv->fb_tracking.lock);
	}

	intel_psr_invalidate(dev, obj->frontbuffer_bits);
	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
	intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
}

/**
 * intel_frontbuffer_flush - flush frontbuffer
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * This function gets called every time rendering on the given planes has
 * completed and frontbuffer caching can be started again. Flushes will get
 * delayed if they're blocked by some outstanding asynchronous rendering.
 *
 * Can be called without any locks held.
 */
static void intel_frontbuffer_flush(struct drm_device *dev,
				    unsigned frontbuffer_bits,
				    enum fb_op_origin origin)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Delay flushing when rings are still busy. */
	spin_lock(&dev_priv->fb_tracking.lock);
	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
	spin_unlock(&dev_priv->fb_tracking.lock);

	if (!frontbuffer_bits)
		return;

	intel_edp_drrs_flush(dev, frontbuffer_bits);
	intel_psr_flush(dev, frontbuffer_bits, origin);
	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
}

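/*
 * __intel_fb_obj_flush - signal that rendering to the frontbuffer bits of
 * @obj has completed. On a retire (@retire == true) only the bits still
 * marked busy by an earlier ORIGIN_CS invalidate are flushed, and those
 * busy bits are cleared again.
 */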
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
			  bool retire,
			  enum fb_op_origin origin)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned frontbuffer_bits;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	frontbuffer_bits = obj->frontbuffer_bits;

	if (retire) {
		spin_lock(&dev_priv->fb_tracking.lock);
		/* Filter out new bits since rendering started. */
		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
		spin_unlock(&dev_priv->fb_tracking.lock);
	}

	if (frontbuffer_bits)
		intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
}

/**
 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on the planes given by
 * @frontbuffer_bits. The actual frontbuffer flushing will be delayed until
 * completion is signalled with intel_frontbuffer_flip_complete. If an
 * invalidate happens in between, this flush will be cancelled.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
				    unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	spin_lock(&dev_priv->fb_tracking.lock);
	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
	/* Remove stale busy bits due to the old buffer. */
	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
	spin_unlock(&dev_priv->fb_tracking.lock);

	intel_psr_single_frame_update(dev, frontbuffer_bits);
}

/**
 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after the flip has been latched and will complete
 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_complete(struct drm_device *dev,
				     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	spin_lock(&dev_priv->fb_tracking.lock);
	/* Mask any cancelled flips. */
	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
	spin_unlock(&dev_priv->fb_tracking.lock);

	if (frontbuffer_bits)
		intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
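
/*
 * Illustrative caller-side pairing for an asynchronous flip (not part of
 * this file; the middle step is only a placeholder):
 *
 *	intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);
 *	...queue the flip and wait for it to be latched...
 *	intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
 *
 * If a new invalidate hits the same frontbuffer bits in between, the flush
 * that intel_frontbuffer_flip_complete() would have issued is cancelled.
 */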

/**
 * intel_frontbuffer_flip - synchronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on the planes given by
 * @frontbuffer_bits. This is for synchronous plane updates which will happen
 * on the next vblank and which will not get delayed by pending gpu rendering.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip(struct drm_device *dev,
			    unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	spin_lock(&dev_priv->fb_tracking.lock);
	/* Remove stale busy bits due to the old buffer. */
	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
	spin_unlock(&dev_priv->fb_tracking.lock);

	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}