/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"

/**
 * intel_connector_atomic_get_property - fetch connector property value
 * @connector: connector to fetch property for
 * @state: state containing the property value
 * @property: property to look up
 * @val: pointer to write property value into
 *
 * The DRM core does not store shadow copies of properties for
 * atomic-capable drivers. This entrypoint is used to fetch
 * the current value of a driver-specific connector property.
 */
int
intel_connector_atomic_get_property(struct drm_connector *connector,
                                    const struct drm_connector_state *state,
                                    struct drm_property *property,
                                    uint64_t *val)
{
        int i;

        /*
         * TODO: We only have atomic modeset for planes at the moment, so the
         * crtc/connector code isn't quite ready yet. Until it's ready,
         * continue to look up all property values in the DRM's shadow copy
         * in obj->properties->values[].
         *
         * When the crtc/connector state work matures, this function should
         * be updated to read the values out of the state structure instead.
         */
        for (i = 0; i < connector->base.properties->count; i++) {
                if (connector->base.properties->properties[i] == property) {
                        *val = connector->base.properties->values[i];
                        return 0;
                }
        }

        return -EINVAL;
}

/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *crtc_state;

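        /*
         * Duplicate the crtc's current software configuration; if no
         * configuration has been committed yet, fall back to a zeroed state.
         */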
        if (WARN_ON(!intel_crtc->config))
                crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        else
                crtc_state = kmemdup(intel_crtc->config,
                                     sizeof(*intel_crtc->config), GFP_KERNEL);

        if (!crtc_state)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);

        crtc_state->base.crtc = crtc;

        return &crtc_state->base;
}

/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
                         struct drm_crtc_state *state)
{
        drm_atomic_helper_crtc_destroy_state(crtc, state);
}

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev: DRM device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on the staged scaling requests for
 * @intel_crtc and its planes. It is called from the crtc-level check path.
 * If a request is supportable, it attaches the scalers to the requested
 * planes and the crtc.
 *
 * This function also takes into account any scaler(s) currently in use by
 * planes that are not part of this atomic state.
 *
 * Returns:
 *   0 - scalers were set up successfully
 *   error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_device *dev,
                               struct intel_crtc *intel_crtc,
                               struct intel_crtc_state *crtc_state)
{
        struct drm_plane *plane = NULL;
        struct intel_plane *intel_plane;
        struct intel_plane_state *plane_state = NULL;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct drm_atomic_state *drm_state = crtc_state->base.state;
        int num_scalers_need;
        int i, j;

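        /*
         * Each set bit in scaler_users is one user (a plane, or the crtc
         * itself for panel fitting) that has requested a scaler.
         */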
        num_scalers_need = hweight32(scaler_state->scaler_users);
        DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
                      crtc_state, num_scalers_need, intel_crtc->num_scalers,
                      scaler_state->scaler_users);

        /*
         * High level flow:
         * - staged scaler requests are already in scaler_state->scaler_users
         * - check whether staged scaling requests can be supported
         * - add planes using scalers that aren't in current transaction
         * - assign scalers to requested users
         * - as part of plane commit, scalers will be committed
         *   (i.e., either attached or detached) to respective planes in hw
         * - as part of crtc_commit, scaler will be either attached or detached
         *   to crtc in hw
         */

        /* fail if required scalers > available scalers */
        if (num_scalers_need > intel_crtc->num_scalers) {
                DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
                              num_scalers_need, intel_crtc->num_scalers);
                return -EINVAL;
        }

        /* walk through scaler_users bits and start assigning scalers */
        for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
                int *scaler_id;
                const char *name;
                int idx;

                /* skip if scaler not required */
                if (!(scaler_state->scaler_users & (1 << i)))
                        continue;

                if (i == SKL_CRTC_INDEX) {
                        name = "CRTC";
                        idx = intel_crtc->base.base.id;

                        /* panel fitter case: assign as a crtc scaler */
                        scaler_id = &scaler_state->scaler_id;
                } else {
                        name = "PLANE";

                        /* plane scaler case: assign as a plane scaler */
                        /* find the plane that set the bit as scaler_user */
                        plane = drm_state->planes[i];

                        /*
                         * to enable/disable hq mode, add planes that are using
                         * a scaler to this transaction
                         */
                        if (!plane) {
                                struct drm_plane_state *state;
                                plane = drm_plane_from_index(dev, i);
                                state = drm_atomic_get_plane_state(drm_state, plane);
                                if (IS_ERR(state)) {
                                        DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
                                                      plane->base.id);
                                        return PTR_ERR(state);
                                }

                                /*
                                 * the plane is added after plane checks are run,
                                 * but since this plane is unchanged just do the
                                 * minimum required validation.
                                 */
                                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                                        intel_crtc->atomic.wait_for_flips = true;
                                crtc_state->base.planes_changed = true;
                        }

                        intel_plane = to_intel_plane(plane);
                        idx = plane->base.id;

                        /* a plane on a different crtc cannot be a scaler user of this crtc */
                        if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
                                continue;

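                        /* the plane's staged state tracks its assigned scaler */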
                        plane_state = to_intel_plane_state(drm_state->plane_states[i]);
                        scaler_id = &plane_state->scaler_id;
                }

                if (*scaler_id < 0) {
                        /* find a free scaler */
                        for (j = 0; j < intel_crtc->num_scalers; j++) {
                                if (!scaler_state->scalers[j].in_use) {
                                        scaler_state->scalers[j].in_use = 1;
                                        *scaler_id = j;
                                        DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
                                                      intel_crtc->pipe, *scaler_id,
                                                      name, idx);
                                        break;
                                }
                        }
                }

                if (WARN_ON(*scaler_id < 0)) {
                        DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
                        continue;
                }

                /* set scaler mode */
                if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
                        /*
                         * when only 1 scaler is in use on either pipe A or B,
                         * scaler 0 operates in high quality (HQ) mode.
                         * In this case use scaler 0 to take advantage of HQ mode.
                         */
                        *scaler_id = 0;
                        scaler_state->scalers[0].in_use = 1;
                        scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
                        scaler_state->scalers[1].in_use = 0;
                } else {
                        scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
                }
        }

        return 0;
}

static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
                                  struct intel_shared_dpll_config *shared_dpll)
{
        enum intel_dpll_id i;

        /* Copy shared dpll state */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                shared_dpll[i] = pll->config;
        }
}

struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
        struct intel_atomic_state *state = to_intel_atomic_state(s);

        WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

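        /*
         * Copy the current shared DPLL configuration into this atomic state
         * the first time it is requested; later callers get the same copy,
         * so DPLL changes staged in this state remain consistent.
         */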
        if (!state->dpll_set) {
                state->dpll_set = true;

                intel_atomic_duplicate_dpll_state(to_i915(s->dev),
                                                  state->shared_dpll);
        }

        return state->shared_dpll;
}

struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
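        /* i915 subclasses drm_atomic_state to carry the shared dpll snapshot */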
        struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
                kfree(state);
                return NULL;
        }

        return &state->base;
}

void intel_atomic_state_clear(struct drm_atomic_state *s)
{
        struct intel_atomic_state *state = to_intel_atomic_state(s);

        drm_atomic_state_default_clear(&state->base);
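        /*
         * Make intel_atomic_get_shared_dpll_state() take a fresh snapshot
         * the next time this state is used.
         */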
        state->dpll_set = false;
}