blob: b92b8581efc2b387792a6e5fcacd0ad35967586a [file] [log] [blame]
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
23
24/**
25 * DOC: atomic modeset support
26 *
27 * The functions here implement the state management and hardware programming
28 * dispatch required by the atomic modeset infrastructure.
29 * See intel_atomic_plane.c for the plane-specific atomic functionality.
30 */
31
32#include <drm/drmP.h>
33#include <drm/drm_atomic.h>
34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_plane_helper.h>
36#include "intel_drv.h"
37
38
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * Validates an incoming atomic state for the current "nuclear pageflip"
 * limitations: only plane updates are supported, and all touched planes
 * must belong to a single CRTC.  Also snapshots the current cdclk and
 * runs the core plane checks.
 *
 * Returns 0 on success or -EINVAL if the state involves multiple pipes,
 * any CRTC/connector state, or fails the plane checks.
 */
int intel_atomic_check(struct drm_device *dev,
		       struct drm_atomic_state *state)
{
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	int nconnectors = dev->mode_config.num_connector;
	enum pipe nuclear_pipe = INVALID_PIPE;
	struct intel_crtc *nuclear_crtc = NULL;
	struct intel_crtc_state *crtc_state = NULL;
	int ret;
	int i;
	bool not_nuclear = false;

	/* Carry the current display clock into the state up front; later
	 * commit code reads it from the intel state. */
	to_intel_atomic_state(state)->cdclk = to_i915(dev)->cdclk_freq;

	/*
	 * FIXME: At the moment, we only support "nuclear pageflip" on a
	 * single CRTC.  Cross-crtc updates will be added later.
	 */
	for (i = 0; i < nplanes; i++) {
		struct intel_plane *plane = to_intel_plane(state->planes[i]);
		if (!plane)
			continue;

		/* First plane seen picks the pipe; any later plane on a
		 * different pipe makes the update cross-crtc -> reject. */
		if (nuclear_pipe == INVALID_PIPE) {
			nuclear_pipe = plane->pipe;
		} else if (nuclear_pipe != plane->pipe) {
			DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
			return -EINVAL;
		}
	}

	/*
	 * FIXME: We only handle planes for now; make sure there are no CRTC's
	 * or connectors involved.
	 */
	state->allow_modeset = false;
	for (i = 0; i < ncrtcs; i++) {
		struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
		/* Reset per-crtc scratch flags for this transaction. */
		if (crtc)
			memset(&crtc->atomic, 0, sizeof(crtc->atomic));
		if (crtc && crtc->pipe != nuclear_pipe)
			not_nuclear = true;
		if (crtc && crtc->pipe == nuclear_pipe) {
			nuclear_crtc = crtc;
			crtc_state = to_intel_crtc_state(state->crtc_states[i]);
		}
	}
	for (i = 0; i < nconnectors; i++)
		if (state->connectors[i] != NULL)
			not_nuclear = true;

	if (not_nuclear) {
		DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
		return -EINVAL;
	}

	/* If the BIOS-inherited plane setup hasn't been sanitized yet, pull
	 * every plane on this crtc into the transaction so it gets fixed up. */
	if (crtc_state &&
	    crtc_state->quirks & PIPE_CONFIG_QUIRK_INITIAL_PLANES) {
		ret = drm_atomic_add_affected_planes(state, &nuclear_crtc->base);
		if (ret)
			return ret;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	return ret;
}
114
115
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete.  At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int intel_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state,
			bool async)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret;
	int i;

	/* Async commit is not wired up yet; reject rather than misbehave. */
	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	/* Pin framebuffers etc.; this can still fail, so do it first. */
	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Point of no return */
	drm_atomic_helper_swap_state(dev, state);

	for (each_crtc_in_state => see loop below) */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/* Keep the legacy crtc->config pointer in sync with the
		 * newly swapped-in state before touching hardware. */
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
	}

	/* FIXME: This function should eventually call __intel_set_mode when needed */

	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_state_free(state);

	return 0;
}
Matt Roper2545e4a2015-01-22 16:51:27 -0800167
168/**
169 * intel_connector_atomic_get_property - fetch connector property value
170 * @connector: connector to fetch property for
171 * @state: state containing the property value
172 * @property: property to look up
173 * @val: pointer to write property value into
174 *
175 * The DRM core does not store shadow copies of properties for
176 * atomic-capable drivers. This entrypoint is used to fetch
177 * the current value of a driver-specific connector property.
178 */
179int
180intel_connector_atomic_get_property(struct drm_connector *connector,
181 const struct drm_connector_state *state,
182 struct drm_property *property,
183 uint64_t *val)
184{
185 int i;
186
187 /*
188 * TODO: We only have atomic modeset for planes at the moment, so the
189 * crtc/connector code isn't quite ready yet. Until it's ready,
190 * continue to look up all property values in the DRM's shadow copy
191 * in obj->properties->values[].
192 *
193 * When the crtc/connector state work matures, this function should
194 * be updated to read the values out of the state structure instead.
195 */
196 for (i = 0; i < connector->base.properties->count; i++) {
197 if (connector->base.properties->properties[i] == property) {
198 *val = connector->base.properties->values[i];
199 return 0;
200 }
201 }
202
203 return -EINVAL;
204}
Matt Roper13568372015-01-21 16:35:47 -0800205
206/*
207 * intel_crtc_duplicate_state - duplicate crtc state
208 * @crtc: drm crtc
209 *
210 * Allocates and returns a copy of the crtc state (both common and
211 * Intel-specific) for the specified crtc.
212 *
213 * Returns: The newly allocated crtc state, or NULL on failure.
214 */
215struct drm_crtc_state *
216intel_crtc_duplicate_state(struct drm_crtc *crtc)
217{
218 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ander Conselvan de Oliveiraa91572f2015-03-03 15:21:55 +0200219 struct intel_crtc_state *crtc_state;
Matt Roper13568372015-01-21 16:35:47 -0800220
221 if (WARN_ON(!intel_crtc->config))
Ander Conselvan de Oliveiraa91572f2015-03-03 15:21:55 +0200222 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
223 else
224 crtc_state = kmemdup(intel_crtc->config,
225 sizeof(*intel_crtc->config), GFP_KERNEL);
Matt Roper13568372015-01-21 16:35:47 -0800226
Ander Conselvan de Oliveiraf0c60572015-04-21 17:12:58 +0300227 if (!crtc_state)
228 return NULL;
229
230 __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
231
232 crtc_state->base.crtc = crtc;
Ander Conselvan de Oliveiraa91572f2015-03-03 15:21:55 +0200233
234 return &crtc_state->base;
Matt Roper13568372015-01-21 16:35:47 -0800235}
236
/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.  Currently the Intel-specific part carries no extra
 * resources, so the core helper (which frees @state) is sufficient.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			 struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}
Chandra Kondurud03c93d2015-04-09 16:42:46 -0700250
/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev: DRM device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on staged scaling requests for
 * a @crtc and its planes.  It is called from crtc level check path.  If request
 * is a supportable request, it attaches scalers to requested planes and crtc.
 *
 * This function takes into account the current scaler(s) in use by any planes
 * not being part of this atomic state
 *
 * Returns:
 * 0 - scalers were setup successfully
 * error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_device *dev,
	struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state)
{
	struct drm_plane *plane = NULL;
	struct intel_plane *intel_plane;
	struct intel_plane_state *plane_state = NULL;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_atomic_state *drm_state = crtc_state->base.state;
	int num_scalers_need;
	int i, j;

	/* One bit per requested user (planes + the crtc/panel fitter). */
	num_scalers_need = hweight32(scaler_state->scaler_users);
	DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
		crtc_state, num_scalers_need, intel_crtc->num_scalers,
		scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */

	/* fail if required scalers > available scalers */
	if (num_scalers_need > intel_crtc->num_scalers){
		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
			num_scalers_need, intel_crtc->num_scalers);
		return -EINVAL;
	}

	/* walkthrough scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		int *scaler_id;
		const char *name;
		int idx;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			name = "CRTC";
			idx = intel_crtc->base.base.id;

			/* panel fitter case: assign as a crtc scaler */
			scaler_id = &scaler_state->scaler_id;
		} else {
			name = "PLANE";

			/* plane scaler case: assign as a plane scaler */
			/* find the plane that set the bit as scaler_user */
			plane = drm_state->planes[i];

			/*
			 * to enable/disable hq mode, add planes that are using scaler
			 * into this transaction
			 */
			if (!plane) {
				struct drm_plane_state *state;
				plane = drm_plane_from_index(dev, i);
				state = drm_atomic_get_plane_state(drm_state, plane);
				if (IS_ERR(state)) {
					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
						plane->base.id);
					return PTR_ERR(state);
				}

				/*
				 * the plane is added after plane checks are run,
				 * but since this plane is unchanged just do the
				 * minimum required validation.
				 */
				if (plane->type == DRM_PLANE_TYPE_PRIMARY)
					intel_crtc->atomic.wait_for_flips = true;
				crtc_state->base.planes_changed = true;
			}

			intel_plane = to_intel_plane(plane);
			idx = plane->base.id;

			/* plane on different crtc cannot be a scaler user of this crtc */
			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
				continue;
			}

			plane_state = to_intel_plane_state(drm_state->plane_states[i]);
			scaler_id = &plane_state->scaler_id;
		}

		/* Negative id means "no scaler attached yet" -> grab one. */
		if (*scaler_id < 0) {
			/* find a free scaler */
			for (j = 0; j < intel_crtc->num_scalers; j++) {
				if (!scaler_state->scalers[j].in_use) {
					scaler_state->scalers[j].in_use = 1;
					*scaler_id = j;
					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
						intel_crtc->pipe, *scaler_id, name, idx);
					break;
				}
			}
		}

		if (WARN_ON(*scaler_id < 0)) {
			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
			continue;
		}

		/* set scaler mode */
		if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
			/*
			 * when only 1 scaler is in use on either pipe A or B,
			 * scaler 0 operates in high quality (HQ) mode.
			 * In this case use scaler 0 to take advantage of HQ mode
			 */
			*scaler_id = 0;
			scaler_state->scalers[0].in_use = 1;
			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
			scaler_state->scalers[1].in_use = 0;
		} else {
			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
		}
	}

	return 0;
}
Maarten Lankhorstde419ab2015-06-04 10:21:28 +0200400
Maarten Lankhorstf7217902015-06-10 10:24:20 +0200401static void
Maarten Lankhorstde419ab2015-06-04 10:21:28 +0200402intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
403 struct intel_shared_dpll_config *shared_dpll)
404{
405 enum intel_dpll_id i;
406
407 /* Copy shared dpll state */
408 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
409 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
410
411 shared_dpll[i] = pll->config;
412 }
413}
414
415struct intel_shared_dpll_config *
416intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
417{
418 struct intel_atomic_state *state = to_intel_atomic_state(s);
419
420 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
421
422 if (!state->dpll_set) {
423 state->dpll_set = true;
424
425 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
426 state->shared_dpll);
427 }
428
429 return state->shared_dpll;
430}
431
432struct drm_atomic_state *
433intel_atomic_state_alloc(struct drm_device *dev)
434{
435 struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
436
437 if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
438 kfree(state);
439 return NULL;
440 }
441
442 return &state->base;
443}
444
445void intel_atomic_state_clear(struct drm_atomic_state *s)
446{
447 struct intel_atomic_state *state = to_intel_atomic_state(s);
448 drm_atomic_state_default_clear(&state->base);
449 state->dpll_set = false;
450}