/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */


#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
	kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;

	if (!config->funcs->atomic_state_alloc) {
		struct drm_atomic_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		state->connectors[i].old_state = NULL;
		state->connectors[i].new_state = NULL;
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
		state->crtcs[i].old_state = NULL;
		state->crtcs[i].new_state = NULL;

		if (state->crtcs[i].commit) {
			drm_crtc_commit_put(state->crtcs[i].commit);
			state->crtcs[i].commit = NULL;
		}
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
		state->planes[i].old_state = NULL;
		state->planes[i].new_state = NULL;
	}

	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
		state->private_objs[i].old_state = NULL;
		state->private_objs[i].new_state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks, so someone else could sneak in and change the current modeset
 * configuration. That means all the state assembled in @state is no longer an
 * atomic update to the current state, but to some arbitrary earlier state,
 * which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);

/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);

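/*
 * A minimal usage sketch: callers never invoke __drm_atomic_state_free()
 * directly, the final drm_atomic_state_put() drops &drm_atomic_state.ref and
 * ends up here:
 *
 *	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
 *
 *	if (!state)
 *		return -ENOMEM;
 *
 *	... build up the update and commit it ...
 *
 *	drm_atomic_state_put(state);
 */
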
/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].old_state = crtc->state;
	state->crtcs[index].new_state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);

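/*
 * A minimal usage sketch with hypothetical "foo" driver names: inside a
 * driver's &drm_plane_helper_funcs.atomic_check hook the CRTC state is pulled
 * into the update and any error, including -EDEADLK, is simply forwarded:
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_plane_state *new_state)
 *	{
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!new_state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_crtc_state(new_state->state,
 *						       new_state->crtc);
 *		if (IS_ERR(crtc_state))
 *			return PTR_ERR(crtc_state);
 *
 *		... check new_state against crtc_state ...
 *
 *		return 0;
 *	}
 */
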
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}

static int drm_atomic_connector_check(struct drm_connector *connector,
		struct drm_connector_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_writeback_job *writeback_job = state->writeback_job;

	if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
		return 0;

	if (writeback_job->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	if (state->crtc)
		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);

	if (writeback_job->fb && !crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
				 connector->base.id, connector->name,
				 state->crtc->base.id);
		return -EINVAL;
	}

	if (writeback_job->out_fence && !writeback_job->fb) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	/* the legacy pointers should never be set */
	WARN_ON(plane->fb);
	WARN_ON(plane->old_fb);
	WARN_ON(plane->crtc);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);

static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (state->crtc && !state->fb) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	} else if (state->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
				 state->crtc->base.id, state->crtc->name,
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
					   state->fb->modifier);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
				 plane->base.id, plane->name,
				 drm_get_format_name(state->fb->format->format,
						     &format_name),
				 state->fb->modifier);
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
				 plane->base.id, plane->name,
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
				 plane->base.id, plane->name,
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
				 state->fb->width, state->fb->height);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}

/**
 * DOC: handling driver private state
 *
 * Very often the DRM objects exposed to userspace in the atomic modeset api
 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
 * underlying hardware. Especially for any kind of shared resources (e.g. shared
 * clocks, scaler units, bandwidth and fifo limits shared among a group of
 * planes or CRTCs, and so on) it makes sense to model these as independent
 * objects. Drivers then need to do similar state tracking and commit ordering for
 * such private (since not exposed to userspace) objects as the atomic core and
 * helpers already provide for connectors, planes and CRTCs.
 *
 * To make this easier on drivers the atomic core provides some support to track
 * driver private state objects using struct &drm_private_obj, with the
 * associated state struct &drm_private_state.
 *
 * Similar to userspace-exposed objects, private state structures can be
 * acquired by calling drm_atomic_get_private_obj_state(). Since this function
 * does not take care of locking, drivers should wrap it for each type of
 * private state object they have with the required call to drm_modeset_lock()
 * for the corresponding &drm_modeset_lock.
 *
 * All private state structures contained in a &drm_atomic_state update can be
 * iterated using for_each_oldnew_private_obj_in_state(),
 * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
 * Drivers are recommended to wrap these for each type of driver private state
 * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
 * least if they want to iterate over all objects of a given type.
 *
 * An earlier way to handle driver private state was by subclassing struct
 * &drm_atomic_state. But since that encourages non-standard ways to implement
 * the check/commit split atomic requires (by using e.g. "check and rollback or
 * commit instead" of "duplicate state, check, then either commit or release
 * duplicated state") it is deprecated in favour of using &drm_private_state.
 */

/**
 * drm_atomic_private_obj_init - initialize private object
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
 */
void
drm_atomic_private_obj_init(struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	obj->state = state;
	obj->funcs = funcs;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It does not grab any locks as the caller is
 * expected to take care of any required locking.
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
 */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);

static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		if (state->writeback_job && state->writeback_job->fb)
			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc. Hence
 * drivers and helpers should only call this when really needed (e.g. when a
 * full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

/**
 * drm_atomic_add_affected_planes - add planes for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any) adding all the plane states for
 * a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	struct drm_plane *plane;

	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

	DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}
	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);

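/*
 * A minimal usage sketch with hypothetical "foo" driver names: once a driver's
 * CRTC check hook decides a full modeset is needed, it can pull all affected
 * connectors and planes into the update and forward any error:
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_crtc_state *crtc_state)
 *	{
 *		int ret;
 *
 *		if (!drm_atomic_crtc_needs_modeset(crtc_state))
 *			return 0;
 *
 *		ret = drm_atomic_add_affected_connectors(crtc_state->state, crtc);
 *		if (ret)
 *			return ret;
 *
 *		return drm_atomic_add_affected_planes(crtc_state->state, crtc);
 *	}
 */
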
/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_plane_check(plane, plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		ret = drm_atomic_crtc_check(crtc, crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_connector_check(conn, conn_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
					 conn->base.id, conn->name);
			return ret;
		}
	}

	if (config->funcs->atomic_check) {
		ret = config->funcs->atomic_check(state->dev, state);

		if (ret) {
			DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
					 state, ret);
			return ret;
		}
	}

	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);

/**
 * drm_atomic_nonblocking_commit - atomic nonblocking commit
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);

	return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);

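/*
 * A minimal sketch of the w/w backoff dance referred to above. In-kernel users
 * often go through helpers such as drm_modeset_lock_all_ctx(), but spelled out
 * it boils down to:
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		return -ENOMEM;
 *	state->acquire_ctx = &ctx;
 *
 * retry:
 *	... build up the new configuration in state ...
 *
 *	ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *
 *	drm_atomic_state_put(state);
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */
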
void drm_atomic_print_state(const struct drm_atomic_state *state)
{
	struct drm_printer p = drm_info_printer(state->dev->dev);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i)
		drm_atomic_plane_print_state(&p, plane_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_atomic_crtc_print_state(&p, crtc_state);

	for_each_new_connector_in_state(state, connector, connector_state, i)
		drm_atomic_connector_print_state(&p, connector_state);
}

static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
			     bool take_locks)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!drm_drv_uses_atomic_modeset(dev))
		return;

	list_for_each_entry(plane, &config->plane_list, head) {
		if (take_locks)
			drm_modeset_lock(&plane->mutex, NULL);
		drm_atomic_plane_print_state(p, plane->state);
		if (take_locks)
			drm_modeset_unlock(&plane->mutex);
	}

	list_for_each_entry(crtc, &config->crtc_list, head) {
		if (take_locks)
			drm_modeset_lock(&crtc->mutex, NULL);
		drm_atomic_crtc_print_state(p, crtc->state);
		if (take_locks)
			drm_modeset_unlock(&crtc->mutex);
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	if (take_locks)
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_for_each_connector_iter(connector, &conn_iter)
		drm_atomic_connector_print_state(p, connector->state);
	if (take_locks)
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	drm_connector_list_iter_end(&conn_iter);
}

/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error irq's. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must hold all modeset locks, e.g. via drm_modeset_lock_all();
 * alternatively, if this is called from an error irq handler, it should not be
 * enabled by default. (I.e. if you are debugging errors you might not care
 * that this is racy, but calling this without all modeset locks held is not
 * inherently safe.)
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
	__drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);

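/*
 * A minimal sketch of the rate-limited debugging use suggested above, with a
 * hypothetical driver error handler:
 *
 *	static void foo_error_irq(struct drm_device *dev)
 *	{
 *		struct drm_printer p = drm_info_printer(dev->dev);
 *
 *		if (printk_ratelimit())
 *			drm_state_dump(dev, &p);
 *	}
 */
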
#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	__drm_state_dump(dev, &p, true);

	return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

int drm_atomic_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(drm_atomic_debugfs_list,
					ARRAY_SIZE(drm_atomic_debugfs_list),
					minor->debugfs_root, minor);
}
#endif