/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */


#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"

static void crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}

void drm_crtc_commit_put(struct drm_crtc_commit *commit)
{
	kref_put(&commit->ref, crtc_commit_free);
}
EXPORT_SYMBOL(drm_crtc_commit_put);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state;

	if (!config->funcs->atomic_state_alloc) {
		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);
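
/*
 * Example: a minimal sketch of allocating and releasing an atomic state for
 * an update. The acquire context handling is abbreviated; see
 * drm_atomic_legacy_backoff() and the -EDEADLK handling further down for the
 * full locking dance.
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		return -ENOMEM;
 *	state->acquire_ctx = &ctx;
 *
 *	... build up the new configuration and commit it ...
 *
 *	drm_atomic_state_free(state);
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */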

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		drm_connector_unreference(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		if (state->crtcs[i].commit) {
			kfree(state->crtcs[i].commit->event);
			state->crtcs[i].commit->event = NULL;
			drm_crtc_commit_put(state->crtcs[i].commit);
		}

		state->crtcs[i].commit = NULL;
		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's ->atomic_check likely
 * relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);
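
/*
 * Example: a minimal sketch of the -EDEADLK retry dance that callers of this
 * function typically implement. hypothetical_build_update() is a placeholder
 * for whatever code re-assembles the desired configuration; on deadlock the
 * state is cleared, the locks are dropped via drm_modeset_backoff() and the
 * whole update is rebuilt from scratch.
 *
 *	retry:
 *		ret = hypothetical_build_update(state);
 *		if (ret == 0)
 *			ret = drm_atomic_commit(state);
 *		if (ret == -EDEADLK) {
 *			drm_atomic_state_clear(state);
 *			drm_modeset_backoff(state->acquire_ctx);
 *			goto retry;
 *		}
 */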

/**
 * drm_atomic_state_free - free all memory for an atomic state
 * @state: atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void drm_atomic_state_free(struct drm_atomic_state *state)
{
	struct drm_device *dev;
	struct drm_mode_config *config;

	if (!state)
		return;

	dev = state->dev;
	config = &dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
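
/*
 * Example: a minimal sketch of fetching a CRTC state and flipping its active
 * flag. A -EDEADLK encoded in the returned pointer is simply propagated to
 * the caller, which is then expected to clear the state and back off as
 * described above.
 *
 *	struct drm_crtc_state *crtc_state;
 *
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);
 *
 *	crtc_state->active = false;
 */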

static void set_out_fence_for_crtc(struct drm_atomic_state *state,
				   struct drm_crtc *crtc, s32 __user *fence_ptr)
{
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
					  struct drm_crtc *crtc)
{
	s32 __user *fence_ptr;

	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;

	return fence_ptr;
}

/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state. This
 * also updates state->enable to track whether a mode is set, but does not
 * change any other state properties such as active or mode_changed.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 struct drm_display_mode *mode)
{
	struct drm_mode_modeinfo umode;

	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	drm_property_unreference_blob(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
						 sizeof(umode),
						 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);

		drm_mode_copy(&state->mode, mode);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 mode->name, state);
	} else {
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);

/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	if (blob == state->mode_blob)
		return 0;

	drm_property_unreference_blob(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
		    drm_mode_convert_umode(&state->mode,
					   (const struct drm_mode_modeinfo *)
					   blob->data))
			return -EINVAL;

		state->mode_blob = drm_property_reference_blob(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 state->mode.name, state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);

/**
 * drm_atomic_replace_property_blob - replace a blob property
 * @blob: a pointer to the member blob to be replaced
 * @new_blob: the new blob to replace with
 * @replaced: whether the blob has been replaced
 *
 * Helper to drop the reference on the old blob (if any), take a reference on
 * the new blob and update *@blob to point at it. Sets *@replaced to true if
 * the blob actually changed.
 */
static void
drm_atomic_replace_property_blob(struct drm_property_blob **blob,
				 struct drm_property_blob *new_blob,
				 bool *replaced)
{
	struct drm_property_blob *old_blob = *blob;

	if (old_blob == new_blob)
		return;

	drm_property_unreference_blob(old_blob);
	if (new_blob)
		drm_property_reference_blob(new_blob);
	*blob = new_blob;
	*replaced = true;

	return;
}

static int
drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
					 struct drm_property_blob **blob,
					 uint64_t blob_id,
					 ssize_t expected_size,
					 bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 && expected_size != new_blob->length) {
			drm_property_unreference_blob(new_blob);
			return -EINVAL;
		}
	}

	drm_atomic_replace_property_blob(blob, new_blob, replaced);
	drm_property_unreference_blob(new_blob);

	return 0;
}

/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * Use this instead of calling crtc->atomic_set_property directly.
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_set_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_unreference_blob(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->degamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->gamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property)
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);

/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to get a property value for
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_get_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		*val = 0;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}

/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n",
				 crtc->base.id);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
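
/*
 * Example: a minimal sketch of repositioning a plane through its atomic
 * state; @fb here stands for whatever framebuffer the caller has at hand.
 * Source coordinates are in 16.16 fixed point, so a full-surface source
 * rectangle is shifted left by 16.
 *
 *	struct drm_plane_state *plane_state;
 *
 *	plane_state = drm_atomic_get_plane_state(state, plane);
 *	if (IS_ERR(plane_state))
 *		return PTR_ERR(plane_state);
 *
 *	plane_state->crtc_x = 0;
 *	plane_state->crtc_y = 0;
 *	plane_state->crtc_w = fb->width;
 *	plane_state->crtc_h = fb->height;
 *	plane_state->src_x = 0;
 *	plane_state->src_y = 0;
 *	plane_state->src_w = fb->width << 16;
 *	plane_state->src_h = fb->height << 16;
 */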

/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * Use this instead of calling plane->atomic_set_property directly.
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_set_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_unreference(fb);
	} else if (property == config->prop_in_fence_fd) {
		if (state->fence)
			return -EINVAL;

		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == config->rotation_property) {
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_plane_set_property);

/**
 * drm_atomic_plane_get_property - get property value from plane state
 * @plane: the drm plane to get a property value for
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_get_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == config->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (WARN_ON(state->crtc && !state->fb)) {
		DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
		return -EINVAL;
	} else if (WARN_ON(state->fb && !state->crtc)) {
		DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
	if (ret) {
		char *format_name = drm_get_format_name(state->fb->pixel_format);
		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
		kfree(format_name);
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("Invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_reference(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
			 connector->base.id, connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);

/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * Use this instead of calling connector->atomic_set_property directly.
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_set_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us. Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);

/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to get a property value for
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_get_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		*val = connector->dpms;
	} else if (connector->funcs->atomic_get_property) {
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

int drm_atomic_get_property(struct drm_mode_object *obj,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = property->dev;
	int ret;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
		ret = drm_atomic_connector_get_property(connector,
				connector->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
		ret = drm_atomic_crtc_get_property(crtc,
				crtc->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
		ret = drm_atomic_plane_get_property(plane,
				plane->state, property, val);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
	}

	plane_state->crtc = crtc;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= (1 << drm_plane_index(plane));
	}

	if (crtc)
		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
				 plane_state, crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
				 plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
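
/*
 * Example: a minimal sketch of moving a plane onto a CRTC and giving it a
 * framebuffer. drm_atomic_set_crtc_for_plane() handles the plane_mask
 * bookkeeping and the CRTC state acquisition, drm_atomic_set_fb_for_plane()
 * handles the framebuffer refcounting.
 *
 *	plane_state = drm_atomic_get_plane_state(state, plane);
 *	if (IS_ERR(plane_state))
 *		return PTR_ERR(plane_state);
 *
 *	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
 *	if (ret)
 *		return ret;
 *
 *	drm_atomic_set_fb_for_plane(plane_state, fb);
 */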

/**
 * drm_atomic_set_fb_for_plane - set framebuffer for plane
 * @plane_state: atomic state object for the plane
 * @fb: fb to use for the plane
 *
 * Changing the assigned framebuffer for a plane requires us to grab a reference
 * to the new fb and drop the reference to the old fb, if there is one. This
 * function takes care of all these details besides updating the pointer in the
 * state object itself.
 */
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
			    struct drm_framebuffer *fb)
{
	if (plane_state->fb)
		drm_framebuffer_unreference(plane_state->fb);
	if (fb)
		drm_framebuffer_reference(fb);
	plane_state->fb = fb;

	if (fb)
		DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
				 fb->base.id, plane_state);
	else
		DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
				 plane_state);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);

/**
 * drm_atomic_set_fence_for_plane - set fence for plane
 * @plane_state: atomic state object for the plane
 * @fence: fence to use for the plane
 *
 * Helper to set up the plane_state fence in case it is not set yet.
 * By using this, drivers don't need to worry whether the user chose
 * implicit or explicit fencing.
 *
 * This function will not set the fence on the state if it was already
 * set via the explicit fencing interfaces on the atomic ioctl. In that
 * case it simply drops the reference to @fence, since it is not stored
 * anywhere.
 *
 * Otherwise, if plane_state->fence is not yet set, this function sets
 * it to the received implicit fence.
 */
void
drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
			       struct fence *fence)
{
	if (plane_state->fence) {
		fence_put(fence);
		return;
	}

	plane_state->fence = fence;
}
EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
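
/*
 * Example: a minimal sketch of how a driver's ->prepare_fb hook might feed
 * the implicit fence of the framebuffer's backing object into the plane
 * state. Both function names are placeholders: foo_fb_get_implicit_fence()
 * stands for whatever driver mechanism returns a reference to the buffer's
 * implicit fence (or NULL). That reference is either stored in the state or
 * dropped again by drm_atomic_set_fence_for_plane() when an explicit fence
 * was already set.
 *
 *	static int foo_plane_prepare_fb(struct drm_plane *plane,
 *					struct drm_plane_state *new_state)
 *	{
 *		struct fence *fence;
 *
 *		if (!new_state->fb)
 *			return 0;
 *
 *		fence = foo_fb_get_implicit_fence(new_state->fb);
 *		drm_atomic_set_fence_for_plane(new_state, fence);
 *
 *		return 0;
 *	}
 */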

/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	if (conn_state->crtc == crtc)
		return 0;

	if (conn_state->crtc) {
		crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
								conn_state->crtc);

		crtc_state->connector_mask &=
			~(1 << drm_connector_index(conn_state->connector));

		drm_connector_unreference(conn_state->connector);
		conn_state->crtc = NULL;
	}

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			1 << drm_connector_index(conn_state->connector);

		drm_connector_reference(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
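
/*
 * Example: a minimal sketch of routing a connector to a CRTC as part of a
 * modeset, combining this helper with drm_atomic_set_mode_for_crtc() above.
 *
 *	conn_state = drm_atomic_get_connector_state(state, connector);
 *	if (IS_ERR(conn_state))
 *		return PTR_ERR(conn_state);
 *
 *	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
 *	if (ret)
 *		return ret;
 *
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);
 *
 *	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
 *	if (ret)
 *		return ret;
 *
 *	crtc_state->active = true;
 */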

/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc. Hence
 * drivers and helpers should only call this when really needed (e.g. when a
 * full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	int ret;

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look at the
	 * current configuration.
	 */
	drm_for_each_connector(connector, state->dev) {
		if (connector->state->crtc != crtc)
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state))
			return PTR_ERR(conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
1332
1333/**
Maarten Lankhorste01e9f72015-05-19 16:41:02 +02001334 * drm_atomic_add_affected_planes - add planes for crtc
1335 * @state: atomic state
1336 * @crtc: DRM crtc
1337 *
1338 * This function walks the current configuration and adds all planes
1339 * currently used by @crtc to the atomic configuration @state. This is useful
1340 * when an atomic commit also needs to check all currently enabled planes on
1341 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
1342 * to avoid special code to force-enable all planes.
1343 *
1344 * Since acquiring a plane state will always also acquire the w/w mutex of the
1345 * current CRTC for that plane (if there is any), adding all the plane states for
1346 * a CRTC will not reduce parallelism of atomic updates.
1347 *
1348 * Returns:
1349 * 0 on success, or -EDEADLK or -ENOMEM on failure. When the error is
1350 * -EDEADLK the w/w mutex code has detected a deadlock and the entire atomic
1351 * sequence must be restarted. All other errors are fatal.
1352 */
1353int
1354drm_atomic_add_affected_planes(struct drm_atomic_state *state,
1355 struct drm_crtc *crtc)
1356{
1357 struct drm_plane *plane;
1358
1359 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
1360
1361 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
1362 struct drm_plane_state *plane_state =
1363 drm_atomic_get_plane_state(state, plane);
1364
1365 if (IS_ERR(plane_state))
1366 return PTR_ERR(plane_state);
1367 }
1368 return 0;
1369}
1370EXPORT_SYMBOL(drm_atomic_add_affected_planes);
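
/*
 * Hypothetical sketch: re-enabling a CRTC and pulling in all of its planes,
 * as suggested in the documentation above. Acquiring the CRTC state first
 * also satisfies the WARN_ON in drm_atomic_add_affected_planes().
 */
static int example_reenable_crtc(struct drm_atomic_state *state,
				 struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->active = true;

	return drm_atomic_add_affected_planes(state, crtc);
}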
1371
1372/**
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001373 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
1374 * @state: atomic state
1375 *
1376 * This function should be used by legacy entry points which don't understand
1377 * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
John Hunter16d78bc2e2015-04-07 19:38:50 +08001378 * the slowpath has completed.
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001379 */
1380void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
1381{
Maarten Lankhorst81e257e2016-06-23 13:45:06 +02001382 struct drm_device *dev = state->dev;
1383 unsigned crtc_mask = 0;
1384 struct drm_crtc *crtc;
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001385 int ret;
Maarten Lankhorst81e257e2016-06-23 13:45:06 +02001386 bool global = false;
1387
1388 drm_for_each_crtc(crtc, dev) {
1389 if (crtc->acquire_ctx != state->acquire_ctx)
1390 continue;
1391
1392 crtc_mask |= drm_crtc_mask(crtc);
1393 crtc->acquire_ctx = NULL;
1394 }
1395
1396 if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
1397 global = true;
1398
1399 dev->mode_config.acquire_ctx = NULL;
1400 }
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001401
1402retry:
1403 drm_modeset_backoff(state->acquire_ctx);
1404
Maarten Lankhorst81e257e2016-06-23 13:45:06 +02001405 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001406 if (ret)
1407 goto retry;
Maarten Lankhorst81e257e2016-06-23 13:45:06 +02001408
1409 drm_for_each_crtc(crtc, dev)
1410 if (drm_crtc_mask(crtc) & crtc_mask)
1411 crtc->acquire_ctx = state->acquire_ctx;
1412
1413 if (global)
1414 dev->mode_config.acquire_ctx = state->acquire_ctx;
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001415}
1416EXPORT_SYMBOL(drm_atomic_legacy_backoff);
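
/*
 * Hedged sketch of the legacy entry-point pattern this helper exists for:
 * retry the commit until -EDEADLK goes away, using
 * drm_atomic_legacy_backoff() to drop and re-acquire all modeset locks on
 * each pass (the helper name below is illustrative).
 */
static int example_legacy_commit(struct drm_atomic_state *state)
{
	int ret;

retry:
	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_atomic_legacy_backoff(state);
		goto retry;
	}

	return ret;
}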
1417
1418/**
1419 * drm_atomic_check_only - check whether a given config would work
1420 * @state: atomic configuration to check
1421 *
1422 * Note that this function can return -EDEADLK if the driver needed to acquire
1423 * more locks but encountered a deadlock. The caller must then do the usual w/w
1424 * backoff dance and restart. All other errors are fatal.
1425 *
1426 * Returns:
1427 * 0 on success, negative error code on failure.
1428 */
1429int drm_atomic_check_only(struct drm_atomic_state *state)
1430{
Rob Clark5e743732014-12-18 16:01:51 -05001431 struct drm_device *dev = state->dev;
1432 struct drm_mode_config *config = &dev->mode_config;
Ander Conselvan de Oliveiradf63b992015-04-10 14:58:39 +03001433 struct drm_plane *plane;
1434 struct drm_plane_state *plane_state;
1435 struct drm_crtc *crtc;
1436 struct drm_crtc_state *crtc_state;
Rob Clark5e743732014-12-18 16:01:51 -05001437 int i, ret = 0;
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001438
Daniel Vetter17a38d92015-02-22 12:24:16 +01001439 DRM_DEBUG_ATOMIC("checking %p\n", state);
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001440
Ander Conselvan de Oliveiradf63b992015-04-10 14:58:39 +03001441 for_each_plane_in_state(state, plane, plane_state, i) {
1442 ret = drm_atomic_plane_check(plane, plane_state);
Rob Clark5e743732014-12-18 16:01:51 -05001443 if (ret) {
Ville Syrjälä9f4c97a2015-12-08 18:41:54 +02001444 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
1445 plane->base.id, plane->name);
Rob Clark5e743732014-12-18 16:01:51 -05001446 return ret;
1447 }
1448 }
1449
Ander Conselvan de Oliveiradf63b992015-04-10 14:58:39 +03001450 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1451 ret = drm_atomic_crtc_check(crtc, crtc_state);
Rob Clark5e743732014-12-18 16:01:51 -05001452 if (ret) {
Ville Syrjäläfa3ab4c2015-12-08 18:41:53 +02001453 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
1454 crtc->base.id, crtc->name);
Rob Clark5e743732014-12-18 16:01:51 -05001455 return ret;
1456 }
1457 }
1458
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001459 if (config->funcs->atomic_check)
Rob Clark5e743732014-12-18 16:01:51 -05001460 ret = config->funcs->atomic_check(state->dev, state);
1461
Maarten Lankhorsteed96e72017-08-15 11:57:06 +02001462 if (ret)
1463 return ret;
1464
Rob Clarkd34f20d2014-12-18 16:01:56 -05001465 if (!state->allow_modeset) {
Ander Conselvan de Oliveiradf63b992015-04-10 14:58:39 +03001466 for_each_crtc_in_state(state, crtc, crtc_state, i) {
Daniel Vetter2465ff62015-06-18 09:58:55 +02001467 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
Ville Syrjäläfa3ab4c2015-12-08 18:41:53 +02001468 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
1469 crtc->base.id, crtc->name);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001470 return -EINVAL;
1471 }
1472 }
1473 }
1474
Maarten Lankhorsteed96e72017-08-15 11:57:06 +02001475 return 0;
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001476}
1477EXPORT_SYMBOL(drm_atomic_check_only);
1478
1479/**
1480 * drm_atomic_commit - commit configuration atomically
1481 * @state: atomic configuration to commit
1482 *
1483 * Note that this function can return -EDEADLK if the driver needed to acquire
1484 * more locks but encountered a deadlock. The caller must then do the usual w/w
1485 * backoff dance and restart. All other errors are fatal.
1486 *
1487 * Also note that on successful execution ownership of @state is transferred
1488 * from the caller of this function to the function itself. The caller must not
1489 * free or in any other way access @state. If the function fails then the caller
1490 * must clean up @state itself.
1491 *
1492 * Returns:
1493 * 0 on success, negative error code on failure.
1494 */
1495int drm_atomic_commit(struct drm_atomic_state *state)
1496{
1497 struct drm_mode_config *config = &state->dev->mode_config;
1498 int ret;
1499
1500 ret = drm_atomic_check_only(state);
1501 if (ret)
1502 return ret;
1503
Daniel Vetter17a38d92015-02-22 12:24:16 +01001504	DRM_DEBUG_ATOMIC("committing %p\n", state);
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001505
1506 return config->funcs->atomic_commit(state->dev, state, false);
1507}
1508EXPORT_SYMBOL(drm_atomic_commit);
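
/*
 * Illustration of the ownership rule spelled out above (assuming this era's
 * non-refcounted state handling): the caller only cleans up @state when the
 * commit itself failed.
 */
static void example_commit_or_free(struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_commit(state);
	if (ret)
		drm_atomic_state_free(state);
	/* on success ownership has passed to the commit machinery */
}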
1509
1510/**
Maarten Lankhorstb837ba02016-04-26 16:11:35 +02001511 * drm_atomic_nonblocking_commit - atomic & nonblocking configuration commit
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001512 * @state: atomic configuration to commit
1513 *
1514 * Note that this function can return -EDEADLK if the driver needed to acquire
1515 * more locks but encountered a deadlock. The caller must then do the usual w/w
1516 * backoff dance and restart. All other errors are fatal.
1517 *
1518 * Also note that on successful execution ownership of @state is transferred
1519 * from the caller of this function to the function itself. The caller must not
1520 * free or in any other way access @state. If the function fails then the caller
1521 * must clean up @state itself.
1522 *
1523 * Returns:
1524 * 0 on success, negative error code on failure.
1525 */
Maarten Lankhorstb837ba02016-04-26 16:11:35 +02001526int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001527{
1528 struct drm_mode_config *config = &state->dev->mode_config;
1529 int ret;
1530
1531 ret = drm_atomic_check_only(state);
1532 if (ret)
1533 return ret;
1534
Maarten Lankhorstb837ba02016-04-26 16:11:35 +02001535	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
Daniel Vettercc4ceb42014-07-25 21:30:38 +02001536
1537 return config->funcs->atomic_commit(state->dev, state, true);
1538}
Maarten Lankhorstb837ba02016-04-26 16:11:35 +02001539EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001540
1541/*
1542 * The big monster ioctl
1543 */
1544
1545static struct drm_pending_vblank_event *create_vblank_event(
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09001546 struct drm_device *dev, uint64_t user_data)
Rob Clarkd34f20d2014-12-18 16:01:56 -05001547{
1548 struct drm_pending_vblank_event *e = NULL;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001549
1550 e = kzalloc(sizeof *e, GFP_KERNEL);
Daniel Vetter2dd500f2016-01-11 22:40:56 +01001551 if (!e)
1552 return NULL;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001553
1554 e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
Daniel Vetter2dd500f2016-01-11 22:40:56 +01001555 e->event.base.length = sizeof(e->event);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001556 e->event.user_data = user_data;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001557
Rob Clarkd34f20d2014-12-18 16:01:56 -05001558 return e;
1559}
1560
Rob Clarkd34f20d2014-12-18 16:01:56 -05001561static int atomic_set_prop(struct drm_atomic_state *state,
1562 struct drm_mode_object *obj, struct drm_property *prop,
1563 uint64_t prop_value)
1564{
1565 struct drm_mode_object *ref;
1566 int ret;
1567
1568 if (!drm_property_change_valid_get(prop, prop_value, &ref))
1569 return -EINVAL;
1570
1571 switch (obj->type) {
1572 case DRM_MODE_OBJECT_CONNECTOR: {
1573 struct drm_connector *connector = obj_to_connector(obj);
1574 struct drm_connector_state *connector_state;
1575
1576 connector_state = drm_atomic_get_connector_state(state, connector);
1577 if (IS_ERR(connector_state)) {
1578 ret = PTR_ERR(connector_state);
1579 break;
1580 }
1581
1582 ret = drm_atomic_connector_set_property(connector,
1583 connector_state, prop, prop_value);
1584 break;
1585 }
1586 case DRM_MODE_OBJECT_CRTC: {
1587 struct drm_crtc *crtc = obj_to_crtc(obj);
1588 struct drm_crtc_state *crtc_state;
1589
1590 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1591 if (IS_ERR(crtc_state)) {
1592 ret = PTR_ERR(crtc_state);
1593 break;
1594 }
1595
1596 ret = drm_atomic_crtc_set_property(crtc,
1597 crtc_state, prop, prop_value);
1598 break;
1599 }
1600 case DRM_MODE_OBJECT_PLANE: {
1601 struct drm_plane *plane = obj_to_plane(obj);
1602 struct drm_plane_state *plane_state;
1603
1604 plane_state = drm_atomic_get_plane_state(state, plane);
1605 if (IS_ERR(plane_state)) {
1606 ret = PTR_ERR(plane_state);
1607 break;
1608 }
1609
1610 ret = drm_atomic_plane_set_property(plane,
1611 plane_state, prop, prop_value);
1612 break;
1613 }
1614 default:
1615 ret = -EINVAL;
1616 break;
1617 }
1618
1619 drm_property_change_valid_put(prop, ref);
1620 return ret;
1621}
1622
Maarten Lankhorst0f45c262015-11-11 11:29:09 +01001623/**
Maarten Lankhorst9744bf42015-11-24 10:34:34 +01001624 * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
Maarten Lankhorst0f45c262015-11-11 11:29:09 +01001625 *
1626 * @dev: drm device to check.
1627 * @plane_mask: plane mask for planes that were updated.
1628 * @ret: return value, can be -EDEADLK for a retry.
1629 *
1630 * Before doing an update, plane->old_fb is set to plane->fb,
1631 * but before dropping the locks old_fb needs to be set to NULL
1632 * and plane->fb updated. This is a common operation for each
1633 * atomic update, so this call is split off as a helper.
1634 */
1635void drm_atomic_clean_old_fb(struct drm_device *dev,
1636 unsigned plane_mask,
1637 int ret)
1638{
1639 struct drm_plane *plane;
1640
1641 /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
1642 * locks (ie. while it is still safe to deref plane->state). We
1643 * need to do this here because the driver entry points cannot
1644 * distinguish between legacy and atomic ioctls.
1645 */
1646 drm_for_each_plane_mask(plane, dev, plane_mask) {
1647 if (ret == 0) {
1648 struct drm_framebuffer *new_fb = plane->state->fb;
1649 if (new_fb)
1650 drm_framebuffer_reference(new_fb);
1651 plane->fb = new_fb;
1652 plane->crtc = plane->state->crtc;
1653
1654 if (plane->old_fb)
1655 drm_framebuffer_unreference(plane->old_fb);
1656 }
1657 plane->old_fb = NULL;
1658 }
1659}
1660EXPORT_SYMBOL(drm_atomic_clean_old_fb);
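
/*
 * Sketch of the calling convention documented above, in a hypothetical
 * legacy plane update: plane->old_fb is noted before building the state, and
 * drm_atomic_clean_old_fb() fixes up the legacy fb pointers before the locks
 * are dropped, on both the success and failure paths.
 */
static int example_legacy_plane_update(struct drm_atomic_state *state,
				       struct drm_plane *plane,
				       struct drm_framebuffer *fb)
{
	struct drm_plane_state *plane_state;
	int ret;

	plane->old_fb = plane->fb;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto out;
	}

	drm_atomic_set_fb_for_plane(plane_state, fb);
	ret = drm_atomic_commit(state);
out:
	drm_atomic_clean_old_fb(state->dev, 1 << drm_plane_index(plane), ret);
	return ret;
}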
1661
Gustavo Padovan50696b32016-11-22 09:11:28 +09001662/**
1663 * DOC: explicit fencing properties
1664 *
1665 * Explicit fencing allows userspace to control the buffer synchronization
1666 * between devices. A fence or a group of fences is transferred to/from
1667 * userspace using Sync File fds, and there are two DRM properties for that:
1668 * IN_FENCE_FD on each DRM Plane to send fences to the kernel, and
1669 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
1670 *
1671 * As a contrast, with implicit fencing the kernel keeps track of any
1672 * ongoing rendering, and automatically ensures that the atomic update waits
1673 * for any pending rendering to complete. For shared buffers represented with
1674 * a struct &dma_buf this is tracked in &reservation_object structures.
1675 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
1676 * whereas explicit fencing is what Android wants.
1677 *
1678 * "IN_FENCE_FD”:
1679 * Use this property to pass a fence that DRM should wait on before
1680 * proceeding with the Atomic Commit request and show the framebuffer for
1681 * the plane on the screen. The fence can be either a normal fence or a
1682 * merged one, the sync_file framework will handle both cases and use a
1683 * fence_array if a merged fence is received. Passing -1 here means no
1684 * fences to wait on.
1685 *
1686 * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
1687 * it will only check if the Sync File is a valid one.
1688 *
1689 * On the driver side the fence is stored in the @fence member of
1690 * struct &drm_plane_state. Drivers which also support implicit fencing
1691 * should set the implicit fence using drm_atomic_set_fence_for_plane(),
1692 * to make sure there's consistent behaviour between drivers in precedence
1693 * of implicit vs. explicit fencing.
1694 *
1695 * "OUT_FENCE_PTR”:
1696 * Use this property to pass a file descriptor pointer to DRM. Once the
1697 * Atomic Commit request call returns, OUT_FENCE_PTR will be filled with
1698 * the file descriptor number of a Sync File. This Sync File contains the
1699 * CRTC fence that will be signaled when all framebuffers present on the
1700 * Atomic Commit * request for that given CRTC are scanned out on the
1701 * screen.
1702 *
1703 * The Atomic Commit request fails if an invalid pointer is passed. If the
1704 * Atomic Commit request fails for any other reason the out fence fd
1705 * returned will be -1. On a Atomic Commit with the
1706 * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
1707 *
1708 * Note that out-fences don't have a special interface to drivers and are
1709 * internally represented by a struct &drm_pending_vblank_event in struct
1710 * &drm_crtc_state, which is also used by the nonblocking atomic commit
1711 * helpers and for the DRM event handling for existing userspace.
1712 */
1713
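/*
 * Driver-side sketch for the IN_FENCE_FD note above (the fence lookup helper
 * is made up for illustration): a ->prepare_fb() implementation that also
 * supports implicit fencing hands the reservation fence to
 * drm_atomic_set_fence_for_plane(), which will not overwrite an explicit
 * fence already set by userspace.
 */
static int example_prepare_fb(struct drm_plane *plane,
			      struct drm_plane_state *new_state)
{
	struct fence *fence;

	if (!new_state->fb)
		return 0;

	/* hypothetical helper returning the framebuffer's implicit fence */
	fence = example_get_implicit_fence(new_state->fb);
	if (fence)
		drm_atomic_set_fence_for_plane(new_state, fence);

	return 0;
}
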
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09001714struct drm_out_fence_state {
Gustavo Padovand6eefcb2017-01-13 12:22:09 -02001715 s32 __user *out_fence_ptr;
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09001716 struct sync_file *sync_file;
1717 int fd;
1718};
1719
1720static int setup_out_fence(struct drm_out_fence_state *fence_state,
1721 struct fence *fence)
1722{
1723 fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
1724 if (fence_state->fd < 0)
1725 return fence_state->fd;
1726
1727 if (put_user(fence_state->fd, fence_state->out_fence_ptr))
1728 return -EFAULT;
1729
1730 fence_state->sync_file = sync_file_create(fence);
1731 if (!fence_state->sync_file)
1732 return -ENOMEM;
1733
1734 return 0;
1735}
1736
1737static int prepare_crtc_signaling(struct drm_device *dev,
1738 struct drm_atomic_state *state,
1739 struct drm_mode_atomic *arg,
1740 struct drm_file *file_priv,
1741 struct drm_out_fence_state **fence_state,
1742 unsigned int *num_fences)
1743{
1744 struct drm_crtc *crtc;
1745 struct drm_crtc_state *crtc_state;
1746 int i, ret;
1747
1748 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
1749 return 0;
1750
1751 for_each_crtc_in_state(state, crtc, crtc_state, i) {
Gustavo Padovand6eefcb2017-01-13 12:22:09 -02001752 s32 __user *fence_ptr;
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09001753
1754 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
1755
1756 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
1757 struct drm_pending_vblank_event *e;
1758
1759 e = create_vblank_event(dev, arg->user_data);
1760 if (!e)
1761 return -ENOMEM;
1762
1763 crtc_state->event = e;
1764 }
1765
1766 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
1767 struct drm_pending_vblank_event *e = crtc_state->event;
1768
1769 if (!file_priv)
1770 continue;
1771
1772 ret = drm_event_reserve_init(dev, file_priv, &e->base,
1773 &e->event.base);
1774 if (ret) {
1775 kfree(e);
1776 crtc_state->event = NULL;
1777 return ret;
1778 }
1779 }
1780
1781 if (fence_ptr) {
1782 struct fence *fence;
1783 struct drm_out_fence_state *f;
1784
1785 f = krealloc(*fence_state, sizeof(**fence_state) *
1786 (*num_fences + 1), GFP_KERNEL);
1787 if (!f)
1788 return -ENOMEM;
1789
1790 memset(&f[*num_fences], 0, sizeof(*f));
1791
1792 f[*num_fences].out_fence_ptr = fence_ptr;
1793 *fence_state = f;
1794
Gustavo Padovan34e0b7d2016-12-06 15:47:17 -02001795 fence = drm_crtc_create_fence(crtc);
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09001796 if (!fence)
1797 return -ENOMEM;
1798
1799 ret = setup_out_fence(&f[(*num_fences)++], fence);
1800 if (ret) {
1801 fence_put(fence);
1802 return ret;
1803 }
1804
1805 crtc_state->event->base.fence = fence;
1806 }
1807 }
1808
1809 return 0;
1810}
1811
1812static void complete_crtc_signaling(struct drm_device *dev,
1813 struct drm_atomic_state *state,
1814 struct drm_out_fence_state *fence_state,
1815 unsigned int num_fences,
1816 bool install_fds)
1817{
1818 struct drm_crtc *crtc;
1819 struct drm_crtc_state *crtc_state;
1820 int i;
1821
1822 if (install_fds) {
1823 for (i = 0; i < num_fences; i++)
1824 fd_install(fence_state[i].fd,
1825 fence_state[i].sync_file->file);
1826
1827 kfree(fence_state);
1828 return;
1829 }
1830
1831 for_each_crtc_in_state(state, crtc, crtc_state, i) {
Maarten Lankhorst4dfd0ed2017-01-31 10:25:25 +01001832 struct drm_pending_vblank_event *event = crtc_state->event;
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09001833 /*
Maarten Lankhorst4dfd0ed2017-01-31 10:25:25 +01001834 * Free the allocated event. drm_atomic_helper_setup_commit
1835 * can allocate an event too, so only free it if it's ours
1836 * to prevent a double free in drm_atomic_state_clear.
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09001837 */
Maarten Lankhorst4dfd0ed2017-01-31 10:25:25 +01001838 if (event && (event->base.fence || event->base.file_priv)) {
1839 drm_event_cancel_free(dev, &event->base);
1840 crtc_state->event = NULL;
1841 }
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09001842 }
1843
1844 if (!fence_state)
1845 return;
1846
1847 for (i = 0; i < num_fences; i++) {
1848 if (fence_state[i].sync_file)
1849 fput(fence_state[i].sync_file->file);
1850 if (fence_state[i].fd >= 0)
1851 put_unused_fd(fence_state[i].fd);
1852
1853 /* If this fails log error to the user */
1854 if (fence_state[i].out_fence_ptr &&
1855 put_user(-1, fence_state[i].out_fence_ptr))
1856 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
1857 }
1858
1859 kfree(fence_state);
1860}
1861
Rob Clarkd34f20d2014-12-18 16:01:56 -05001862int drm_mode_atomic_ioctl(struct drm_device *dev,
1863 void *data, struct drm_file *file_priv)
1864{
1865 struct drm_mode_atomic *arg = data;
1866 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
1867 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
1868 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
1869 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
1870 unsigned int copied_objs, copied_props;
1871 struct drm_atomic_state *state;
1872 struct drm_modeset_acquire_ctx ctx;
1873 struct drm_plane *plane;
Maarten Lankhorst47589a52017-08-14 12:07:21 +02001874 struct drm_out_fence_state *fence_state;
Maarten Lankhorst45723722015-11-11 11:29:08 +01001875 unsigned plane_mask;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001876 int ret = 0;
Maarten Lankhorst47589a52017-08-14 12:07:21 +02001877 unsigned int i, j, num_fences;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001878
1879 /* disallow for drivers not supporting atomic: */
1880 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
1881 return -EINVAL;
1882
1883 /* disallow for userspace that has not enabled atomic cap (even
1884 * though this may be a bit overkill, since legacy userspace
1885 * wouldn't know how to call this ioctl)
1886 */
1887 if (!file_priv->atomic)
1888 return -EINVAL;
1889
1890 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
1891 return -EINVAL;
1892
1893 if (arg->reserved)
1894 return -EINVAL;
1895
1896 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
1897 !dev->mode_config.async_page_flip)
1898 return -EINVAL;
1899
1900 /* can't test and expect an event at the same time. */
1901 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
1902 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1903 return -EINVAL;
1904
1905 drm_modeset_acquire_init(&ctx, 0);
1906
1907 state = drm_atomic_state_alloc(dev);
1908 if (!state)
1909 return -ENOMEM;
1910
1911 state->acquire_ctx = &ctx;
1912 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
1913
1914retry:
Maarten Lankhorst45723722015-11-11 11:29:08 +01001915 plane_mask = 0;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001916 copied_objs = 0;
1917 copied_props = 0;
Maarten Lankhorst47589a52017-08-14 12:07:21 +02001918 fence_state = NULL;
1919 num_fences = 0;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001920
1921 for (i = 0; i < arg->count_objs; i++) {
1922 uint32_t obj_id, count_props;
1923 struct drm_mode_object *obj;
1924
1925 if (get_user(obj_id, objs_ptr + copied_objs)) {
1926 ret = -EFAULT;
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02001927 goto out;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001928 }
1929
1930 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
Dave Airlieb164d312016-04-27 11:10:09 +10001931 if (!obj) {
1932 ret = -ENOENT;
1933 goto out;
1934 }
1935
1936 if (!obj->properties) {
1937 drm_mode_object_unreference(obj);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001938 ret = -ENOENT;
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02001939 goto out;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001940 }
1941
Rob Clarkd34f20d2014-12-18 16:01:56 -05001942 if (get_user(count_props, count_props_ptr + copied_objs)) {
Dave Airlieb164d312016-04-27 11:10:09 +10001943 drm_mode_object_unreference(obj);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001944 ret = -EFAULT;
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02001945 goto out;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001946 }
1947
1948 copied_objs++;
1949
1950 for (j = 0; j < count_props; j++) {
1951 uint32_t prop_id;
1952 uint64_t prop_value;
1953 struct drm_property *prop;
1954
1955 if (get_user(prop_id, props_ptr + copied_props)) {
Dave Airlieb164d312016-04-27 11:10:09 +10001956 drm_mode_object_unreference(obj);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001957 ret = -EFAULT;
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02001958 goto out;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001959 }
1960
Maarten Lankhorstf92f0532016-09-08 12:30:01 +02001961 prop = drm_mode_obj_find_prop_id(obj, prop_id);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001962 if (!prop) {
Dave Airlieb164d312016-04-27 11:10:09 +10001963 drm_mode_object_unreference(obj);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001964 ret = -ENOENT;
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02001965 goto out;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001966 }
1967
Guenter Roeck42c58142015-01-12 21:12:17 -08001968 if (copy_from_user(&prop_value,
1969 prop_values_ptr + copied_props,
1970 sizeof(prop_value))) {
Dave Airlieb164d312016-04-27 11:10:09 +10001971 drm_mode_object_unreference(obj);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001972 ret = -EFAULT;
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02001973 goto out;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001974 }
1975
1976 ret = atomic_set_prop(state, obj, prop, prop_value);
Dave Airlieb164d312016-04-27 11:10:09 +10001977 if (ret) {
1978 drm_mode_object_unreference(obj);
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02001979 goto out;
Dave Airlieb164d312016-04-27 11:10:09 +10001980 }
Rob Clarkd34f20d2014-12-18 16:01:56 -05001981
1982 copied_props++;
1983 }
Maarten Lankhorsta9cc54e2015-06-24 08:59:24 +02001984
Maarten Lankhorstc4749c92015-08-31 12:25:04 +02001985 if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
1986 !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
Maarten Lankhorsta9cc54e2015-06-24 08:59:24 +02001987 plane = obj_to_plane(obj);
1988 plane_mask |= (1 << drm_plane_index(plane));
1989 plane->old_fb = plane->fb;
1990 }
Dave Airlieb164d312016-04-27 11:10:09 +10001991 drm_mode_object_unreference(obj);
Rob Clarkd34f20d2014-12-18 16:01:56 -05001992 }
1993
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09001994 ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
1995 &num_fences);
1996 if (ret)
1997 goto out;
Rob Clarkd34f20d2014-12-18 16:01:56 -05001998
1999 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
Maarten Lankhorstc4749c92015-08-31 12:25:04 +02002000 /*
2001 * Unlike commit, check_only does not clean up state.
2002 * Below we call drm_atomic_state_free for it.
2003 */
Rob Clarkd34f20d2014-12-18 16:01:56 -05002004 ret = drm_atomic_check_only(state);
Rob Clarkd34f20d2014-12-18 16:01:56 -05002005 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
Maarten Lankhorstb837ba02016-04-26 16:11:35 +02002006 ret = drm_atomic_nonblocking_commit(state);
Rob Clarkd34f20d2014-12-18 16:01:56 -05002007 } else {
2008 ret = drm_atomic_commit(state);
2009 }
2010
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02002011out:
Maarten Lankhorst0f45c262015-11-11 11:29:09 +01002012 drm_atomic_clean_old_fb(dev, plane_mask, ret);
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02002013
Gustavo Padovan710c1ed2016-11-16 22:00:21 +09002014 complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
Maarten Lankhorstc4749c92015-08-31 12:25:04 +02002015
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02002016 if (ret == -EDEADLK) {
2017 drm_atomic_state_clear(state);
2018 drm_modeset_backoff(&ctx);
2019 goto retry;
2020 }
2021
Maarten Lankhorstc4749c92015-08-31 12:25:04 +02002022 if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
Maarten Lankhorstec9f9322015-06-24 08:59:25 +02002023 drm_atomic_state_free(state);
Rob Clarkd34f20d2014-12-18 16:01:56 -05002024
2025 drm_modeset_drop_locks(&ctx);
2026 drm_modeset_acquire_fini(&ctx);
2027
2028 return ret;
Rob Clarkd34f20d2014-12-18 16:01:56 -05002029}