/*
 * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18
19#include "msm_drv.h"
20#include "msm_kms.h"
21#include "msm_gem.h"
Rob Clarkfde5de62016-03-15 15:35:08 -040022#include "msm_fence.h"
Narendra Muppalla77b32932017-05-10 13:53:11 -070023#include "sde_trace.h"
Rob Clarkcf3a7e42014-11-08 13:21:06 -050024
25struct msm_commit {
Rob Clark0b776d42015-01-30 17:04:45 -050026 struct drm_device *dev;
Rob Clarkcf3a7e42014-11-08 13:21:06 -050027 struct drm_atomic_state *state;
Rob Clarkf86afec2014-11-25 12:41:18 -050028 uint32_t crtc_mask;
Alan Kwongefd65132017-08-07 22:36:51 -040029 bool nonblock;
Sandeep Pandaf48c46a2016-10-24 09:48:50 +053030 struct kthread_work commit_work;
Rob Clarkcf3a7e42014-11-08 13:21:06 -050031};
32
Rob Clarkf86afec2014-11-25 12:41:18 -050033/* block until specified crtcs are no longer pending update, and
34 * atomically mark them as pending update
35 */
36static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
37{
38 int ret;
39
40 spin_lock(&priv->pending_crtcs_event.lock);
41 ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
42 !(priv->pending_crtcs & crtc_mask));
43 if (ret == 0) {
44 DBG("start: %08x", crtc_mask);
45 priv->pending_crtcs |= crtc_mask;
46 }
47 spin_unlock(&priv->pending_crtcs_event.lock);
48
49 return ret;
50}
51
52/* clear specified crtcs (no longer pending update)
53 */
54static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
55{
56 spin_lock(&priv->pending_crtcs_event.lock);
57 DBG("end: %08x", crtc_mask);
58 priv->pending_crtcs &= ~crtc_mask;
59 wake_up_all_locked(&priv->pending_crtcs_event);
60 spin_unlock(&priv->pending_crtcs_event.lock);
61}
62
Rob Clark0b776d42015-01-30 17:04:45 -050063static void commit_destroy(struct msm_commit *c)
64{
65 end_atomic(c->dev->dev_private, c->crtc_mask);
Alan Kwongefd65132017-08-07 22:36:51 -040066 if (c->nonblock)
67 kfree(c);
Rob Clark0b776d42015-01-30 17:04:45 -050068}
69
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -040070static void msm_atomic_wait_for_commit_done(
71 struct drm_device *dev,
Lloyd Atkinson940340d2016-12-09 14:49:37 -050072 struct drm_atomic_state *old_state)
Hai Li0a5c9aa2015-04-28 19:35:37 -040073{
74 struct drm_crtc *crtc;
Daniel Vetter8d76b792016-06-02 15:41:53 +020075 struct drm_crtc_state *crtc_state;
Hai Li0a5c9aa2015-04-28 19:35:37 -040076 struct msm_drm_private *priv = old_state->dev->dev_private;
77 struct msm_kms *kms = priv->kms;
Hai Li0a5c9aa2015-04-28 19:35:37 -040078 int i;
79
Daniel Vetter8d76b792016-06-02 15:41:53 +020080 for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
Hai Li0a5c9aa2015-04-28 19:35:37 -040081 if (!crtc->state->enable)
82 continue;
83
84 /* Legacy cursor ioctls are completely unsynced, and userspace
85 * relies on that (by doing tons of cursor updates). */
86 if (old_state->legacy_cursor_update)
87 continue;
88
89 kms->funcs->wait_for_crtc_commit_done(kms, crtc);
90 }
91}
92
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -040093static void
94msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
95{
96 struct drm_connector *connector;
97 struct drm_connector_state *old_conn_state;
98 struct drm_crtc *crtc;
99 struct drm_crtc_state *old_crtc_state;
100 int i;
101
Narendra Muppalla77b32932017-05-10 13:53:11 -0700102 SDE_ATRACE_BEGIN("msm_disable");
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400103 for_each_connector_in_state(old_state, connector, old_conn_state, i) {
104 const struct drm_encoder_helper_funcs *funcs;
105 struct drm_encoder *encoder;
106 struct drm_crtc_state *old_crtc_state;
107 unsigned int crtc_idx;
108
109 /*
110 * Shut down everything that's in the changeset and currently
111 * still on. So need to check the old, saved state.
112 */
113 if (!old_conn_state->crtc)
114 continue;
115
116 crtc_idx = drm_crtc_index(old_conn_state->crtc);
Dhaval Patel04c7e8e2016-09-26 20:14:31 -0700117 old_crtc_state = drm_atomic_get_existing_crtc_state(old_state,
118 old_conn_state->crtc);
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400119
120 if (!old_crtc_state->active ||
121 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
122 continue;
123
124 encoder = old_conn_state->best_encoder;
125
126 /* We shouldn't get this far if we didn't previously have
127 * an encoder.. but WARN_ON() rather than explode.
128 */
129 if (WARN_ON(!encoder))
130 continue;
131
132 if (msm_is_mode_seamless(
Raviteja Tamatam68892de2017-06-20 04:47:19 +0530133 &connector->encoder->crtc->state->mode) ||
134 msm_is_mode_seamless_vrr(
135 &connector->encoder->crtc->state->adjusted_mode))
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400136 continue;
137
Jeykumar Sankaran69934622017-05-31 18:16:25 -0700138 if (msm_is_mode_seamless_dms(
Raviteja Tamatam68892de2017-06-20 04:47:19 +0530139 &connector->encoder->crtc->state->adjusted_mode))
Jeykumar Sankaran69934622017-05-31 18:16:25 -0700140 continue;
141
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400142 funcs = encoder->helper_private;
143
144 DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
145 encoder->base.id, encoder->name);
146
147 /*
148 * Each encoder has at most one connector (since we always steal
149 * it away), so we won't call disable hooks twice.
150 */
151 drm_bridge_disable(encoder->bridge);
152
153 /* Right function depends upon target state. */
154 if (connector->state->crtc && funcs->prepare)
155 funcs->prepare(encoder);
156 else if (funcs->disable)
157 funcs->disable(encoder);
158 else
159 funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
160
161 drm_bridge_post_disable(encoder->bridge);
162 }
163
164 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
165 const struct drm_crtc_helper_funcs *funcs;
166
167 /* Shut down everything that needs a full modeset. */
168 if (!drm_atomic_crtc_needs_modeset(crtc->state))
169 continue;
170
171 if (!old_crtc_state->active)
172 continue;
173
Raviteja Tamatam68892de2017-06-20 04:47:19 +0530174 if (msm_is_mode_seamless(&crtc->state->mode) ||
175 msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400176 continue;
177
Jeykumar Sankaran69934622017-05-31 18:16:25 -0700178 if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
179 continue;
180
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400181 funcs = crtc->helper_private;
182
183 DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
184 crtc->base.id);
185
186 /* Right function depends upon target state. */
187 if (crtc->state->enable && funcs->prepare)
188 funcs->prepare(crtc);
189 else if (funcs->disable)
190 funcs->disable(crtc);
191 else
192 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
193 }
Narendra Muppalla77b32932017-05-10 13:53:11 -0700194 SDE_ATRACE_END("msm_disable");
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400195}
196
197static void
198msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
199{
200 struct drm_crtc *crtc;
201 struct drm_crtc_state *old_crtc_state;
202 struct drm_connector *connector;
203 struct drm_connector_state *old_conn_state;
204 int i;
205
206 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
207 const struct drm_crtc_helper_funcs *funcs;
208
209 if (!crtc->state->mode_changed)
210 continue;
211
212 funcs = crtc->helper_private;
213
214 if (crtc->state->enable && funcs->mode_set_nofb) {
215 DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
216 crtc->base.id);
217
218 funcs->mode_set_nofb(crtc);
219 }
220 }
221
222 for_each_connector_in_state(old_state, connector, old_conn_state, i) {
223 const struct drm_encoder_helper_funcs *funcs;
224 struct drm_crtc_state *new_crtc_state;
225 struct drm_encoder *encoder;
226 struct drm_display_mode *mode, *adjusted_mode;
227
228 if (!connector->state->best_encoder)
229 continue;
230
231 encoder = connector->state->best_encoder;
232 funcs = encoder->helper_private;
233 new_crtc_state = connector->state->crtc->state;
234 mode = &new_crtc_state->mode;
235 adjusted_mode = &new_crtc_state->adjusted_mode;
236
237 if (!new_crtc_state->mode_changed)
238 continue;
239
240 DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
241 encoder->base.id, encoder->name);
242
243 /*
244 * Each encoder has at most one connector (since we always steal
245 * it away), so we won't call mode_set hooks twice.
246 */
247 if (funcs->mode_set)
248 funcs->mode_set(encoder, mode, adjusted_mode);
249
250 drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
251 }
252}
253
254/**
255 * msm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
256 * @dev: DRM device
257 * @old_state: atomic state object with old state structures
258 *
259 * This function shuts down all the outputs that need to be shut down and
260 * prepares them (if required) with the new mode.
261 *
262 * For compatibility with legacy crtc helpers this should be called before
263 * drm_atomic_helper_commit_planes(), which is what the default commit function
264 * does. But drivers with different needs can group the modeset commits together
265 * and do the plane commits at the end. This is useful for drivers doing runtime
266 * PM since planes updates then only happen when the CRTC is actually enabled.
267 */
268void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
269 struct drm_atomic_state *old_state)
270{
271 msm_disable_outputs(dev, old_state);
272
273 drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
274
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400275 msm_crtc_set_mode(dev, old_state);
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400276}
277
278/**
279 * msm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
280 * @dev: DRM device
281 * @old_state: atomic state object with old state structures
282 *
283 * This function enables all the outputs with the new configuration which had to
284 * be turned off for the update.
285 *
286 * For compatibility with legacy crtc helpers this should be called after
287 * drm_atomic_helper_commit_planes(), which is what the default commit function
288 * does. But drivers with different needs can group the modeset commits together
289 * and do the plane commits at the end. This is useful for drivers doing runtime
290 * PM since planes updates then only happen when the CRTC is actually enabled.
291 */
Lloyd Atkinson0b7ff592016-05-30 14:01:10 -0400292static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400293 struct drm_atomic_state *old_state)
294{
295 struct drm_crtc *crtc;
296 struct drm_crtc_state *old_crtc_state;
297 struct drm_connector *connector;
298 struct drm_connector_state *old_conn_state;
Lloyd Atkinson0b7ff592016-05-30 14:01:10 -0400299 struct msm_drm_private *priv = dev->dev_private;
300 struct msm_kms *kms = priv->kms;
301 int bridge_enable_count = 0;
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400302 int i;
303
Narendra Muppalla77b32932017-05-10 13:53:11 -0700304 SDE_ATRACE_BEGIN("msm_enable");
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400305 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
306 const struct drm_crtc_helper_funcs *funcs;
307
308 /* Need to filter out CRTCs where only planes change. */
309 if (!drm_atomic_crtc_needs_modeset(crtc->state))
310 continue;
311
312 if (!crtc->state->active)
313 continue;
314
Raviteja Tamatam68892de2017-06-20 04:47:19 +0530315 if (msm_is_mode_seamless(&crtc->state->mode) ||
316 msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400317 continue;
318
319 funcs = crtc->helper_private;
320
321 if (crtc->state->enable) {
322 DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
323 crtc->base.id);
324
325 if (funcs->enable)
326 funcs->enable(crtc);
327 else
328 funcs->commit(crtc);
329 }
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400330
Lloyd Atkinson940340d2016-12-09 14:49:37 -0500331 if (msm_needs_vblank_pre_modeset(&crtc->state->adjusted_mode))
332 drm_crtc_wait_one_vblank(crtc);
333 }
Clarence Ipf99ccea2016-07-05 11:41:07 -0400334
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400335 for_each_connector_in_state(old_state, connector, old_conn_state, i) {
336 const struct drm_encoder_helper_funcs *funcs;
337 struct drm_encoder *encoder;
338
339 if (!connector->state->best_encoder)
340 continue;
341
342 if (!connector->state->crtc->state->active ||
343 !drm_atomic_crtc_needs_modeset(
344 connector->state->crtc->state))
345 continue;
346
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400347 encoder = connector->state->best_encoder;
348 funcs = encoder->helper_private;
349
350 DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
351 encoder->base.id, encoder->name);
352
353 /*
354 * Each encoder has at most one connector (since we always steal
355 * it away), so we won't call enable hooks twice.
356 */
357 drm_bridge_pre_enable(encoder->bridge);
Lloyd Atkinson0b7ff592016-05-30 14:01:10 -0400358 ++bridge_enable_count;
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400359
360 if (funcs->enable)
361 funcs->enable(encoder);
362 else
363 funcs->commit(encoder);
Lloyd Atkinson0b7ff592016-05-30 14:01:10 -0400364 }
365
366 if (kms->funcs->commit) {
367 DRM_DEBUG_ATOMIC("triggering commit\n");
368 kms->funcs->commit(kms, old_state);
369 }
370
371 /* If no bridges were pre_enabled, skip iterating over them again */
Narendra Muppalla77b32932017-05-10 13:53:11 -0700372 if (bridge_enable_count == 0) {
373 SDE_ATRACE_END("msm_enable");
Lloyd Atkinson0b7ff592016-05-30 14:01:10 -0400374 return;
Narendra Muppalla77b32932017-05-10 13:53:11 -0700375 }
Lloyd Atkinson0b7ff592016-05-30 14:01:10 -0400376
377 for_each_connector_in_state(old_state, connector, old_conn_state, i) {
378 struct drm_encoder *encoder;
379
380 if (!connector->state->best_encoder)
381 continue;
382
383 if (!connector->state->crtc->state->active ||
384 !drm_atomic_crtc_needs_modeset(
385 connector->state->crtc->state))
386 continue;
387
388 encoder = connector->state->best_encoder;
389
390 DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
391 encoder->base.id, encoder->name);
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400392
393 drm_bridge_enable(encoder->bridge);
394 }
Narendra Muppalla77b32932017-05-10 13:53:11 -0700395 SDE_ATRACE_END("msm_enable");
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400396}
397
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500398/* The (potentially) asynchronous part of the commit. At this point
399 * nothing can fail short of armageddon.
400 */
Sandeep Pandaf48c46a2016-10-24 09:48:50 +0530401static void complete_commit(struct msm_commit *c)
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500402{
403 struct drm_atomic_state *state = c->state;
404 struct drm_device *dev = state->dev;
Rob Clark0b776d42015-01-30 17:04:45 -0500405 struct msm_drm_private *priv = dev->dev_private;
406 struct msm_kms *kms = priv->kms;
407
Gustavo Padovanf6ce4102016-09-12 16:08:11 -0300408 drm_atomic_helper_wait_for_fences(dev, state, false);
Rob Clarkba00c3f2016-03-16 18:18:17 -0400409
Rob Clark0b776d42015-01-30 17:04:45 -0500410 kms->funcs->prepare_commit(kms, state);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500411
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400412 msm_atomic_helper_commit_modeset_disables(dev, state);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500413
Liu Ying2b58e982016-08-29 17:12:03 +0800414 drm_atomic_helper_commit_planes(dev, state, 0);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500415
Lloyd Atkinsonaf7952d2016-06-26 22:41:26 -0400416 msm_atomic_helper_commit_modeset_enables(dev, state);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500417
Rob Clarkf86afec2014-11-25 12:41:18 -0500418 /* NOTE: _wait_for_vblanks() only waits for vblank on
419 * enabled CRTCs. So we end up faulting when disabling
420 * due to (potentially) unref'ing the outgoing fb's
421 * before the vblank when the disable has latched.
422 *
423 * But if it did wait on disabled (or newly disabled)
424 * CRTCs, that would be racy (ie. we could have missed
425 * the irq. We need some way to poll for pipe shut
426 * down. Or just live with occasionally hitting the
427 * timeout in the CRTC disable path (which really should
428 * not be critical path)
429 */
430
Lloyd Atkinson940340d2016-12-09 14:49:37 -0500431 msm_atomic_wait_for_commit_done(dev, state);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500432
433 drm_atomic_helper_cleanup_planes(dev, state);
434
Rob Clark0b776d42015-01-30 17:04:45 -0500435 kms->funcs->complete_commit(kms, state);
436
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500437 drm_atomic_state_free(state);
438
Rob Clark0b776d42015-01-30 17:04:45 -0500439 commit_destroy(c);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500440}
441
Sandeep Pandaf48c46a2016-10-24 09:48:50 +0530442static void _msm_drm_commit_work_cb(struct kthread_work *work)
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500443{
Sandeep Pandaf48c46a2016-10-24 09:48:50 +0530444 struct msm_commit *commit = NULL;
445
446 if (!work) {
447 DRM_ERROR("%s: Invalid commit work data!\n", __func__);
448 return;
449 }
450
451 commit = container_of(work, struct msm_commit, commit_work);
452
Narendra Muppalla77b32932017-05-10 13:53:11 -0700453 SDE_ATRACE_BEGIN("complete_commit");
Sandeep Pandaf48c46a2016-10-24 09:48:50 +0530454 complete_commit(commit);
Narendra Muppalla77b32932017-05-10 13:53:11 -0700455 SDE_ATRACE_END("complete_commit");
Sandeep Pandaf48c46a2016-10-24 09:48:50 +0530456}
457
Alan Kwongefd65132017-08-07 22:36:51 -0400458static struct msm_commit *commit_init(struct drm_atomic_state *state,
459 bool nonblock)
Sandeep Pandaf48c46a2016-10-24 09:48:50 +0530460{
461 struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
462
463 if (!c)
464 return NULL;
465
466 c->dev = state->dev;
467 c->state = state;
Alan Kwongefd65132017-08-07 22:36:51 -0400468 c->nonblock = nonblock;
Sandeep Pandaf48c46a2016-10-24 09:48:50 +0530469
470 kthread_init_work(&c->commit_work, _msm_drm_commit_work_cb);
471
472 return c;
473}
474
475/* Start display thread function */
Lloyd Atkinson516fca42017-08-24 10:06:32 -0400476static void msm_atomic_commit_dispatch(struct drm_device *dev,
Sandeep Pandaf48c46a2016-10-24 09:48:50 +0530477 struct drm_atomic_state *state, struct msm_commit *commit)
478{
479 struct msm_drm_private *priv = dev->dev_private;
480 struct drm_crtc *crtc = NULL;
481 struct drm_crtc_state *crtc_state = NULL;
482 int ret = -EINVAL, i = 0, j = 0;
Lloyd Atkinsona989f5c2017-08-28 11:54:42 -0400483 bool nonblock;
484
485 /* cache since work will kfree commit in non-blocking case */
486 nonblock = commit->nonblock;
Sandeep Pandaf48c46a2016-10-24 09:48:50 +0530487
488 for_each_crtc_in_state(state, crtc, crtc_state, i) {
489 for (j = 0; j < priv->num_crtcs; j++) {
490 if (priv->disp_thread[j].crtc_id ==
491 crtc->base.id) {
492 if (priv->disp_thread[j].thread) {
493 kthread_queue_work(
494 &priv->disp_thread[j].worker,
495 &commit->commit_work);
496 /* only return zero if work is
497 * queued successfully.
498 */
499 ret = 0;
500 } else {
501 DRM_ERROR(" Error for crtc_id: %d\n",
502 priv->disp_thread[j].crtc_id);
503 }
504 break;
505 }
506 }
507 /*
508 * TODO: handle cases where there will be more than
509 * one crtc per commit cycle. Remove this check then.
510 * Current assumption is there will be only one crtc
511 * per commit cycle.
512 */
513 if (j < priv->num_crtcs)
514 break;
515 }
516
Lloyd Atkinson516fca42017-08-24 10:06:32 -0400517 if (ret) {
518 /**
519 * this is not expected to happen, but at this point the state
520 * has been swapped, but we couldn't dispatch to a crtc thread.
521 * fallback now to a synchronous complete_commit to try and
522 * ensure that SW and HW state don't get out of sync.
523 */
524 DRM_ERROR("failed to dispatch commit to any CRTC\n");
525 complete_commit(commit);
Lloyd Atkinsona989f5c2017-08-28 11:54:42 -0400526 } else if (!nonblock) {
Alan Kwongefd65132017-08-07 22:36:51 -0400527 kthread_flush_work(&commit->commit_work);
Alan Kwongefd65132017-08-07 22:36:51 -0400528 }
Lloyd Atkinsona989f5c2017-08-28 11:54:42 -0400529
530 /* free nonblocking commits in this context, after processing */
531 if (!nonblock)
532 kfree(commit);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500533}
534
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500535/**
536 * drm_atomic_helper_commit - commit validated state object
537 * @dev: DRM device
538 * @state: the driver state object
Maarten Lankhorsta3ccfb92016-04-26 16:11:38 +0200539 * @nonblock: nonblocking commit
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500540 *
541 * This function commits a with drm_atomic_helper_check() pre-validated state
Maarten Lankhorsta3ccfb92016-04-26 16:11:38 +0200542 * object. This can still fail when e.g. the framebuffer reservation fails.
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500543 *
544 * RETURNS
545 * Zero for success or -errno.
546 */
547int msm_atomic_commit(struct drm_device *dev,
Maarten Lankhorsta3ccfb92016-04-26 16:11:38 +0200548 struct drm_atomic_state *state, bool nonblock)
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500549{
Rob Clarkca762a82016-03-15 17:22:13 -0400550 struct msm_drm_private *priv = dev->dev_private;
Rob Clarkf86afec2014-11-25 12:41:18 -0500551 struct msm_commit *c;
Daniel Vetter8d76b792016-06-02 15:41:53 +0200552 struct drm_crtc *crtc;
553 struct drm_crtc_state *crtc_state;
554 struct drm_plane *plane;
555 struct drm_plane_state *plane_state;
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500556 int i, ret;
557
Dhaval Patelbadfefd2017-09-26 13:58:02 -0700558 if (!priv || priv->shutdown_in_progress) {
559 DRM_ERROR("priv is null or shutdwon is in-progress\n");
560 return -EINVAL;
561 }
562
Narendra Muppalla77b32932017-05-10 13:53:11 -0700563 SDE_ATRACE_BEGIN("atomic_commit");
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500564 ret = drm_atomic_helper_prepare_planes(dev, state);
Narendra Muppalla77b32932017-05-10 13:53:11 -0700565 if (ret) {
566 SDE_ATRACE_END("atomic_commit");
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500567 return ret;
Narendra Muppalla77b32932017-05-10 13:53:11 -0700568 }
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500569
Alan Kwongefd65132017-08-07 22:36:51 -0400570 c = commit_init(state, nonblock);
Laurent Pinchartf65c18c2015-05-27 14:39:46 +0300571 if (!c) {
572 ret = -ENOMEM;
573 goto error;
574 }
Rob Clarkf86afec2014-11-25 12:41:18 -0500575
576 /*
577 * Figure out what crtcs we have:
578 */
Daniel Vetter8d76b792016-06-02 15:41:53 +0200579 for_each_crtc_in_state(state, crtc, crtc_state, i)
580 c->crtc_mask |= drm_crtc_mask(crtc);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500581
582 /*
Rob Clarkb6295f92016-03-15 18:26:28 -0400583 * Figure out what fence to wait for:
584 */
Daniel Vetter8d76b792016-06-02 15:41:53 +0200585 for_each_plane_in_state(state, plane, plane_state, i) {
586 if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
587 struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
Rob Clarkb6295f92016-03-15 18:26:28 -0400588 struct msm_gem_object *msm_obj = to_msm_bo(obj);
589
Daniel Vetter8d76b792016-06-02 15:41:53 +0200590 plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
Rob Clarkb6295f92016-03-15 18:26:28 -0400591 }
592 }
593
594 /*
Rob Clarkf86afec2014-11-25 12:41:18 -0500595 * Wait for pending updates on any of the same crtc's and then
596 * mark our set of crtc's as busy:
597 */
598 ret = start_atomic(dev->dev_private, c->crtc_mask);
Laurent Pinchart5b2e2b62015-02-23 00:58:03 +0200599 if (ret) {
600 kfree(c);
Laurent Pinchartf65c18c2015-05-27 14:39:46 +0300601 goto error;
Laurent Pinchart5b2e2b62015-02-23 00:58:03 +0200602 }
Rob Clarkf86afec2014-11-25 12:41:18 -0500603
604 /*
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500605 * This is the point of no return - everything below never fails except
606 * when the hw goes bonghits. Which means we can commit the new state on
607 * the software side now.
608 */
609
Daniel Vetter5e84c262016-06-10 00:06:32 +0200610 drm_atomic_helper_swap_state(state, true);
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500611
612 /*
Clarence Ip24f80662016-06-13 19:05:32 -0400613 * Provide the driver a chance to prepare for output fences. This is
614 * done after the point of no return, but before asynchronous commits
615 * are dispatched to work queues, so that the fence preparation is
616 * finished before the .atomic_commit returns.
617 */
Dhaval Patelbadfefd2017-09-26 13:58:02 -0700618 if (priv->kms && priv->kms->funcs && priv->kms->funcs->prepare_fence)
Clarence Ip24f80662016-06-13 19:05:32 -0400619 priv->kms->funcs->prepare_fence(priv->kms, state);
620
621 /*
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500622 * Everything below can be run asynchronously without the need to grab
623 * any modeset locks at all under one conditions: It must be guaranteed
624 * that the asynchronous work has either been cancelled (if the driver
625 * supports it, which at least requires that the framebuffers get
626 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
627 * before the new state gets committed on the software side with
628 * drm_atomic_helper_swap_state().
629 *
630 * This scheme allows new atomic state updates to be prepared and
631 * checked in parallel to the asynchronous completion of the previous
632 * update. Which is important since compositors need to figure out the
633 * composition of the next frame right after having submitted the
634 * current layout.
635 */
636
Lloyd Atkinson516fca42017-08-24 10:06:32 -0400637 msm_atomic_commit_dispatch(dev, state, c);
Narendra Muppalla77b32932017-05-10 13:53:11 -0700638 SDE_ATRACE_END("atomic_commit");
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500639 return 0;
Laurent Pinchartf65c18c2015-05-27 14:39:46 +0300640
641error:
642 drm_atomic_helper_cleanup_planes(dev, state);
Narendra Muppalla77b32932017-05-10 13:53:11 -0700643 SDE_ATRACE_END("atomic_commit");
Laurent Pinchartf65c18c2015-05-27 14:39:46 +0300644 return ret;
Rob Clarkcf3a7e42014-11-08 13:21:06 -0500645}