/*
 * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
#include "msm_fence.h"
#include "sde_trace.h"
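
/* A commit flows through three stages: msm_atomic_commit() swaps in the new
 * state and fills in a struct msm_commit, msm_atomic_commit_dispatch() queues
 * commit_work on the display thread that owns the affected crtc, and
 * complete_commit() pushes the state to hardware. For blocking commits the
 * dispatcher flushes the work and frees the commit itself; nonblocking
 * commits are freed by the worker via commit_destroy().
 */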
struct msm_commit {
        struct drm_device *dev;
        struct drm_atomic_state *state;
        uint32_t crtc_mask;
        bool nonblock;
        struct kthread_work commit_work;
};

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
        int ret;

        spin_lock(&priv->pending_crtcs_event.lock);
        ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
                        !(priv->pending_crtcs & crtc_mask));
        if (ret == 0) {
                DBG("start: %08x", crtc_mask);
                priv->pending_crtcs |= crtc_mask;
        }
        spin_unlock(&priv->pending_crtcs_event.lock);

        return ret;
}

/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
        spin_lock(&priv->pending_crtcs_event.lock);
        DBG("end: %08x", crtc_mask);
        priv->pending_crtcs &= ~crtc_mask;
        wake_up_all_locked(&priv->pending_crtcs_event);
        spin_unlock(&priv->pending_crtcs_event.lock);
}
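
/* Release the crtcs claimed by start_atomic(). Note the asymmetric free:
 * nonblocking commits are owned by the worker and freed here, while blocking
 * commits are freed by msm_atomic_commit_dispatch() after it has flushed the
 * work.
 */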
static void commit_destroy(struct msm_commit *c)
{
        end_atomic(c->dev->dev_private, c->crtc_mask);
        if (c->nonblock)
                kfree(c);
}
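
/* Block until the kms backend reports that the commit has latched on each
 * enabled crtc in the state. Legacy cursor updates are skipped, since
 * userspace expects those to be unsynchronized.
 */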
static void msm_atomic_wait_for_commit_done(
                struct drm_device *dev,
                struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        struct msm_drm_private *priv = old_state->dev->dev_private;
        struct msm_kms *kms = priv->kms;
        int i;

        for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
                if (!crtc->state->enable)
                        continue;

                /* Legacy cursor ioctls are completely unsynced, and userspace
                 * relies on that (by doing tons of cursor updates). */
                if (old_state->legacy_cursor_update)
                        continue;

                kms->funcs->wait_for_crtc_commit_done(kms, crtc);
        }
}
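
/* The msm variant of the atomic helper disable path: using the old state,
 * shut down every encoder/bridge and crtc that the new state disables or
 * fully modesets, while skipping transitions the driver marks seamless
 * (msm_is_mode_seamless/_vrr/_dms) so the display pipe stays running across
 * the switch.
 */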
static void
msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
        int i;

        SDE_ATRACE_BEGIN("msm_disable");
        for_each_connector_in_state(old_state, connector, old_conn_state, i) {
                const struct drm_encoder_helper_funcs *funcs;
                struct drm_encoder *encoder;
                struct drm_crtc_state *old_crtc_state;
                unsigned int crtc_idx;

                /*
                 * Shut down everything that's in the changeset and currently
                 * still on. So need to check the old, saved state.
                 */
                if (!old_conn_state->crtc)
                        continue;

                crtc_idx = drm_crtc_index(old_conn_state->crtc);
                old_crtc_state = drm_atomic_get_existing_crtc_state(old_state,
                                old_conn_state->crtc);

                if (!old_crtc_state->active ||
                    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
                        continue;

                encoder = old_conn_state->best_encoder;

                /* We shouldn't get this far if we didn't previously have
                 * an encoder.. but WARN_ON() rather than explode.
                 */
                if (WARN_ON(!encoder))
                        continue;

                if (msm_is_mode_seamless(
                                &connector->encoder->crtc->state->mode) ||
                    msm_is_mode_seamless_vrr(
                                &connector->encoder->crtc->state->adjusted_mode))
                        continue;

                if (msm_is_mode_seamless_dms(
                                &connector->encoder->crtc->state->adjusted_mode))
                        continue;

                funcs = encoder->helper_private;

                DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
                                encoder->base.id, encoder->name);

                /*
                 * Each encoder has at most one connector (since we always steal
                 * it away), so we won't call disable hooks twice.
                 */
                drm_bridge_disable(encoder->bridge);

                /* Right function depends upon target state. */
                if (connector->state->crtc && funcs->prepare)
                        funcs->prepare(encoder);
                else if (funcs->disable)
                        funcs->disable(encoder);
                else
                        funcs->dpms(encoder, DRM_MODE_DPMS_OFF);

                drm_bridge_post_disable(encoder->bridge);
        }

        for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
                const struct drm_crtc_helper_funcs *funcs;

                /* Shut down everything that needs a full modeset. */
                if (!drm_atomic_crtc_needs_modeset(crtc->state))
                        continue;

                if (!old_crtc_state->active)
                        continue;

                if (msm_is_mode_seamless(&crtc->state->mode) ||
                    msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
                        continue;

                if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
                        continue;

                funcs = crtc->helper_private;

                DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
                                crtc->base.id);

                /* Right function depends upon target state. */
                if (crtc->state->enable && funcs->prepare)
                        funcs->prepare(crtc);
                else if (funcs->disable)
                        funcs->disable(crtc);
                else
                        funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
        }
        SDE_ATRACE_END("msm_disable");
}
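
/* Program the new mode on every crtc (via mode_set_nofb) and on every
 * encoder and its bridge (via mode_set) whose crtc state reports
 * mode_changed.
 */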
static void
msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state;
        int i;

        for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
                const struct drm_crtc_helper_funcs *funcs;

                if (!crtc->state->mode_changed)
                        continue;

                funcs = crtc->helper_private;

                if (crtc->state->enable && funcs->mode_set_nofb) {
                        DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
                                        crtc->base.id);

                        funcs->mode_set_nofb(crtc);
                }
        }

        for_each_connector_in_state(old_state, connector, old_conn_state, i) {
                const struct drm_encoder_helper_funcs *funcs;
                struct drm_crtc_state *new_crtc_state;
                struct drm_encoder *encoder;
                struct drm_display_mode *mode, *adjusted_mode;

                if (!connector->state->best_encoder)
                        continue;

                encoder = connector->state->best_encoder;
                funcs = encoder->helper_private;
                new_crtc_state = connector->state->crtc->state;
                mode = &new_crtc_state->mode;
                adjusted_mode = &new_crtc_state->adjusted_mode;

                if (!new_crtc_state->mode_changed)
                        continue;

                DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
                                encoder->base.id, encoder->name);

                /*
                 * Each encoder has at most one connector (since we always steal
                 * it away), so we won't call mode_set hooks twice.
                 */
                if (funcs->mode_set)
                        funcs->mode_set(encoder, mode, adjusted_mode);

                drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
        }
}

/**
 * msm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function shuts down all the outputs that need to be shut down and
 * prepares them (if required) with the new mode.
 *
 * For compatibility with legacy crtc helpers this should be called before
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
                struct drm_atomic_state *old_state)
{
        msm_disable_outputs(dev, old_state);

        drm_atomic_helper_update_legacy_modeset_state(dev, old_state);

        msm_crtc_set_mode(dev, old_state);
}

/**
 * msm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function enables all the outputs with the new configuration which had to
 * be turned off for the update.
 *
 * For compatibility with legacy crtc helpers this should be called after
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
                struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        int bridge_enable_count = 0;
        int i;

        SDE_ATRACE_BEGIN("msm_enable");
        for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
                const struct drm_crtc_helper_funcs *funcs;

                /* Need to filter out CRTCs where only planes change. */
                if (!drm_atomic_crtc_needs_modeset(crtc->state))
                        continue;

                if (!crtc->state->active)
                        continue;

                if (msm_is_mode_seamless(&crtc->state->mode) ||
                    msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
                        continue;

                funcs = crtc->helper_private;

                if (crtc->state->enable) {
                        DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
                                        crtc->base.id);

                        if (funcs->enable)
                                funcs->enable(crtc);
                        else
                                funcs->commit(crtc);
                }

                if (msm_needs_vblank_pre_modeset(&crtc->state->adjusted_mode))
                        drm_crtc_wait_one_vblank(crtc);
        }

        for_each_connector_in_state(old_state, connector, old_conn_state, i) {
                const struct drm_encoder_helper_funcs *funcs;
                struct drm_encoder *encoder;

                if (!connector->state->best_encoder)
                        continue;

                if (!connector->state->crtc->state->active ||
                    !drm_atomic_crtc_needs_modeset(
                                connector->state->crtc->state))
                        continue;

                encoder = connector->state->best_encoder;
                funcs = encoder->helper_private;

                DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
                                encoder->base.id, encoder->name);

                /*
                 * Each encoder has at most one connector (since we always steal
                 * it away), so we won't call enable hooks twice.
                 */
                drm_bridge_pre_enable(encoder->bridge);
                ++bridge_enable_count;

                if (funcs->enable)
                        funcs->enable(encoder);
                else
                        funcs->commit(encoder);
        }

        if (kms->funcs->commit) {
                DRM_DEBUG_ATOMIC("triggering commit\n");
                kms->funcs->commit(kms, old_state);
        }

        /* If no bridges were pre_enabled, skip iterating over them again */
        if (bridge_enable_count == 0) {
                SDE_ATRACE_END("msm_enable");
                return;
        }

        for_each_connector_in_state(old_state, connector, old_conn_state, i) {
                struct drm_encoder *encoder;

                if (!connector->state->best_encoder)
                        continue;

                if (!connector->state->crtc->state->active ||
                    !drm_atomic_crtc_needs_modeset(
                                connector->state->crtc->state))
                        continue;

                encoder = connector->state->best_encoder;

                DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
                                encoder->base.id, encoder->name);

                drm_bridge_enable(encoder->bridge);
        }
        SDE_ATRACE_END("msm_enable");
}

/* The (potentially) asynchronous part of the commit. At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
{
        struct drm_atomic_state *state = c->state;
        struct drm_device *dev = state->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        drm_atomic_helper_wait_for_fences(dev, state, false);

        kms->funcs->prepare_commit(kms, state);

        msm_atomic_helper_commit_modeset_disables(dev, state);

        drm_atomic_helper_commit_planes(dev, state, 0);

        msm_atomic_helper_commit_modeset_enables(dev, state);

        /* NOTE: _wait_for_vblanks() only waits for vblank on
         * enabled CRTCs. So we end up faulting when disabling
         * due to (potentially) unref'ing the outgoing fb's
         * before the vblank when the disable has latched.
         *
         * But if it did wait on disabled (or newly disabled)
         * CRTCs, that would be racy (ie. we could have missed
         * the irq). We need some way to poll for pipe shut
         * down. Or just live with occasionally hitting the
         * timeout in the CRTC disable path (which really should
         * not be critical path).
         */

        msm_atomic_wait_for_commit_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        kms->funcs->complete_commit(kms, state);

        drm_atomic_state_free(state);

        commit_destroy(c);
}
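
/* kthread work callback; runs on the per-crtc display thread that the
 * commit was dispatched to.
 */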
static void _msm_drm_commit_work_cb(struct kthread_work *work)
{
        struct msm_commit *commit = NULL;

        if (!work) {
                DRM_ERROR("%s: Invalid commit work data!\n", __func__);
                return;
        }

        commit = container_of(work, struct msm_commit, commit_work);

        SDE_ATRACE_BEGIN("complete_commit");
        complete_commit(commit);
        SDE_ATRACE_END("complete_commit");
}
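
/* Allocate a commit object for this atomic state and hook up the commit
 * work; returns NULL if allocation fails. The nonblock flag decides who
 * frees the commit (see commit_destroy()).
 */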
static struct msm_commit *commit_init(struct drm_atomic_state *state,
                bool nonblock)
{
        struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return NULL;

        c->dev = state->dev;
        c->state = state;
        c->nonblock = nonblock;

        kthread_init_work(&c->commit_work, _msm_drm_commit_work_cb);

        return c;
}

/* Dispatch the commit to the display thread that owns its crtc */
static void msm_atomic_commit_dispatch(struct drm_device *dev,
                struct drm_atomic_state *state, struct msm_commit *commit)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_crtc *crtc = NULL;
        struct drm_crtc_state *crtc_state = NULL;
        int ret = -EINVAL, i = 0, j = 0;
        bool nonblock;

        /* cache since work will kfree commit in non-blocking case */
        nonblock = commit->nonblock;

        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                for (j = 0; j < priv->num_crtcs; j++) {
                        if (priv->disp_thread[j].crtc_id ==
                                        crtc->base.id) {
                                if (priv->disp_thread[j].thread) {
                                        kthread_queue_work(
                                                &priv->disp_thread[j].worker,
                                                &commit->commit_work);
                                        /* only return zero if work is
                                         * queued successfully.
                                         */
                                        ret = 0;
                                } else {
                                        DRM_ERROR("Error for crtc_id: %d\n",
                                                priv->disp_thread[j].crtc_id);
                                }
                                break;
                        }
                }
                /*
                 * TODO: handle cases where there will be more than
                 * one crtc per commit cycle. Remove this check then.
                 * Current assumption is there will be only one crtc
                 * per commit cycle.
                 */
                if (j < priv->num_crtcs)
                        break;
        }

        if (ret) {
                /*
                 * This is not expected to happen, but at this point the state
                 * has been swapped, yet we couldn't dispatch to a crtc thread.
                 * Fall back now to a synchronous complete_commit to try and
                 * ensure that SW and HW state don't get out of sync.
                 */
                DRM_ERROR("failed to dispatch commit to any CRTC\n");
                complete_commit(commit);
        } else if (!nonblock) {
                kthread_flush_work(&commit->commit_work);
        }

        /* free blocking commits in this context, after processing */
        if (!nonblock)
                kfree(commit);
}

/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
                struct drm_atomic_state *state, bool nonblock)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_commit *c;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        int i, ret;

        if (!priv || priv->shutdown_in_progress) {
                DRM_ERROR("priv is null or shutdown is in progress\n");
                return -EINVAL;
        }

        SDE_ATRACE_BEGIN("atomic_commit");
        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret) {
                SDE_ATRACE_END("atomic_commit");
                return ret;
        }

        c = commit_init(state, nonblock);
        if (!c) {
                ret = -ENOMEM;
                goto error;
        }

        /*
         * Figure out what crtcs we have:
         */
        for_each_crtc_in_state(state, crtc, crtc_state, i)
                c->crtc_mask |= drm_crtc_mask(crtc);

        /*
         * Figure out what fence to wait for:
         */
        for_each_plane_in_state(state, plane, plane_state, i) {
                if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
                        struct drm_gem_object *obj =
                                msm_framebuffer_bo(plane_state->fb, 0);
                        struct msm_gem_object *msm_obj = to_msm_bo(obj);

                        plane_state->fence =
                                reservation_object_get_excl_rcu(msm_obj->resv);
                }
        }

        /*
         * Wait for pending updates on any of the same crtc's and then
         * mark our set of crtc's as busy:
         */
        ret = start_atomic(dev->dev_private, c->crtc_mask);
        if (ret) {
                kfree(c);
                goto error;
        }

        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
         * the software side now.
         */

        drm_atomic_helper_swap_state(state, true);

        /*
         * Provide the driver a chance to prepare for output fences. This is
         * done after the point of no return, but before asynchronous commits
         * are dispatched to work queues, so that the fence preparation is
         * finished before the .atomic_commit returns.
         */
        if (priv->kms && priv->kms->funcs && priv->kms->funcs->prepare_fence)
                priv->kms->funcs->prepare_fence(priv->kms, state);

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: It must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update. Which is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
         */

        msm_atomic_commit_dispatch(dev, state, c);
        SDE_ATRACE_END("atomic_commit");
        return 0;

error:
        drm_atomic_helper_cleanup_planes(dev, state);
        SDE_ATRACE_END("atomic_commit");
        return ret;
}