/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"

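/*
 * Tracks one commit in flight: the atomic state being committed, the
 * fence value to wait on before touching the hw, and the mask of crtcs
 * the commit updates (used to serialize overlapping commits):
 */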
struct msm_commit {
	struct drm_atomic_state *state;
	uint32_t fence;
	struct msm_fence_cb fence_cb;
	uint32_t crtc_mask;
};

static void fence_cb(struct msm_fence_cb *cb);

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	spin_lock(&priv->pending_crtcs_event.lock);
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		priv->pending_crtcs |= crtc_mask;
	}
	spin_unlock(&priv->pending_crtcs_event.lock);

	return ret;
}

/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}
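
/*
 * Example (hypothetical): if commits A and B both touch crtc 0, A calls
 * start_atomic(priv, BIT(0)) and proceeds, while B blocks in
 * wait_event_interruptible_locked() until A's complete_commit() calls
 * end_atomic(), which wakes the waitqueue.  Commits touching disjoint
 * sets of crtcs are not serialized against each other.
 */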

static struct msm_commit *new_commit(struct drm_atomic_state *state)
{
	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->state = state;
	/* TODO we might need a way to indicate to run the cb on a
	 * different wq so wait_for_vblanks() doesn't block retiring
	 * bo's..
	 */
	INIT_FENCE_CB(&c->fence_cb, fence_cb);

	return c;
}

/* The (potentially) asynchronous part of the commit. At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;

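	/*
	 * The three helper calls below apply the modeset (crtc/encoder)
	 * part of the state around the plane updates: pre_planes first,
	 * then the plane updates themselves, then post_planes.
	 */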
	drm_atomic_helper_commit_pre_planes(dev, state);

	drm_atomic_helper_commit_planes(dev, state);

	drm_atomic_helper_commit_post_planes(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs. So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq). We need some way to poll for pipe shut
	 * down. Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	end_atomic(dev->dev_private, c->crtc_mask);

	kfree(c);
}

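/* Fence callback: the GPU is done with the new fb's, finish the commit: */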
static void fence_cb(struct msm_fence_cb *cb)
{
	struct msm_commit *c =
			container_of(cb, struct msm_commit, fence_cb);
	complete_commit(c);
}

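/* Accumulate the fence to wait on before touching the hw: take the max
 * of the current value and the read-fence of the fb's backing bo (bo of
 * plane 0 of the fb), so the commit is deferred until rendering to the
 * new fb has completed.
 */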
static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
{
	struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
	c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
}


/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @async: asynchronous commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. An asynchronous commit returns as soon as the state
 * has been swapped in; the remaining work is completed from the fence
 * callback once the GPU is done with the new framebuffers.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool async)
{
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	struct msm_commit *c;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = new_commit(state);
	if (!c) {
		drm_atomic_helper_cleanup_planes(dev, state);
		return -ENOMEM;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for:
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];

		if (!plane)
			continue;

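		/* Only wait on fb's that are actually changing: */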
		if ((plane->state->fb != new_state->fb) && new_state->fb)
			add_fb(c, new_state->fb);
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		kfree(c);
		drm_atomic_helper_cleanup_planes(dev, state);
		return ret;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	if (async) {
		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
		return 0;
	}

	ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
	if (ret) {
		WARN_ON(ret);    // TODO unswap state back? or??
		end_atomic(dev->dev_private, c->crtc_mask);
		kfree(c);
		return ret;
	}

	complete_commit(c);

	return 0;
}
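
/*
 * Usage sketch (hypothetical, not part of this file): msm_atomic_commit
 * is wired up as the atomic_commit hook in the driver's
 * drm_mode_config_funcs, along the lines of:
 *
 *	static const struct drm_mode_config_funcs mode_config_funcs = {
 *		.fb_create = msm_framebuffer_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = msm_atomic_commit,
 *	};
 *
 * The drm core then calls it for every atomic update that has passed
 * ->atomic_check().
 */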