/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"

/**
 * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
 * @connector: Connector to get the property for.
 * @state: Connector state to retrieve the property from.
 * @property: Property to retrieve.
 * @val: Return value for the property.
 *
 * Returns the atomic property value for a digital connector.
 */
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
						const struct drm_connector_state *state,
						struct drm_property *property,
						uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(state);

	if (property == dev_priv->force_audio_property)
		*val = intel_conn_state->force_audio;
	else if (property == dev_priv->broadcast_rgb_property)
		*val = intel_conn_state->broadcast_rgb;
	else {
		DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * intel_digital_connector_atomic_set_property - hook for connector->atomic_set_property.
 * @connector: Connector to set the property for.
 * @state: Connector state to set the property on.
 * @property: Property to set.
 * @val: New value for the property.
 *
 * Sets the atomic property value for a digital connector.
 */
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
						struct drm_connector_state *state,
						struct drm_property *property,
						uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(state);

	if (property == dev_priv->force_audio_property) {
		intel_conn_state->force_audio = val;
		return 0;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		intel_conn_state->broadcast_rgb = val;
		return 0;
	}

	DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
	return -EINVAL;
}
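
/*
 * Usage sketch: a digital connector wires the two property hooks above into
 * its &drm_connector_funcs. The struct below is illustrative only (the name
 * and the surrounding connector are hypothetical, not a specific i915
 * connector):
 *
 *	static const struct drm_connector_funcs example_connector_funcs = {
 *		...
 *		.atomic_get_property = intel_digital_connector_atomic_get_property,
 *		.atomic_set_property = intel_digital_connector_atomic_set_property,
 *		.atomic_duplicate_state = intel_digital_connector_duplicate_state,
 *	};
 */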

/**
 * intel_digital_connector_atomic_check - hook for connector->atomic_check.
 * @conn: Connector to check.
 * @new_state: New connector state to check.
 *
 * Runs the HDCP atomic check and, when a digital connector property has
 * changed, marks the crtc state as mode-changed so that the change is
 * picked up by the modeset/fastset machinery.
 *
 * Returns: 0 on success.
 */
int intel_digital_connector_atomic_check(struct drm_connector *conn,
					 struct drm_connector_state *new_state)
{
	struct intel_digital_connector_state *new_conn_state =
		to_intel_digital_connector_state(new_state);
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(new_state->state, conn);
	struct intel_digital_connector_state *old_conn_state =
		to_intel_digital_connector_state(old_state);
	struct drm_crtc_state *crtc_state;

	intel_hdcp_atomic_check(conn, old_state, new_state);

	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);

	/*
	 * These properties are handled by fastset, and might not end
	 * up in a modeset.
	 */
	if (new_conn_state->force_audio != old_conn_state->force_audio ||
	    new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
	    new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
	    new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
		crtc_state->mode_changed = true;

	return 0;
}
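
/*
 * Wiring sketch (the struct name is hypothetical): the check above is meant
 * to be installed as the connector's helper atomic_check hook, e.g.:
 *
 *	static const struct drm_connector_helper_funcs example_helper_funcs = {
 *		...
 *		.atomic_check = intel_digital_connector_atomic_check,
 *	};
 */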

/**
 * intel_digital_connector_duplicate_state - duplicate connector state
 * @connector: digital connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * digital connector specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector)
{
	struct intel_digital_connector_state *state;

	state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &state->base);
	return &state->base;
}

/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct intel_crtc_state *crtc_state;

	crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);

	crtc_state->update_pipe = false;
	crtc_state->disable_lp_wm = false;
	crtc_state->disable_cxsr = false;
	crtc_state->update_wm_pre = false;
	crtc_state->update_wm_post = false;
	crtc_state->fb_changed = false;
	crtc_state->fifo_changed = false;
	crtc_state->wm.need_postvbl_update = false;
	crtc_state->fb_bits = 0;

	return &crtc_state->base;
}

/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			 struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}
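
/*
 * Sketch of how the two crtc state helpers above are typically hooked up
 * (the struct name is hypothetical):
 *
 *	static const struct drm_crtc_funcs example_crtc_funcs = {
 *		...
 *		.atomic_duplicate_state = intel_crtc_duplicate_state,
 *		.atomic_destroy_state = intel_crtc_destroy_state,
 *	};
 */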

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev_priv: i915 device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on staged scaling requests for
 * a @crtc and its planes. It is called from the crtc-level check path. If
 * the request is supportable, it attaches scalers to the requested planes
 * and crtc.
 *
 * This function takes into account the current scaler(s) in use by any
 * planes that are not part of this atomic state.
 *
 * Returns:
 * 0 - scalers were set up successfully
 * error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
			       struct intel_crtc *intel_crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_plane *plane = NULL;
	struct intel_plane *intel_plane;
	struct intel_plane_state *plane_state = NULL;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_atomic_state *drm_state = crtc_state->base.state;
	int num_scalers_need;
	int i, j;

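	/* hweight32() counts the set bits: one scaler is needed per staged user */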
	num_scalers_need = hweight32(scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */
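
	/*
	 * scaler_users bit layout, as consumed by the loop below: bit i for
	 * i < SKL_CRTC_INDEX identifies the plane with drm plane index i,
	 * while the SKL_CRTC_INDEX bit stands for the crtc itself (the
	 * panel fitter case).
	 */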

	/* fail if required scalers > available scalers */
	if (num_scalers_need > intel_crtc->num_scalers) {
		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
			      num_scalers_need, intel_crtc->num_scalers);
		return -EINVAL;
	}

	/* walk through scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		int *scaler_id;
		const char *name;
		int idx;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			name = "CRTC";
			idx = intel_crtc->base.base.id;

			/* panel fitter case: assign as a crtc scaler */
			scaler_id = &scaler_state->scaler_id;
		} else {
			name = "PLANE";

			/* plane scaler case: assign as a plane scaler */
			/* find the plane that set the bit as scaler_user */
			plane = drm_state->planes[i].ptr;

			/*
			 * to enable/disable hq mode, add planes that are using a
			 * scaler into this transaction
			 */
			if (!plane) {
				struct drm_plane_state *state;
				plane = drm_plane_from_index(&dev_priv->drm, i);
				state = drm_atomic_get_plane_state(drm_state, plane);
				if (IS_ERR(state)) {
					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
						      plane->base.id);
					return PTR_ERR(state);
				}

				/*
				 * the plane is added after plane checks are run,
				 * but since this plane is unchanged just do the
				 * minimum required validation.
				 */
				crtc_state->base.planes_changed = true;
			}

			intel_plane = to_intel_plane(plane);
			idx = plane->base.id;

			/* plane on different crtc cannot be a scaler user of this crtc */
			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
				continue;

			plane_state = intel_atomic_get_existing_plane_state(drm_state,
									    intel_plane);
			scaler_id = &plane_state->scaler_id;
		}

		if (*scaler_id < 0) {
			/* find a free scaler */
			for (j = 0; j < intel_crtc->num_scalers; j++) {
				if (!scaler_state->scalers[j].in_use) {
					scaler_state->scalers[j].in_use = 1;
					*scaler_id = j;
					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
						      intel_crtc->pipe, *scaler_id, name, idx);
					break;
				}
			}
		}

		if (WARN_ON(*scaler_id < 0)) {
			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
			continue;
		}

		/* set scaler mode */
		if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
			scaler_state->scalers[*scaler_id].mode = 0;
		} else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
			/*
			 * when only 1 scaler is in use on either pipe A or B,
			 * scaler 0 operates in high quality (HQ) mode.
			 * In this case use scaler 0 to take advantage of HQ mode.
			 */
			*scaler_id = 0;
			scaler_state->scalers[0].in_use = 1;
			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
			scaler_state->scalers[1].in_use = 0;
		} else {
			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
		}
	}

	return 0;
}
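
/*
 * Example call site (a sketch; the surrounding function and variables are
 * hypothetical): a gen9+ crtc-level atomic check would invoke the scaler
 * setup after the plane checks have staged their scaling requests:
 *
 *	ret = intel_atomic_setup_scalers(dev_priv, intel_crtc, crtc_state);
 *	if (ret)
 *		return ret;
 */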

/**
 * intel_atomic_state_alloc - allocate an i915 atomic state
 * @dev: drm device
 *
 * Allocates an &intel_atomic_state and initializes its embedded
 * &drm_atomic_state.
 *
 * Returns: The base &drm_atomic_state of the new state, or NULL on failure.
 */
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}

	return &state->base;
}

/**
 * intel_atomic_state_clear - clear an i915 atomic state for reuse
 * @s: atomic state to clear
 *
 * Clears the base state with the default helper and resets the
 * i915-specific flags.
 */
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	drm_atomic_state_default_clear(&state->base);
	state->dpll_set = state->modeset = false;
}
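
/*
 * Wiring sketch (the struct name is hypothetical): the two functions above
 * are hooked up through &drm_mode_config_funcs so that the DRM core
 * allocates and recycles the driver-subclassed state:
 *
 *	static const struct drm_mode_config_funcs example_mode_funcs = {
 *		...
 *		.atomic_state_alloc = intel_atomic_state_alloc,
 *		.atomic_state_clear = intel_atomic_state_clear,
 *	};
 */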