Matt Roper | 5ee67f1 | 2015-01-21 16:35:44 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright © 2015 Intel Corporation |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice (including the next |
| 12 | * paragraph) shall be included in all copies or substantial portions of the |
| 13 | * Software. |
| 14 | * |
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
| 21 | * DEALINGS IN THE SOFTWARE. |
| 22 | */ |
| 23 | |
| 24 | /** |
| 25 | * DOC: atomic modeset support |
| 26 | * |
| 27 | * The functions here implement the state management and hardware programming |
| 28 | * dispatch required by the atomic modeset infrastructure. |
| 29 | * See intel_atomic_plane.c for the plane-specific atomic functionality. |
| 30 | */ |
| 31 | |
| 32 | #include <drm/drmP.h> |
| 33 | #include <drm/drm_atomic.h> |
| 34 | #include <drm/drm_atomic_helper.h> |
| 35 | #include <drm/drm_plane_helper.h> |
| 36 | #include "intel_drv.h" |
| 37 | |
| 38 | |
| 39 | /** |
| 40 | * intel_atomic_check - validate state object |
| 41 | * @dev: drm device |
| 42 | * @state: state to validate |
| 43 | */ |
| 44 | int intel_atomic_check(struct drm_device *dev, |
| 45 | struct drm_atomic_state *state) |
| 46 | { |
| 47 | int nplanes = dev->mode_config.num_total_plane; |
| 48 | int ncrtcs = dev->mode_config.num_crtc; |
| 49 | int nconnectors = dev->mode_config.num_connector; |
| 50 | enum pipe nuclear_pipe = INVALID_PIPE; |
Chandra Konduru | e04fa80 | 2015-04-07 15:28:43 -0700 | [diff] [blame] | 51 | struct intel_crtc *nuclear_crtc = NULL; |
| 52 | struct intel_crtc_state *crtc_state = NULL; |
Matt Roper | 5ee67f1 | 2015-01-21 16:35:44 -0800 | [diff] [blame] | 53 | int ret; |
| 54 | int i; |
| 55 | bool not_nuclear = false; |
| 56 | |
| 57 | /* |
| 58 | * FIXME: At the moment, we only support "nuclear pageflip" on a |
| 59 | * single CRTC. Cross-crtc updates will be added later. |
| 60 | */ |
| 61 | for (i = 0; i < nplanes; i++) { |
| 62 | struct intel_plane *plane = to_intel_plane(state->planes[i]); |
| 63 | if (!plane) |
| 64 | continue; |
| 65 | |
| 66 | if (nuclear_pipe == INVALID_PIPE) { |
| 67 | nuclear_pipe = plane->pipe; |
| 68 | } else if (nuclear_pipe != plane->pipe) { |
| 69 | DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n"); |
| 70 | return -EINVAL; |
| 71 | } |
| 72 | } |
| 73 | |
| 74 | /* |
| 75 | * FIXME: We only handle planes for now; make sure there are no CRTC's |
| 76 | * or connectors involved. |
| 77 | */ |
| 78 | state->allow_modeset = false; |
| 79 | for (i = 0; i < ncrtcs; i++) { |
| 80 | struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]); |
Matt Roper | f1e2dae | 2015-04-09 10:48:38 -0700 | [diff] [blame] | 81 | if (crtc) |
| 82 | memset(&crtc->atomic, 0, sizeof(crtc->atomic)); |
Matt Roper | 5ee67f1 | 2015-01-21 16:35:44 -0800 | [diff] [blame] | 83 | if (crtc && crtc->pipe != nuclear_pipe) |
| 84 | not_nuclear = true; |
Chandra Konduru | e04fa80 | 2015-04-07 15:28:43 -0700 | [diff] [blame] | 85 | if (crtc && crtc->pipe == nuclear_pipe) { |
| 86 | nuclear_crtc = crtc; |
| 87 | crtc_state = to_intel_crtc_state(state->crtc_states[i]); |
| 88 | } |
Matt Roper | 5ee67f1 | 2015-01-21 16:35:44 -0800 | [diff] [blame] | 89 | } |
| 90 | for (i = 0; i < nconnectors; i++) |
| 91 | if (state->connectors[i] != NULL) |
| 92 | not_nuclear = true; |
| 93 | |
| 94 | if (not_nuclear) { |
| 95 | DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n"); |
| 96 | return -EINVAL; |
| 97 | } |
| 98 | |
Maarten Lankhorst | d032ffa | 2015-06-15 12:33:51 +0200 | [diff] [blame] | 99 | if (crtc_state && |
| 100 | crtc_state->quirks & PIPE_CONFIG_QUIRK_INITIAL_PLANES) { |
| 101 | ret = drm_atomic_add_affected_planes(state, &nuclear_crtc->base); |
| 102 | if (ret) |
| 103 | return ret; |
| 104 | } |
| 105 | |
Matt Roper | 5ee67f1 | 2015-01-21 16:35:44 -0800 | [diff] [blame] | 106 | ret = drm_atomic_helper_check_planes(dev, state); |
| 107 | if (ret) |
| 108 | return ret; |
| 109 | |
| 110 | return ret; |
| 111 | } |
| 112 | |
| 113 | |
| 114 | /** |
| 115 | * intel_atomic_commit - commit validated state object |
| 116 | * @dev: DRM device |
| 117 | * @state: the top-level driver state object |
| 118 | * @async: asynchronous commit |
| 119 | * |
| 120 | * This function commits a top-level state object that has been validated |
| 121 | * with drm_atomic_helper_check(). |
| 122 | * |
| 123 | * FIXME: Atomic modeset support for i915 is not yet complete. At the moment |
| 124 | * we can only handle plane-related operations and do not yet support |
| 125 | * asynchronous commit. |
| 126 | * |
| 127 | * RETURNS |
| 128 | * Zero for success or -errno. |
| 129 | */ |
| 130 | int intel_atomic_commit(struct drm_device *dev, |
| 131 | struct drm_atomic_state *state, |
| 132 | bool async) |
| 133 | { |
Maarten Lankhorst | 61c0549 | 2015-06-01 12:50:00 +0200 | [diff] [blame] | 134 | struct drm_crtc_state *crtc_state; |
| 135 | struct drm_crtc *crtc; |
Matt Roper | 5ee67f1 | 2015-01-21 16:35:44 -0800 | [diff] [blame] | 136 | int ret; |
| 137 | int i; |
| 138 | |
| 139 | if (async) { |
| 140 | DRM_DEBUG_KMS("i915 does not yet support async commit\n"); |
| 141 | return -EINVAL; |
| 142 | } |
| 143 | |
| 144 | ret = drm_atomic_helper_prepare_planes(dev, state); |
| 145 | if (ret) |
| 146 | return ret; |
| 147 | |
| 148 | /* Point of no return */ |
Maarten Lankhorst | 61c0549 | 2015-06-01 12:50:00 +0200 | [diff] [blame] | 149 | drm_atomic_helper_swap_state(dev, state); |
Matt Roper | 5ee67f1 | 2015-01-21 16:35:44 -0800 | [diff] [blame] | 150 | |
Maarten Lankhorst | 61c0549 | 2015-06-01 12:50:00 +0200 | [diff] [blame] | 151 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 152 | to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state); |
Chandra Konduru | 6156a45 | 2015-04-27 13:48:39 -0700 | [diff] [blame] | 153 | |
Maarten Lankhorst | 5ac1c4b | 2015-06-01 12:50:01 +0200 | [diff] [blame] | 154 | drm_atomic_helper_commit_planes_on_crtc(crtc_state); |
Chandra Konduru | f76f35d | 2015-04-07 15:28:44 -0700 | [diff] [blame] | 155 | } |
| 156 | |
Maarten Lankhorst | 5ac1c4b | 2015-06-01 12:50:01 +0200 | [diff] [blame] | 157 | /* FIXME: This function should eventually call __intel_set_mode when needed */ |
Maarten Lankhorst | 61c0549 | 2015-06-01 12:50:00 +0200 | [diff] [blame] | 158 | |
Matt Roper | 5ee67f1 | 2015-01-21 16:35:44 -0800 | [diff] [blame] | 159 | drm_atomic_helper_wait_for_vblanks(dev, state); |
| 160 | drm_atomic_helper_cleanup_planes(dev, state); |
| 161 | drm_atomic_state_free(state); |
| 162 | |
| 163 | return 0; |
| 164 | } |
Matt Roper | 2545e4a | 2015-01-22 16:51:27 -0800 | [diff] [blame] | 165 | |
| 166 | /** |
| 167 | * intel_connector_atomic_get_property - fetch connector property value |
| 168 | * @connector: connector to fetch property for |
| 169 | * @state: state containing the property value |
| 170 | * @property: property to look up |
| 171 | * @val: pointer to write property value into |
| 172 | * |
| 173 | * The DRM core does not store shadow copies of properties for |
| 174 | * atomic-capable drivers. This entrypoint is used to fetch |
| 175 | * the current value of a driver-specific connector property. |
| 176 | */ |
| 177 | int |
| 178 | intel_connector_atomic_get_property(struct drm_connector *connector, |
| 179 | const struct drm_connector_state *state, |
| 180 | struct drm_property *property, |
| 181 | uint64_t *val) |
| 182 | { |
| 183 | int i; |
| 184 | |
| 185 | /* |
| 186 | * TODO: We only have atomic modeset for planes at the moment, so the |
| 187 | * crtc/connector code isn't quite ready yet. Until it's ready, |
| 188 | * continue to look up all property values in the DRM's shadow copy |
| 189 | * in obj->properties->values[]. |
| 190 | * |
| 191 | * When the crtc/connector state work matures, this function should |
| 192 | * be updated to read the values out of the state structure instead. |
| 193 | */ |
| 194 | for (i = 0; i < connector->base.properties->count; i++) { |
| 195 | if (connector->base.properties->properties[i] == property) { |
| 196 | *val = connector->base.properties->values[i]; |
| 197 | return 0; |
| 198 | } |
| 199 | } |
| 200 | |
| 201 | return -EINVAL; |
| 202 | } |
Matt Roper | 1356837 | 2015-01-21 16:35:47 -0800 | [diff] [blame] | 203 | |
| 204 | /* |
| 205 | * intel_crtc_duplicate_state - duplicate crtc state |
| 206 | * @crtc: drm crtc |
| 207 | * |
| 208 | * Allocates and returns a copy of the crtc state (both common and |
| 209 | * Intel-specific) for the specified crtc. |
| 210 | * |
| 211 | * Returns: The newly allocated crtc state, or NULL on failure. |
| 212 | */ |
| 213 | struct drm_crtc_state * |
| 214 | intel_crtc_duplicate_state(struct drm_crtc *crtc) |
| 215 | { |
| 216 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
Ander Conselvan de Oliveira | a91572f | 2015-03-03 15:21:55 +0200 | [diff] [blame] | 217 | struct intel_crtc_state *crtc_state; |
Matt Roper | 1356837 | 2015-01-21 16:35:47 -0800 | [diff] [blame] | 218 | |
| 219 | if (WARN_ON(!intel_crtc->config)) |
Ander Conselvan de Oliveira | a91572f | 2015-03-03 15:21:55 +0200 | [diff] [blame] | 220 | crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); |
| 221 | else |
| 222 | crtc_state = kmemdup(intel_crtc->config, |
| 223 | sizeof(*intel_crtc->config), GFP_KERNEL); |
Matt Roper | 1356837 | 2015-01-21 16:35:47 -0800 | [diff] [blame] | 224 | |
Ander Conselvan de Oliveira | f0c6057 | 2015-04-21 17:12:58 +0300 | [diff] [blame] | 225 | if (!crtc_state) |
| 226 | return NULL; |
| 227 | |
| 228 | __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); |
| 229 | |
| 230 | crtc_state->base.crtc = crtc; |
Ander Conselvan de Oliveira | a91572f | 2015-03-03 15:21:55 +0200 | [diff] [blame] | 231 | |
| 232 | return &crtc_state->base; |
Matt Roper | 1356837 | 2015-01-21 16:35:47 -0800 | [diff] [blame] | 233 | } |
| 234 | |
/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.  The Intel-specific state carries no separately
 * allocated members here, so the common helper suffices.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}
Chandra Konduru | d03c93d | 2015-04-09 16:42:46 -0700 | [diff] [blame] | 248 | |
| 249 | /** |
| 250 | * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests |
| 251 | * @dev: DRM device |
| 252 | * @crtc: intel crtc |
| 253 | * @crtc_state: incoming crtc_state to validate and setup scalers |
| 254 | * |
| 255 | * This function sets up scalers based on staged scaling requests for |
| 256 | * a @crtc and its planes. It is called from crtc level check path. If request |
| 257 | * is a supportable request, it attaches scalers to requested planes and crtc. |
| 258 | * |
| 259 | * This function takes into account the current scaler(s) in use by any planes |
| 260 | * not being part of this atomic state |
| 261 | * |
| 262 | * Returns: |
| 263 | * 0 - scalers were setup succesfully |
| 264 | * error code - otherwise |
| 265 | */ |
| 266 | int intel_atomic_setup_scalers(struct drm_device *dev, |
| 267 | struct intel_crtc *intel_crtc, |
| 268 | struct intel_crtc_state *crtc_state) |
| 269 | { |
| 270 | struct drm_plane *plane = NULL; |
| 271 | struct intel_plane *intel_plane; |
| 272 | struct intel_plane_state *plane_state = NULL; |
| 273 | struct intel_crtc_scaler_state *scaler_state; |
| 274 | struct drm_atomic_state *drm_state; |
| 275 | int num_scalers_need; |
| 276 | int i, j; |
| 277 | |
| 278 | if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state) |
| 279 | return 0; |
| 280 | |
| 281 | scaler_state = &crtc_state->scaler_state; |
| 282 | drm_state = crtc_state->base.state; |
| 283 | |
| 284 | num_scalers_need = hweight32(scaler_state->scaler_users); |
| 285 | DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n", |
| 286 | crtc_state, num_scalers_need, intel_crtc->num_scalers, |
| 287 | scaler_state->scaler_users); |
| 288 | |
| 289 | /* |
| 290 | * High level flow: |
| 291 | * - staged scaler requests are already in scaler_state->scaler_users |
| 292 | * - check whether staged scaling requests can be supported |
| 293 | * - add planes using scalers that aren't in current transaction |
| 294 | * - assign scalers to requested users |
| 295 | * - as part of plane commit, scalers will be committed |
| 296 | * (i.e., either attached or detached) to respective planes in hw |
| 297 | * - as part of crtc_commit, scaler will be either attached or detached |
| 298 | * to crtc in hw |
| 299 | */ |
| 300 | |
| 301 | /* fail if required scalers > available scalers */ |
| 302 | if (num_scalers_need > intel_crtc->num_scalers){ |
| 303 | DRM_DEBUG_KMS("Too many scaling requests %d > %d\n", |
| 304 | num_scalers_need, intel_crtc->num_scalers); |
| 305 | return -EINVAL; |
| 306 | } |
| 307 | |
| 308 | /* walkthrough scaler_users bits and start assigning scalers */ |
| 309 | for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { |
| 310 | int *scaler_id; |
Maarten Lankhorst | 133b0d1 | 2015-06-15 12:33:39 +0200 | [diff] [blame] | 311 | const char *name; |
| 312 | int idx; |
Chandra Konduru | d03c93d | 2015-04-09 16:42:46 -0700 | [diff] [blame] | 313 | |
| 314 | /* skip if scaler not required */ |
| 315 | if (!(scaler_state->scaler_users & (1 << i))) |
| 316 | continue; |
| 317 | |
| 318 | if (i == SKL_CRTC_INDEX) { |
Maarten Lankhorst | 133b0d1 | 2015-06-15 12:33:39 +0200 | [diff] [blame] | 319 | name = "CRTC"; |
| 320 | idx = intel_crtc->base.base.id; |
| 321 | |
Chandra Konduru | d03c93d | 2015-04-09 16:42:46 -0700 | [diff] [blame] | 322 | /* panel fitter case: assign as a crtc scaler */ |
| 323 | scaler_id = &scaler_state->scaler_id; |
| 324 | } else { |
Maarten Lankhorst | 133b0d1 | 2015-06-15 12:33:39 +0200 | [diff] [blame] | 325 | name = "PLANE"; |
| 326 | idx = plane->base.id; |
| 327 | |
Chandra Konduru | d03c93d | 2015-04-09 16:42:46 -0700 | [diff] [blame] | 328 | if (!drm_state) |
| 329 | continue; |
| 330 | |
| 331 | /* plane scaler case: assign as a plane scaler */ |
| 332 | /* find the plane that set the bit as scaler_user */ |
| 333 | plane = drm_state->planes[i]; |
| 334 | |
| 335 | /* |
| 336 | * to enable/disable hq mode, add planes that are using scaler |
| 337 | * into this transaction |
| 338 | */ |
| 339 | if (!plane) { |
| 340 | struct drm_plane_state *state; |
| 341 | plane = drm_plane_from_index(dev, i); |
| 342 | state = drm_atomic_get_plane_state(drm_state, plane); |
| 343 | if (IS_ERR(state)) { |
| 344 | DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n", |
| 345 | plane->base.id); |
| 346 | return PTR_ERR(state); |
| 347 | } |
Maarten Lankhorst | cf5a15b | 2015-06-15 12:33:41 +0200 | [diff] [blame] | 348 | |
| 349 | /* |
| 350 | * the plane is added after plane checks are run, |
| 351 | * but since this plane is unchanged just do the |
| 352 | * minimum required validation. |
| 353 | */ |
| 354 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) |
| 355 | intel_crtc->atomic.wait_for_flips = true; |
| 356 | crtc_state->base.planes_changed = true; |
Chandra Konduru | d03c93d | 2015-04-09 16:42:46 -0700 | [diff] [blame] | 357 | } |
| 358 | |
| 359 | intel_plane = to_intel_plane(plane); |
| 360 | |
| 361 | /* plane on different crtc cannot be a scaler user of this crtc */ |
| 362 | if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) { |
| 363 | continue; |
| 364 | } |
| 365 | |
| 366 | plane_state = to_intel_plane_state(drm_state->plane_states[i]); |
| 367 | scaler_id = &plane_state->scaler_id; |
| 368 | } |
| 369 | |
| 370 | if (*scaler_id < 0) { |
| 371 | /* find a free scaler */ |
| 372 | for (j = 0; j < intel_crtc->num_scalers; j++) { |
| 373 | if (!scaler_state->scalers[j].in_use) { |
| 374 | scaler_state->scalers[j].in_use = 1; |
Maarten Lankhorst | 133b0d1 | 2015-06-15 12:33:39 +0200 | [diff] [blame] | 375 | *scaler_id = j; |
Chandra Konduru | d03c93d | 2015-04-09 16:42:46 -0700 | [diff] [blame] | 376 | DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", |
Maarten Lankhorst | 133b0d1 | 2015-06-15 12:33:39 +0200 | [diff] [blame] | 377 | intel_crtc->pipe, *scaler_id, name, idx); |
Chandra Konduru | d03c93d | 2015-04-09 16:42:46 -0700 | [diff] [blame] | 378 | break; |
| 379 | } |
| 380 | } |
| 381 | } |
| 382 | |
| 383 | if (WARN_ON(*scaler_id < 0)) { |
Maarten Lankhorst | 133b0d1 | 2015-06-15 12:33:39 +0200 | [diff] [blame] | 384 | DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx); |
Chandra Konduru | d03c93d | 2015-04-09 16:42:46 -0700 | [diff] [blame] | 385 | continue; |
| 386 | } |
| 387 | |
| 388 | /* set scaler mode */ |
| 389 | if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) { |
| 390 | /* |
| 391 | * when only 1 scaler is in use on either pipe A or B, |
| 392 | * scaler 0 operates in high quality (HQ) mode. |
| 393 | * In this case use scaler 0 to take advantage of HQ mode |
| 394 | */ |
| 395 | *scaler_id = 0; |
| 396 | scaler_state->scalers[0].in_use = 1; |
| 397 | scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ; |
| 398 | scaler_state->scalers[1].in_use = 0; |
| 399 | } else { |
| 400 | scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN; |
| 401 | } |
| 402 | } |
| 403 | |
| 404 | return 0; |
| 405 | } |
Maarten Lankhorst | de419ab | 2015-06-04 10:21:28 +0200 | [diff] [blame] | 406 | |
Maarten Lankhorst | f721790 | 2015-06-10 10:24:20 +0200 | [diff] [blame] | 407 | static void |
Maarten Lankhorst | de419ab | 2015-06-04 10:21:28 +0200 | [diff] [blame] | 408 | intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, |
| 409 | struct intel_shared_dpll_config *shared_dpll) |
| 410 | { |
| 411 | enum intel_dpll_id i; |
| 412 | |
| 413 | /* Copy shared dpll state */ |
| 414 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
| 415 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; |
| 416 | |
| 417 | shared_dpll[i] = pll->config; |
| 418 | } |
| 419 | } |
| 420 | |
| 421 | struct intel_shared_dpll_config * |
| 422 | intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s) |
| 423 | { |
| 424 | struct intel_atomic_state *state = to_intel_atomic_state(s); |
| 425 | |
| 426 | WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex)); |
| 427 | |
| 428 | if (!state->dpll_set) { |
| 429 | state->dpll_set = true; |
| 430 | |
| 431 | intel_atomic_duplicate_dpll_state(to_i915(s->dev), |
| 432 | state->shared_dpll); |
| 433 | } |
| 434 | |
| 435 | return state->shared_dpll; |
| 436 | } |
| 437 | |
| 438 | struct drm_atomic_state * |
| 439 | intel_atomic_state_alloc(struct drm_device *dev) |
| 440 | { |
| 441 | struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL); |
| 442 | |
| 443 | if (!state || drm_atomic_state_init(dev, &state->base) < 0) { |
| 444 | kfree(state); |
| 445 | return NULL; |
| 446 | } |
| 447 | |
| 448 | return &state->base; |
| 449 | } |
| 450 | |
| 451 | void intel_atomic_state_clear(struct drm_atomic_state *s) |
| 452 | { |
| 453 | struct intel_atomic_state *state = to_intel_atomic_state(s); |
| 454 | drm_atomic_state_default_clear(&state->base); |
| 455 | state->dpll_set = false; |
| 456 | } |