1 /* 2 * Copyright (C) 2014 Red Hat 3 * Copyright (C) 2014 Intel Corp. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Rob Clark <robdclark@gmail.com> 25 * Daniel Vetter <daniel.vetter@ffwll.ch> 26 */ 27 28 29 #include <drm/drmP.h> 30 #include <drm/drm_atomic.h> 31 #include <drm/drm_mode.h> 32 #include <drm/drm_print.h> 33 #include <drm/drm_writeback.h> 34 #include <linux/sync_file.h> 35 36 #include "drm_crtc_internal.h" 37 #include "drm_internal.h" 38 39 void __drm_crtc_commit_free(struct kref *kref) 40 { 41 struct drm_crtc_commit *commit = 42 container_of(kref, struct drm_crtc_commit, ref); 43 44 kfree(commit); 45 } 46 EXPORT_SYMBOL(__drm_crtc_commit_free); 47 48 /** 49 * drm_atomic_state_default_release - 50 * release memory initialized by drm_atomic_state_init 51 * @state: atomic state 52 * 53 * Free all the memory allocated by drm_atomic_state_init. 54 * This should only be used by drivers which are still subclassing 55 * &drm_atomic_state and haven't switched to &drm_private_state yet. 56 */ 57 void drm_atomic_state_default_release(struct drm_atomic_state *state) 58 { 59 kfree(state->connectors); 60 kfree(state->crtcs); 61 kfree(state->planes); 62 kfree(state->private_objs); 63 } 64 EXPORT_SYMBOL(drm_atomic_state_default_release); 65 66 /** 67 * drm_atomic_state_init - init new atomic state 68 * @dev: DRM device 69 * @state: atomic state 70 * 71 * Default implementation for filling in a new atomic state. 72 * This should only be used by drivers which are still subclassing 73 * &drm_atomic_state and haven't switched to &drm_private_state yet. 74 */ 75 int 76 drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) 77 { 78 kref_init(&state->ref); 79 80 /* TODO legacy paths should maybe do a better job about 81 * setting this appropriately? 
82 */ 83 state->allow_modeset = true; 84 85 state->crtcs = kcalloc(dev->mode_config.num_crtc, 86 sizeof(*state->crtcs), GFP_KERNEL); 87 if (!state->crtcs) 88 goto fail; 89 state->planes = kcalloc(dev->mode_config.num_total_plane, 90 sizeof(*state->planes), GFP_KERNEL); 91 if (!state->planes) 92 goto fail; 93 94 state->dev = dev; 95 96 DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state); 97 98 return 0; 99 fail: 100 drm_atomic_state_default_release(state); 101 return -ENOMEM; 102 } 103 EXPORT_SYMBOL(drm_atomic_state_init); 104 105 /** 106 * drm_atomic_state_alloc - allocate atomic state 107 * @dev: DRM device 108 * 109 * This allocates an empty atomic state to track updates. 110 */ 111 struct drm_atomic_state * 112 drm_atomic_state_alloc(struct drm_device *dev) 113 { 114 struct drm_mode_config *config = &dev->mode_config; 115 116 if (!config->funcs->atomic_state_alloc) { 117 struct drm_atomic_state *state; 118 119 state = kzalloc(sizeof(*state), GFP_KERNEL); 120 if (!state) 121 return NULL; 122 if (drm_atomic_state_init(dev, state) < 0) { 123 kfree(state); 124 return NULL; 125 } 126 return state; 127 } 128 129 return config->funcs->atomic_state_alloc(dev); 130 } 131 EXPORT_SYMBOL(drm_atomic_state_alloc); 132 133 /** 134 * drm_atomic_state_default_clear - clear base atomic state 135 * @state: atomic state 136 * 137 * Default implementation for clearing atomic state. 138 * This should only be used by drivers which are still subclassing 139 * &drm_atomic_state and haven't switched to &drm_private_state yet. 140 */ 141 void drm_atomic_state_default_clear(struct drm_atomic_state *state) 142 { 143 struct drm_device *dev = state->dev; 144 struct drm_mode_config *config = &dev->mode_config; 145 int i; 146 147 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); 148 149 for (i = 0; i < state->num_connector; i++) { 150 struct drm_connector *connector = state->connectors[i].ptr; 151 152 if (!connector) 153 continue; 154 155 connector->funcs->atomic_destroy_state(connector, 156 state->connectors[i].state); 157 state->connectors[i].ptr = NULL; 158 state->connectors[i].state = NULL; 159 state->connectors[i].old_state = NULL; 160 state->connectors[i].new_state = NULL; 161 drm_connector_put(connector); 162 } 163 164 for (i = 0; i < config->num_crtc; i++) { 165 struct drm_crtc *crtc = state->crtcs[i].ptr; 166 167 if (!crtc) 168 continue; 169 170 crtc->funcs->atomic_destroy_state(crtc, 171 state->crtcs[i].state); 172 173 state->crtcs[i].ptr = NULL; 174 state->crtcs[i].state = NULL; 175 state->crtcs[i].old_state = NULL; 176 state->crtcs[i].new_state = NULL; 177 178 if (state->crtcs[i].commit) { 179 drm_crtc_commit_put(state->crtcs[i].commit); 180 state->crtcs[i].commit = NULL; 181 } 182 } 183 184 for (i = 0; i < config->num_total_plane; i++) { 185 struct drm_plane *plane = state->planes[i].ptr; 186 187 if (!plane) 188 continue; 189 190 plane->funcs->atomic_destroy_state(plane, 191 state->planes[i].state); 192 state->planes[i].ptr = NULL; 193 state->planes[i].state = NULL; 194 state->planes[i].old_state = NULL; 195 state->planes[i].new_state = NULL; 196 } 197 198 for (i = 0; i < state->num_private_objs; i++) { 199 struct drm_private_obj *obj = state->private_objs[i].ptr; 200 201 obj->funcs->atomic_destroy_state(obj, 202 state->private_objs[i].state); 203 state->private_objs[i].ptr = NULL; 204 state->private_objs[i].state = NULL; 205 state->private_objs[i].old_state = NULL; 206 state->private_objs[i].new_state = NULL; 207 } 208 state->num_private_objs = 0; 209 210 if (state->fake_commit) { 211 
drm_crtc_commit_put(state->fake_commit); 212 state->fake_commit = NULL; 213 } 214 } 215 EXPORT_SYMBOL(drm_atomic_state_default_clear); 216 217 /** 218 * drm_atomic_state_clear - clear state object 219 * @state: atomic state 220 * 221 * When the w/w mutex algorithm detects a deadlock we need to back off and drop 222 * all locks. So someone else could sneak in and change the current modeset 223 * configuration. Which means that all the state assembled in @state is no 224 * longer an atomic update to the current state, but to some arbitrary earlier 225 * state. Which could break assumptions the driver's 226 * &drm_mode_config_funcs.atomic_check likely relies on. 227 * 228 * Hence we must clear all cached state and completely start over, using this 229 * function. 230 */ 231 void drm_atomic_state_clear(struct drm_atomic_state *state) 232 { 233 struct drm_device *dev = state->dev; 234 struct drm_mode_config *config = &dev->mode_config; 235 236 if (config->funcs->atomic_state_clear) 237 config->funcs->atomic_state_clear(state); 238 else 239 drm_atomic_state_default_clear(state); 240 } 241 EXPORT_SYMBOL(drm_atomic_state_clear); 242 243 /** 244 * __drm_atomic_state_free - free all memory for an atomic state 245 * @ref: This atomic state to deallocate 246 * 247 * This frees all memory associated with an atomic state, including all the 248 * per-object state for planes, crtcs and connectors. 249 */ 250 void __drm_atomic_state_free(struct kref *ref) 251 { 252 struct drm_atomic_state *state = container_of(ref, typeof(*state), ref); 253 struct drm_mode_config *config = &state->dev->mode_config; 254 255 drm_atomic_state_clear(state); 256 257 DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state); 258 259 if (config->funcs->atomic_state_free) { 260 config->funcs->atomic_state_free(state); 261 } else { 262 drm_atomic_state_default_release(state); 263 kfree(state); 264 } 265 } 266 EXPORT_SYMBOL(__drm_atomic_state_free); 267 268 /** 269 * drm_atomic_get_crtc_state - get crtc state 270 * @state: global atomic state object 271 * @crtc: crtc to get state object for 272 * 273 * This function returns the crtc state for the given crtc, allocating it if 274 * needed. It will also grab the relevant crtc lock to make sure that the state 275 * is consistent. 276 * 277 * Returns: 278 * 279 * Either the allocated state or the error code encoded into the pointer. When 280 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 281 * entire atomic sequence must be restarted. All other errors are fatal. 
282 */ 283 struct drm_crtc_state * 284 drm_atomic_get_crtc_state(struct drm_atomic_state *state, 285 struct drm_crtc *crtc) 286 { 287 int ret, index = drm_crtc_index(crtc); 288 struct drm_crtc_state *crtc_state; 289 290 WARN_ON(!state->acquire_ctx); 291 292 crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); 293 if (crtc_state) 294 return crtc_state; 295 296 ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx); 297 if (ret) 298 return ERR_PTR(ret); 299 300 crtc_state = crtc->funcs->atomic_duplicate_state(crtc); 301 if (!crtc_state) 302 return ERR_PTR(-ENOMEM); 303 304 state->crtcs[index].state = crtc_state; 305 state->crtcs[index].old_state = crtc->state; 306 state->crtcs[index].new_state = crtc_state; 307 state->crtcs[index].ptr = crtc; 308 crtc_state->state = state; 309 310 DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", 311 crtc->base.id, crtc->name, crtc_state, state); 312 313 return crtc_state; 314 } 315 EXPORT_SYMBOL(drm_atomic_get_crtc_state); 316 317 static void set_out_fence_for_crtc(struct drm_atomic_state *state, 318 struct drm_crtc *crtc, s32 __user *fence_ptr) 319 { 320 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; 321 } 322 323 static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, 324 struct drm_crtc *crtc) 325 { 326 s32 __user *fence_ptr; 327 328 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; 329 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; 330 331 return fence_ptr; 332 } 333 334 static int set_out_fence_for_connector(struct drm_atomic_state *state, 335 struct drm_connector *connector, 336 s32 __user *fence_ptr) 337 { 338 unsigned int index = drm_connector_index(connector); 339 340 if (!fence_ptr) 341 return 0; 342 343 if (put_user(-1, fence_ptr)) 344 return -EFAULT; 345 346 state->connectors[index].out_fence_ptr = fence_ptr; 347 348 return 0; 349 } 350 351 static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state, 352 struct drm_connector *connector) 353 { 354 unsigned int index = drm_connector_index(connector); 355 s32 __user *fence_ptr; 356 357 fence_ptr = state->connectors[index].out_fence_ptr; 358 state->connectors[index].out_fence_ptr = NULL; 359 360 return fence_ptr; 361 } 362 363 /** 364 * drm_atomic_set_mode_for_crtc - set mode for CRTC 365 * @state: the CRTC whose incoming state to update 366 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable 367 * 368 * Set a mode (originating from the kernel) on the desired CRTC state and update 369 * the enable property. 370 * 371 * RETURNS: 372 * Zero on success, error code on failure. Cannot return -EDEADLK. 373 */ 374 int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, 375 const struct drm_display_mode *mode) 376 { 377 struct drm_crtc *crtc = state->crtc; 378 struct drm_mode_modeinfo umode; 379 380 /* Early return for no change. 
*/ 381 if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0) 382 return 0; 383 384 drm_property_blob_put(state->mode_blob); 385 state->mode_blob = NULL; 386 387 if (mode) { 388 drm_mode_convert_to_umode(&umode, mode); 389 state->mode_blob = 390 drm_property_create_blob(state->crtc->dev, 391 sizeof(umode), 392 &umode); 393 if (IS_ERR(state->mode_blob)) 394 return PTR_ERR(state->mode_blob); 395 396 drm_mode_copy(&state->mode, mode); 397 state->enable = true; 398 DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n", 399 mode->name, crtc->base.id, crtc->name, state); 400 } else { 401 memset(&state->mode, 0, sizeof(state->mode)); 402 state->enable = false; 403 DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n", 404 crtc->base.id, crtc->name, state); 405 } 406 407 return 0; 408 } 409 EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc); 410 411 /** 412 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC 413 * @state: the CRTC whose incoming state to update 414 * @blob: pointer to blob property to use for mode 415 * 416 * Set a mode (originating from a blob property) on the desired CRTC state. 417 * This function will take a reference on the blob property for the CRTC state, 418 * and release the reference held on the state's existing mode property, if any 419 * was set. 420 * 421 * RETURNS: 422 * Zero on success, error code on failure. Cannot return -EDEADLK. 423 */ 424 int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, 425 struct drm_property_blob *blob) 426 { 427 struct drm_crtc *crtc = state->crtc; 428 429 if (blob == state->mode_blob) 430 return 0; 431 432 drm_property_blob_put(state->mode_blob); 433 state->mode_blob = NULL; 434 435 memset(&state->mode, 0, sizeof(state->mode)); 436 437 if (blob) { 438 int ret; 439 440 if (blob->length != sizeof(struct drm_mode_modeinfo)) { 441 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n", 442 crtc->base.id, crtc->name, 443 blob->length); 444 return -EINVAL; 445 } 446 447 ret = drm_mode_convert_umode(crtc->dev, 448 &state->mode, blob->data); 449 if (ret) { 450 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n", 451 crtc->base.id, crtc->name, 452 ret, drm_get_mode_status_name(state->mode.status)); 453 drm_mode_debug_printmodeline(&state->mode); 454 return -EINVAL; 455 } 456 457 state->mode_blob = drm_property_blob_get(blob); 458 state->enable = true; 459 DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n", 460 state->mode.name, crtc->base.id, crtc->name, 461 state); 462 } else { 463 state->enable = false; 464 DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n", 465 crtc->base.id, crtc->name, state); 466 } 467 468 return 0; 469 } 470 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); 471 472 /** 473 * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it 474 * @dev: DRM device 475 * @blob: a pointer to the member blob to be replaced 476 * @blob_id: ID of the new blob 477 * @expected_size: total expected size of the blob data (in bytes) 478 * @expected_elem_size: expected element size of the blob data (in bytes) 479 * @replaced: did the blob get replaced? 480 * 481 * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero 482 * @blob becomes NULL. 483 * 484 * If @expected_size is positive the new blob length is expected to be equal 485 * to @expected_size bytes. If @expected_elem_size is positive the new blob 486 * length is expected to be a multiple of @expected_elem_size bytes. Otherwise 487 * an error is returned. 
488 * 489 * @replaced will indicate to the caller whether the blob was replaced or not. 490 * If the old and new blobs were in fact the same blob @replaced will be false 491 * otherwise it will be true. 492 * 493 * RETURNS: 494 * Zero on success, error code on failure. 495 */ 496 static int 497 drm_atomic_replace_property_blob_from_id(struct drm_device *dev, 498 struct drm_property_blob **blob, 499 uint64_t blob_id, 500 ssize_t expected_size, 501 ssize_t expected_elem_size, 502 bool *replaced) 503 { 504 struct drm_property_blob *new_blob = NULL; 505 506 if (blob_id != 0) { 507 new_blob = drm_property_lookup_blob(dev, blob_id); 508 if (new_blob == NULL) 509 return -EINVAL; 510 511 if (expected_size > 0 && 512 new_blob->length != expected_size) { 513 drm_property_blob_put(new_blob); 514 return -EINVAL; 515 } 516 if (expected_elem_size > 0 && 517 new_blob->length % expected_elem_size != 0) { 518 drm_property_blob_put(new_blob); 519 return -EINVAL; 520 } 521 } 522 523 *replaced |= drm_property_replace_blob(blob, new_blob); 524 drm_property_blob_put(new_blob); 525 526 return 0; 527 } 528 529 /** 530 * drm_atomic_crtc_set_property - set property on CRTC 531 * @crtc: the drm CRTC to set a property on 532 * @state: the state object to update with the new property value 533 * @property: the property to set 534 * @val: the new property value 535 * 536 * This function handles generic/core properties and calls out to driver's 537 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure 538 * consistent behavior you must call this function rather than the driver hook 539 * directly. 540 * 541 * RETURNS: 542 * Zero on success, error code on failure 543 */ 544 int drm_atomic_crtc_set_property(struct drm_crtc *crtc, 545 struct drm_crtc_state *state, struct drm_property *property, 546 uint64_t val) 547 { 548 struct drm_device *dev = crtc->dev; 549 struct drm_mode_config *config = &dev->mode_config; 550 bool replaced = false; 551 int ret; 552 553 if (property == config->prop_active) 554 state->active = val; 555 else if (property == config->prop_mode_id) { 556 struct drm_property_blob *mode = 557 drm_property_lookup_blob(dev, val); 558 ret = drm_atomic_set_mode_prop_for_crtc(state, mode); 559 drm_property_blob_put(mode); 560 return ret; 561 } else if (property == config->degamma_lut_property) { 562 ret = drm_atomic_replace_property_blob_from_id(dev, 563 &state->degamma_lut, 564 val, 565 -1, sizeof(struct drm_color_lut), 566 &replaced); 567 state->color_mgmt_changed |= replaced; 568 return ret; 569 } else if (property == config->ctm_property) { 570 ret = drm_atomic_replace_property_blob_from_id(dev, 571 &state->ctm, 572 val, 573 sizeof(struct drm_color_ctm), -1, 574 &replaced); 575 state->color_mgmt_changed |= replaced; 576 return ret; 577 } else if (property == config->gamma_lut_property) { 578 ret = drm_atomic_replace_property_blob_from_id(dev, 579 &state->gamma_lut, 580 val, 581 -1, sizeof(struct drm_color_lut), 582 &replaced); 583 state->color_mgmt_changed |= replaced; 584 return ret; 585 } else if (property == config->prop_out_fence_ptr) { 586 s32 __user *fence_ptr = u64_to_user_ptr(val); 587 588 if (!fence_ptr) 589 return 0; 590 591 if (put_user(-1, fence_ptr)) 592 return -EFAULT; 593 594 set_out_fence_for_crtc(state->state, crtc, fence_ptr); 595 } else if (crtc->funcs->atomic_set_property) { 596 return crtc->funcs->atomic_set_property(crtc, state, property, val); 597 } else { 598 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n", 599 crtc->base.id, crtc->name, 600 
property->base.id, property->name); 601 return -EINVAL; 602 } 603 604 return 0; 605 } 606 EXPORT_SYMBOL(drm_atomic_crtc_set_property); 607 608 /** 609 * drm_atomic_crtc_get_property - get property value from CRTC state 610 * @crtc: the drm CRTC to set a property on 611 * @state: the state object to get the property value from 612 * @property: the property to set 613 * @val: return location for the property value 614 * 615 * This function handles generic/core properties and calls out to driver's 616 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure 617 * consistent behavior you must call this function rather than the driver hook 618 * directly. 619 * 620 * RETURNS: 621 * Zero on success, error code on failure 622 */ 623 static int 624 drm_atomic_crtc_get_property(struct drm_crtc *crtc, 625 const struct drm_crtc_state *state, 626 struct drm_property *property, uint64_t *val) 627 { 628 struct drm_device *dev = crtc->dev; 629 struct drm_mode_config *config = &dev->mode_config; 630 631 if (property == config->prop_active) 632 *val = state->active; 633 else if (property == config->prop_mode_id) 634 *val = (state->mode_blob) ? state->mode_blob->base.id : 0; 635 else if (property == config->degamma_lut_property) 636 *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; 637 else if (property == config->ctm_property) 638 *val = (state->ctm) ? state->ctm->base.id : 0; 639 else if (property == config->gamma_lut_property) 640 *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0; 641 else if (property == config->prop_out_fence_ptr) 642 *val = 0; 643 else if (crtc->funcs->atomic_get_property) 644 return crtc->funcs->atomic_get_property(crtc, state, property, val); 645 else 646 return -EINVAL; 647 648 return 0; 649 } 650 651 /** 652 * drm_atomic_crtc_check - check crtc state 653 * @crtc: crtc to check 654 * @state: crtc state to check 655 * 656 * Provides core sanity checks for crtc state. 657 * 658 * RETURNS: 659 * Zero on success, error code on failure 660 */ 661 static int drm_atomic_crtc_check(struct drm_crtc *crtc, 662 struct drm_crtc_state *state) 663 { 664 /* NOTE: we explicitly don't enforce constraints such as primary 665 * layer covering entire screen, since that is something we want 666 * to allow (on hw that supports it). For hw that does not, it 667 * should be checked in driver's crtc->atomic_check() vfunc. 668 * 669 * TODO: Add generic modeset state checks once we support those. 670 */ 671 672 if (state->active && !state->enable) { 673 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", 674 crtc->base.id, crtc->name); 675 return -EINVAL; 676 } 677 678 /* The state->enable vs. state->mode_blob checks can be WARN_ON, 679 * as this is a kernel-internal detail that userspace should never 680 * be able to trigger. */ 681 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 682 WARN_ON(state->enable && !state->mode_blob)) { 683 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", 684 crtc->base.id, crtc->name); 685 return -EINVAL; 686 } 687 688 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 689 WARN_ON(!state->enable && state->mode_blob)) { 690 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", 691 crtc->base.id, crtc->name); 692 return -EINVAL; 693 } 694 695 /* 696 * Reject event generation for when a CRTC is off and stays off. 697 * It wouldn't be hard to implement this, but userspace has a track 698 * record of happily burning through 100% cpu (or worse, crash) when the 699 * display pipe is suspended. 
To avoid all that fun just reject updates 700 * that ask for events since likely that indicates a bug in the 701 * compositor's drawing loop. This is consistent with the vblank IOCTL 702 * and legacy page_flip IOCTL which also reject service on a disabled 703 * pipe. 704 */ 705 if (state->event && !state->active && !crtc->state->active) { 706 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n", 707 crtc->base.id, crtc->name); 708 return -EINVAL; 709 } 710 711 return 0; 712 } 713 714 static void drm_atomic_crtc_print_state(struct drm_printer *p, 715 const struct drm_crtc_state *state) 716 { 717 struct drm_crtc *crtc = state->crtc; 718 719 drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name); 720 drm_printf(p, "\tenable=%d\n", state->enable); 721 drm_printf(p, "\tactive=%d\n", state->active); 722 drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed); 723 drm_printf(p, "\tmode_changed=%d\n", state->mode_changed); 724 drm_printf(p, "\tactive_changed=%d\n", state->active_changed); 725 drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed); 726 drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); 727 drm_printf(p, "\tplane_mask=%x\n", state->plane_mask); 728 drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask); 729 drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask); 730 drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode)); 731 732 if (crtc->funcs->atomic_print_state) 733 crtc->funcs->atomic_print_state(p, state); 734 } 735 736 /** 737 * drm_atomic_connector_check - check connector state 738 * @connector: connector to check 739 * @state: connector state to check 740 * 741 * Provides core sanity checks for connector state. 742 * 743 * RETURNS: 744 * Zero on success, error code on failure 745 */ 746 static int drm_atomic_connector_check(struct drm_connector *connector, 747 struct drm_connector_state *state) 748 { 749 struct drm_crtc_state *crtc_state; 750 struct drm_writeback_job *writeback_job = state->writeback_job; 751 752 if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job) 753 return 0; 754 755 if (writeback_job->fb && !state->crtc) { 756 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n", 757 connector->base.id, connector->name); 758 return -EINVAL; 759 } 760 761 if (state->crtc) 762 crtc_state = drm_atomic_get_existing_crtc_state(state->state, 763 state->crtc); 764 765 if (writeback_job->fb && !crtc_state->active) { 766 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n", 767 connector->base.id, connector->name, 768 state->crtc->base.id); 769 return -EINVAL; 770 } 771 772 if (writeback_job->out_fence && !writeback_job->fb) { 773 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", 774 connector->base.id, connector->name); 775 return -EINVAL; 776 } 777 778 return 0; 779 } 780 781 /** 782 * drm_atomic_get_plane_state - get plane state 783 * @state: global atomic state object 784 * @plane: plane to get state object for 785 * 786 * This function returns the plane state for the given plane, allocating it if 787 * needed. It will also grab the relevant plane lock to make sure that the state 788 * is consistent. 789 * 790 * Returns: 791 * 792 * Either the allocated state or the error code encoded into the pointer. When 793 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 794 * entire atomic sequence must be restarted. All other errors are fatal. 
795 */ 796 struct drm_plane_state * 797 drm_atomic_get_plane_state(struct drm_atomic_state *state, 798 struct drm_plane *plane) 799 { 800 int ret, index = drm_plane_index(plane); 801 struct drm_plane_state *plane_state; 802 803 WARN_ON(!state->acquire_ctx); 804 805 /* the legacy pointers should never be set */ 806 WARN_ON(plane->fb); 807 WARN_ON(plane->old_fb); 808 WARN_ON(plane->crtc); 809 810 plane_state = drm_atomic_get_existing_plane_state(state, plane); 811 if (plane_state) 812 return plane_state; 813 814 ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx); 815 if (ret) 816 return ERR_PTR(ret); 817 818 plane_state = plane->funcs->atomic_duplicate_state(plane); 819 if (!plane_state) 820 return ERR_PTR(-ENOMEM); 821 822 state->planes[index].state = plane_state; 823 state->planes[index].ptr = plane; 824 state->planes[index].old_state = plane->state; 825 state->planes[index].new_state = plane_state; 826 plane_state->state = state; 827 828 DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", 829 plane->base.id, plane->name, plane_state, state); 830 831 if (plane_state->crtc) { 832 struct drm_crtc_state *crtc_state; 833 834 crtc_state = drm_atomic_get_crtc_state(state, 835 plane_state->crtc); 836 if (IS_ERR(crtc_state)) 837 return ERR_CAST(crtc_state); 838 } 839 840 return plane_state; 841 } 842 EXPORT_SYMBOL(drm_atomic_get_plane_state); 843 844 /** 845 * drm_atomic_plane_set_property - set property on plane 846 * @plane: the drm plane to set a property on 847 * @state: the state object to update with the new property value 848 * @property: the property to set 849 * @val: the new property value 850 * 851 * This function handles generic/core properties and calls out to driver's 852 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure 853 * consistent behavior you must call this function rather than the driver hook 854 * directly. 
855 * 856 * RETURNS: 857 * Zero on success, error code on failure 858 */ 859 static int drm_atomic_plane_set_property(struct drm_plane *plane, 860 struct drm_plane_state *state, struct drm_property *property, 861 uint64_t val) 862 { 863 struct drm_device *dev = plane->dev; 864 struct drm_mode_config *config = &dev->mode_config; 865 866 if (property == config->prop_fb_id) { 867 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); 868 drm_atomic_set_fb_for_plane(state, fb); 869 if (fb) 870 drm_framebuffer_put(fb); 871 } else if (property == config->prop_in_fence_fd) { 872 if (state->fence) 873 return -EINVAL; 874 875 if (U642I64(val) == -1) 876 return 0; 877 878 state->fence = sync_file_get_fence(val); 879 if (!state->fence) 880 return -EINVAL; 881 882 } else if (property == config->prop_crtc_id) { 883 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); 884 return drm_atomic_set_crtc_for_plane(state, crtc); 885 } else if (property == config->prop_crtc_x) { 886 state->crtc_x = U642I64(val); 887 } else if (property == config->prop_crtc_y) { 888 state->crtc_y = U642I64(val); 889 } else if (property == config->prop_crtc_w) { 890 state->crtc_w = val; 891 } else if (property == config->prop_crtc_h) { 892 state->crtc_h = val; 893 } else if (property == config->prop_src_x) { 894 state->src_x = val; 895 } else if (property == config->prop_src_y) { 896 state->src_y = val; 897 } else if (property == config->prop_src_w) { 898 state->src_w = val; 899 } else if (property == config->prop_src_h) { 900 state->src_h = val; 901 } else if (property == plane->alpha_property) { 902 state->alpha = val; 903 } else if (property == plane->rotation_property) { 904 if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) { 905 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n", 906 plane->base.id, plane->name, val); 907 return -EINVAL; 908 } 909 state->rotation = val; 910 } else if (property == plane->zpos_property) { 911 state->zpos = val; 912 } else if (property == plane->color_encoding_property) { 913 state->color_encoding = val; 914 } else if (property == plane->color_range_property) { 915 state->color_range = val; 916 } else if (plane->funcs->atomic_set_property) { 917 return plane->funcs->atomic_set_property(plane, state, 918 property, val); 919 } else { 920 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n", 921 plane->base.id, plane->name, 922 property->base.id, property->name); 923 return -EINVAL; 924 } 925 926 return 0; 927 } 928 929 /** 930 * drm_atomic_plane_get_property - get property value from plane state 931 * @plane: the drm plane to set a property on 932 * @state: the state object to get the property value from 933 * @property: the property to set 934 * @val: return location for the property value 935 * 936 * This function handles generic/core properties and calls out to driver's 937 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure 938 * consistent behavior you must call this function rather than the driver hook 939 * directly. 940 * 941 * RETURNS: 942 * Zero on success, error code on failure 943 */ 944 static int 945 drm_atomic_plane_get_property(struct drm_plane *plane, 946 const struct drm_plane_state *state, 947 struct drm_property *property, uint64_t *val) 948 { 949 struct drm_device *dev = plane->dev; 950 struct drm_mode_config *config = &dev->mode_config; 951 952 if (property == config->prop_fb_id) { 953 *val = (state->fb) ? 
state->fb->base.id : 0; 954 } else if (property == config->prop_in_fence_fd) { 955 *val = -1; 956 } else if (property == config->prop_crtc_id) { 957 *val = (state->crtc) ? state->crtc->base.id : 0; 958 } else if (property == config->prop_crtc_x) { 959 *val = I642U64(state->crtc_x); 960 } else if (property == config->prop_crtc_y) { 961 *val = I642U64(state->crtc_y); 962 } else if (property == config->prop_crtc_w) { 963 *val = state->crtc_w; 964 } else if (property == config->prop_crtc_h) { 965 *val = state->crtc_h; 966 } else if (property == config->prop_src_x) { 967 *val = state->src_x; 968 } else if (property == config->prop_src_y) { 969 *val = state->src_y; 970 } else if (property == config->prop_src_w) { 971 *val = state->src_w; 972 } else if (property == config->prop_src_h) { 973 *val = state->src_h; 974 } else if (property == plane->alpha_property) { 975 *val = state->alpha; 976 } else if (property == plane->rotation_property) { 977 *val = state->rotation; 978 } else if (property == plane->zpos_property) { 979 *val = state->zpos; 980 } else if (property == plane->color_encoding_property) { 981 *val = state->color_encoding; 982 } else if (property == plane->color_range_property) { 983 *val = state->color_range; 984 } else if (plane->funcs->atomic_get_property) { 985 return plane->funcs->atomic_get_property(plane, state, property, val); 986 } else { 987 return -EINVAL; 988 } 989 990 return 0; 991 } 992 993 static bool 994 plane_switching_crtc(struct drm_atomic_state *state, 995 struct drm_plane *plane, 996 struct drm_plane_state *plane_state) 997 { 998 if (!plane->state->crtc || !plane_state->crtc) 999 return false; 1000 1001 if (plane->state->crtc == plane_state->crtc) 1002 return false; 1003 1004 /* This could be refined, but currently there's no helper or driver code 1005 * to implement direct switching of active planes nor userspace to take 1006 * advantage of more direct plane switching without the intermediate 1007 * full OFF state. 1008 */ 1009 return true; 1010 } 1011 1012 /** 1013 * drm_atomic_plane_check - check plane state 1014 * @plane: plane to check 1015 * @state: plane state to check 1016 * 1017 * Provides core sanity checks for plane state. 1018 * 1019 * RETURNS: 1020 * Zero on success, error code on failure 1021 */ 1022 static int drm_atomic_plane_check(struct drm_plane *plane, 1023 struct drm_plane_state *state) 1024 { 1025 unsigned int fb_width, fb_height; 1026 int ret; 1027 1028 /* either *both* CRTC and FB must be set, or neither */ 1029 if (state->crtc && !state->fb) { 1030 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n", 1031 plane->base.id, plane->name); 1032 return -EINVAL; 1033 } else if (state->fb && !state->crtc) { 1034 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n", 1035 plane->base.id, plane->name); 1036 return -EINVAL; 1037 } 1038 1039 /* if disabled, we don't care about the rest of the state: */ 1040 if (!state->crtc) 1041 return 0; 1042 1043 /* Check whether this plane is usable on this CRTC */ 1044 if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) { 1045 DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", 1046 state->crtc->base.id, state->crtc->name, 1047 plane->base.id, plane->name); 1048 return -EINVAL; 1049 } 1050 1051 /* Check whether this plane supports the fb pixel format. 
*/ 1052 ret = drm_plane_check_pixel_format(plane, state->fb->format->format, 1053 state->fb->modifier); 1054 if (ret) { 1055 struct drm_format_name_buf format_name; 1056 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n", 1057 plane->base.id, plane->name, 1058 drm_get_format_name(state->fb->format->format, 1059 &format_name), 1060 state->fb->modifier); 1061 return ret; 1062 } 1063 1064 /* Give drivers some help against integer overflows */ 1065 if (state->crtc_w > INT_MAX || 1066 state->crtc_x > INT_MAX - (int32_t) state->crtc_w || 1067 state->crtc_h > INT_MAX || 1068 state->crtc_y > INT_MAX - (int32_t) state->crtc_h) { 1069 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", 1070 plane->base.id, plane->name, 1071 state->crtc_w, state->crtc_h, 1072 state->crtc_x, state->crtc_y); 1073 return -ERANGE; 1074 } 1075 1076 fb_width = state->fb->width << 16; 1077 fb_height = state->fb->height << 16; 1078 1079 /* Make sure source coordinates are inside the fb. */ 1080 if (state->src_w > fb_width || 1081 state->src_x > fb_width - state->src_w || 1082 state->src_h > fb_height || 1083 state->src_y > fb_height - state->src_h) { 1084 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates " 1085 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", 1086 plane->base.id, plane->name, 1087 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, 1088 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, 1089 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, 1090 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10, 1091 state->fb->width, state->fb->height); 1092 return -ENOSPC; 1093 } 1094 1095 if (plane_switching_crtc(state->state, plane, state)) { 1096 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", 1097 plane->base.id, plane->name); 1098 return -EINVAL; 1099 } 1100 1101 return 0; 1102 } 1103 1104 static void drm_atomic_plane_print_state(struct drm_printer *p, 1105 const struct drm_plane_state *state) 1106 { 1107 struct drm_plane *plane = state->plane; 1108 struct drm_rect src = drm_plane_state_src(state); 1109 struct drm_rect dest = drm_plane_state_dest(state); 1110 1111 drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); 1112 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); 1113 drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0); 1114 if (state->fb) 1115 drm_framebuffer_print_info(p, 2, state->fb); 1116 drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); 1117 drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); 1118 drm_printf(p, "\trotation=%x\n", state->rotation); 1119 drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos); 1120 drm_printf(p, "\tcolor-encoding=%s\n", 1121 drm_get_color_encoding_name(state->color_encoding)); 1122 drm_printf(p, "\tcolor-range=%s\n", 1123 drm_get_color_range_name(state->color_range)); 1124 1125 if (plane->funcs->atomic_print_state) 1126 plane->funcs->atomic_print_state(p, state); 1127 } 1128 1129 /** 1130 * DOC: handling driver private state 1131 * 1132 * Very often the DRM objects exposed to userspace in the atomic modeset api 1133 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the 1134 * underlying hardware. Especially for any kind of shared resources (e.g. shared 1135 * clocks, scaler units, bandwidth and fifo limits shared among a group of 1136 * planes or CRTCs, and so on) it makes sense to model these as independent 1137 * objects. 
Drivers then need to do similar state tracking and commit ordering for 1138 * such private (since not exposed to userpace) objects as the atomic core and 1139 * helpers already provide for connectors, planes and CRTCs. 1140 * 1141 * To make this easier on drivers the atomic core provides some support to track 1142 * driver private state objects using struct &drm_private_obj, with the 1143 * associated state struct &drm_private_state. 1144 * 1145 * Similar to userspace-exposed objects, private state structures can be 1146 * acquired by calling drm_atomic_get_private_obj_state(). Since this function 1147 * does not take care of locking, drivers should wrap it for each type of 1148 * private state object they have with the required call to drm_modeset_lock() 1149 * for the corresponding &drm_modeset_lock. 1150 * 1151 * All private state structures contained in a &drm_atomic_state update can be 1152 * iterated using for_each_oldnew_private_obj_in_state(), 1153 * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state(). 1154 * Drivers are recommended to wrap these for each type of driver private state 1155 * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at 1156 * least if they want to iterate over all objects of a given type. 1157 * 1158 * An earlier way to handle driver private state was by subclassing struct 1159 * &drm_atomic_state. But since that encourages non-standard ways to implement 1160 * the check/commit split atomic requires (by using e.g. "check and rollback or 1161 * commit instead" of "duplicate state, check, then either commit or release 1162 * duplicated state) it is deprecated in favour of using &drm_private_state. 1163 */ 1164 1165 /** 1166 * drm_atomic_private_obj_init - initialize private object 1167 * @obj: private object 1168 * @state: initial private object state 1169 * @funcs: pointer to the struct of function pointers that identify the object 1170 * type 1171 * 1172 * Initialize the private object, which can be embedded into any 1173 * driver private object that needs its own atomic state. 1174 */ 1175 void 1176 drm_atomic_private_obj_init(struct drm_private_obj *obj, 1177 struct drm_private_state *state, 1178 const struct drm_private_state_funcs *funcs) 1179 { 1180 memset(obj, 0, sizeof(*obj)); 1181 1182 obj->state = state; 1183 obj->funcs = funcs; 1184 } 1185 EXPORT_SYMBOL(drm_atomic_private_obj_init); 1186 1187 /** 1188 * drm_atomic_private_obj_fini - finalize private object 1189 * @obj: private object 1190 * 1191 * Finalize the private object. 1192 */ 1193 void 1194 drm_atomic_private_obj_fini(struct drm_private_obj *obj) 1195 { 1196 obj->funcs->atomic_destroy_state(obj, obj->state); 1197 } 1198 EXPORT_SYMBOL(drm_atomic_private_obj_fini); 1199 1200 /** 1201 * drm_atomic_get_private_obj_state - get private object state 1202 * @state: global atomic state 1203 * @obj: private object to get the state for 1204 * 1205 * This function returns the private object state for the given private object, 1206 * allocating the state if needed. It does not grab any locks as the caller is 1207 * expected to care of any required locking. 1208 * 1209 * RETURNS: 1210 * 1211 * Either the allocated state or the error code encoded into a pointer. 
1212 */ 1213 struct drm_private_state * 1214 drm_atomic_get_private_obj_state(struct drm_atomic_state *state, 1215 struct drm_private_obj *obj) 1216 { 1217 int index, num_objs, i; 1218 size_t size; 1219 struct __drm_private_objs_state *arr; 1220 struct drm_private_state *obj_state; 1221 1222 for (i = 0; i < state->num_private_objs; i++) 1223 if (obj == state->private_objs[i].ptr) 1224 return state->private_objs[i].state; 1225 1226 num_objs = state->num_private_objs + 1; 1227 size = sizeof(*state->private_objs) * num_objs; 1228 arr = kmalloc(size, GFP_KERNEL); 1229 if (!arr) 1230 return ERR_PTR(-ENOMEM); 1231 memcpy(arr, state->private_objs, 1232 sizeof(*state->private_objs) * state->num_private_objs); 1233 kfree(state->private_objs); 1234 1235 state->private_objs = arr; 1236 index = state->num_private_objs; 1237 memset(&state->private_objs[index], 0, sizeof(*state->private_objs)); 1238 1239 obj_state = obj->funcs->atomic_duplicate_state(obj); 1240 if (!obj_state) 1241 return ERR_PTR(-ENOMEM); 1242 1243 state->private_objs[index].state = obj_state; 1244 state->private_objs[index].old_state = obj->state; 1245 state->private_objs[index].new_state = obj_state; 1246 state->private_objs[index].ptr = obj; 1247 obj_state->state = state; 1248 1249 state->num_private_objs = num_objs; 1250 1251 DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n", 1252 obj, obj_state, state); 1253 1254 return obj_state; 1255 } 1256 EXPORT_SYMBOL(drm_atomic_get_private_obj_state); 1257 1258 /** 1259 * drm_atomic_get_connector_state - get connector state 1260 * @state: global atomic state object 1261 * @connector: connector to get state object for 1262 * 1263 * This function returns the connector state for the given connector, 1264 * allocating it if needed. It will also grab the relevant connector lock to 1265 * make sure that the state is consistent. 1266 * 1267 * Returns: 1268 * 1269 * Either the allocated state or the error code encoded into the pointer. When 1270 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 1271 * entire atomic sequence must be restarted. All other errors are fatal. 
1272 */ 1273 struct drm_connector_state * 1274 drm_atomic_get_connector_state(struct drm_atomic_state *state, 1275 struct drm_connector *connector) 1276 { 1277 int ret, index; 1278 struct drm_mode_config *config = &connector->dev->mode_config; 1279 struct drm_connector_state *connector_state; 1280 1281 WARN_ON(!state->acquire_ctx); 1282 1283 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); 1284 if (ret) 1285 return ERR_PTR(ret); 1286 1287 index = drm_connector_index(connector); 1288 1289 if (index >= state->num_connector) { 1290 struct __drm_connnectors_state *c; 1291 int alloc = max(index + 1, config->num_connector); 1292 1293 c = kmalloc(alloc * sizeof(*state->connectors), GFP_KERNEL); 1294 if (!c) 1295 return ERR_PTR(-ENOMEM); 1296 memcpy(c, state->connectors, 1297 state->num_connector * sizeof(*state->connectors)); 1298 kfree(state->connectors); 1299 1300 state->connectors = c; 1301 memset(&state->connectors[state->num_connector], 0, 1302 sizeof(*state->connectors) * (alloc - state->num_connector)); 1303 1304 state->num_connector = alloc; 1305 } 1306 1307 if (state->connectors[index].state) 1308 return state->connectors[index].state; 1309 1310 connector_state = connector->funcs->atomic_duplicate_state(connector); 1311 if (!connector_state) 1312 return ERR_PTR(-ENOMEM); 1313 1314 drm_connector_get(connector); 1315 state->connectors[index].state = connector_state; 1316 state->connectors[index].old_state = connector->state; 1317 state->connectors[index].new_state = connector_state; 1318 state->connectors[index].ptr = connector; 1319 connector_state->state = state; 1320 1321 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n", 1322 connector->base.id, connector->name, 1323 connector_state, state); 1324 1325 if (connector_state->crtc) { 1326 struct drm_crtc_state *crtc_state; 1327 1328 crtc_state = drm_atomic_get_crtc_state(state, 1329 connector_state->crtc); 1330 if (IS_ERR(crtc_state)) 1331 return ERR_CAST(crtc_state); 1332 } 1333 1334 return connector_state; 1335 } 1336 EXPORT_SYMBOL(drm_atomic_get_connector_state); 1337 1338 /** 1339 * drm_atomic_connector_set_property - set property on connector. 1340 * @connector: the drm connector to set a property on 1341 * @state: the state object to update with the new property value 1342 * @property: the property to set 1343 * @val: the new property value 1344 * 1345 * This function handles generic/core properties and calls out to driver's 1346 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure 1347 * consistent behavior you must call this function rather than the driver hook 1348 * directly. 1349 * 1350 * RETURNS: 1351 * Zero on success, error code on failure 1352 */ 1353 static int drm_atomic_connector_set_property(struct drm_connector *connector, 1354 struct drm_connector_state *state, struct drm_property *property, 1355 uint64_t val) 1356 { 1357 struct drm_device *dev = connector->dev; 1358 struct drm_mode_config *config = &dev->mode_config; 1359 1360 if (property == config->prop_crtc_id) { 1361 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); 1362 return drm_atomic_set_crtc_for_connector(state, crtc); 1363 } else if (property == config->dpms_property) { 1364 /* setting DPMS property requires special handling, which 1365 * is done in legacy setprop path for us. Disallow (for 1366 * now?) 
atomic writes to DPMS property: 1367 */ 1368 return -EINVAL; 1369 } else if (property == config->tv_select_subconnector_property) { 1370 state->tv.subconnector = val; 1371 } else if (property == config->tv_left_margin_property) { 1372 state->tv.margins.left = val; 1373 } else if (property == config->tv_right_margin_property) { 1374 state->tv.margins.right = val; 1375 } else if (property == config->tv_top_margin_property) { 1376 state->tv.margins.top = val; 1377 } else if (property == config->tv_bottom_margin_property) { 1378 state->tv.margins.bottom = val; 1379 } else if (property == config->tv_mode_property) { 1380 state->tv.mode = val; 1381 } else if (property == config->tv_brightness_property) { 1382 state->tv.brightness = val; 1383 } else if (property == config->tv_contrast_property) { 1384 state->tv.contrast = val; 1385 } else if (property == config->tv_flicker_reduction_property) { 1386 state->tv.flicker_reduction = val; 1387 } else if (property == config->tv_overscan_property) { 1388 state->tv.overscan = val; 1389 } else if (property == config->tv_saturation_property) { 1390 state->tv.saturation = val; 1391 } else if (property == config->tv_hue_property) { 1392 state->tv.hue = val; 1393 } else if (property == config->link_status_property) { 1394 /* Never downgrade from GOOD to BAD on userspace's request here, 1395 * only hw issues can do that. 1396 * 1397 * For an atomic property the userspace doesn't need to be able 1398 * to understand all the properties, but needs to be able to 1399 * restore the state it wants on VT switch. So if the userspace 1400 * tries to change the link_status from GOOD to BAD, driver 1401 * silently rejects it and returns a 0. This prevents userspace 1402 * from accidently breaking the display when it restores the 1403 * state. 
1404 */ 1405 if (state->link_status != DRM_LINK_STATUS_GOOD) 1406 state->link_status = val; 1407 } else if (property == config->aspect_ratio_property) { 1408 state->picture_aspect_ratio = val; 1409 } else if (property == config->content_type_property) { 1410 state->content_type = val; 1411 } else if (property == connector->scaling_mode_property) { 1412 state->scaling_mode = val; 1413 } else if (property == connector->content_protection_property) { 1414 if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 1415 DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); 1416 return -EINVAL; 1417 } 1418 state->content_protection = val; 1419 } else if (property == config->writeback_fb_id_property) { 1420 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); 1421 int ret = drm_atomic_set_writeback_fb_for_connector(state, fb); 1422 if (fb) 1423 drm_framebuffer_put(fb); 1424 return ret; 1425 } else if (property == config->writeback_out_fence_ptr_property) { 1426 s32 __user *fence_ptr = u64_to_user_ptr(val); 1427 1428 return set_out_fence_for_connector(state->state, connector, 1429 fence_ptr); 1430 } else if (connector->funcs->atomic_set_property) { 1431 return connector->funcs->atomic_set_property(connector, 1432 state, property, val); 1433 } else { 1434 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n", 1435 connector->base.id, connector->name, 1436 property->base.id, property->name); 1437 return -EINVAL; 1438 } 1439 1440 return 0; 1441 } 1442 1443 static void drm_atomic_connector_print_state(struct drm_printer *p, 1444 const struct drm_connector_state *state) 1445 { 1446 struct drm_connector *connector = state->connector; 1447 1448 drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); 1449 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); 1450 1451 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 1452 if (state->writeback_job && state->writeback_job->fb) 1453 drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id); 1454 1455 if (connector->funcs->atomic_print_state) 1456 connector->funcs->atomic_print_state(p, state); 1457 } 1458 1459 /** 1460 * drm_atomic_connector_get_property - get property value from connector state 1461 * @connector: the drm connector to set a property on 1462 * @state: the state object to get the property value from 1463 * @property: the property to set 1464 * @val: return location for the property value 1465 * 1466 * This function handles generic/core properties and calls out to driver's 1467 * &drm_connector_funcs.atomic_get_property for driver properties. To ensure 1468 * consistent behavior you must call this function rather than the driver hook 1469 * directly. 1470 * 1471 * RETURNS: 1472 * Zero on success, error code on failure 1473 */ 1474 static int 1475 drm_atomic_connector_get_property(struct drm_connector *connector, 1476 const struct drm_connector_state *state, 1477 struct drm_property *property, uint64_t *val) 1478 { 1479 struct drm_device *dev = connector->dev; 1480 struct drm_mode_config *config = &dev->mode_config; 1481 1482 if (property == config->prop_crtc_id) { 1483 *val = (state->crtc) ? 
state->crtc->base.id : 0; 1484 } else if (property == config->dpms_property) { 1485 *val = connector->dpms; 1486 } else if (property == config->tv_select_subconnector_property) { 1487 *val = state->tv.subconnector; 1488 } else if (property == config->tv_left_margin_property) { 1489 *val = state->tv.margins.left; 1490 } else if (property == config->tv_right_margin_property) { 1491 *val = state->tv.margins.right; 1492 } else if (property == config->tv_top_margin_property) { 1493 *val = state->tv.margins.top; 1494 } else if (property == config->tv_bottom_margin_property) { 1495 *val = state->tv.margins.bottom; 1496 } else if (property == config->tv_mode_property) { 1497 *val = state->tv.mode; 1498 } else if (property == config->tv_brightness_property) { 1499 *val = state->tv.brightness; 1500 } else if (property == config->tv_contrast_property) { 1501 *val = state->tv.contrast; 1502 } else if (property == config->tv_flicker_reduction_property) { 1503 *val = state->tv.flicker_reduction; 1504 } else if (property == config->tv_overscan_property) { 1505 *val = state->tv.overscan; 1506 } else if (property == config->tv_saturation_property) { 1507 *val = state->tv.saturation; 1508 } else if (property == config->tv_hue_property) { 1509 *val = state->tv.hue; 1510 } else if (property == config->link_status_property) { 1511 *val = state->link_status; 1512 } else if (property == config->aspect_ratio_property) { 1513 *val = state->picture_aspect_ratio; 1514 } else if (property == config->content_type_property) { 1515 *val = state->content_type; 1516 } else if (property == connector->scaling_mode_property) { 1517 *val = state->scaling_mode; 1518 } else if (property == connector->content_protection_property) { 1519 *val = state->content_protection; 1520 } else if (property == config->writeback_fb_id_property) { 1521 /* Writeback framebuffer is one-shot, write and forget */ 1522 *val = 0; 1523 } else if (property == config->writeback_out_fence_ptr_property) { 1524 *val = 0; 1525 } else if (connector->funcs->atomic_get_property) { 1526 return connector->funcs->atomic_get_property(connector, 1527 state, property, val); 1528 } else { 1529 return -EINVAL; 1530 } 1531 1532 return 0; 1533 } 1534 1535 int drm_atomic_get_property(struct drm_mode_object *obj, 1536 struct drm_property *property, uint64_t *val) 1537 { 1538 struct drm_device *dev = property->dev; 1539 int ret; 1540 1541 switch (obj->type) { 1542 case DRM_MODE_OBJECT_CONNECTOR: { 1543 struct drm_connector *connector = obj_to_connector(obj); 1544 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 1545 ret = drm_atomic_connector_get_property(connector, 1546 connector->state, property, val); 1547 break; 1548 } 1549 case DRM_MODE_OBJECT_CRTC: { 1550 struct drm_crtc *crtc = obj_to_crtc(obj); 1551 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 1552 ret = drm_atomic_crtc_get_property(crtc, 1553 crtc->state, property, val); 1554 break; 1555 } 1556 case DRM_MODE_OBJECT_PLANE: { 1557 struct drm_plane *plane = obj_to_plane(obj); 1558 WARN_ON(!drm_modeset_is_locked(&plane->mutex)); 1559 ret = drm_atomic_plane_get_property(plane, 1560 plane->state, property, val); 1561 break; 1562 } 1563 default: 1564 ret = -EINVAL; 1565 break; 1566 } 1567 1568 return ret; 1569 } 1570 1571 /** 1572 * drm_atomic_set_crtc_for_plane - set crtc for plane 1573 * @plane_state: the plane whose incoming state to update 1574 * @crtc: crtc to use for the plane 1575 * 1576 * Changing the assigned crtc for a plane requires us to grab the lock and state 1577 * for the new crtc, 
as needed. This function takes care of all these details 1578 * besides updating the pointer in the state object itself. 1579 * 1580 * Returns: 1581 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1582 * then the w/w mutex code has detected a deadlock and the entire atomic 1583 * sequence must be restarted. All other errors are fatal. 1584 */ 1585 int 1586 drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, 1587 struct drm_crtc *crtc) 1588 { 1589 struct drm_plane *plane = plane_state->plane; 1590 struct drm_crtc_state *crtc_state; 1591 /* Nothing to do for same crtc*/ 1592 if (plane_state->crtc == crtc) 1593 return 0; 1594 if (plane_state->crtc) { 1595 crtc_state = drm_atomic_get_crtc_state(plane_state->state, 1596 plane_state->crtc); 1597 if (WARN_ON(IS_ERR(crtc_state))) 1598 return PTR_ERR(crtc_state); 1599 1600 crtc_state->plane_mask &= ~drm_plane_mask(plane); 1601 } 1602 1603 plane_state->crtc = crtc; 1604 1605 if (crtc) { 1606 crtc_state = drm_atomic_get_crtc_state(plane_state->state, 1607 crtc); 1608 if (IS_ERR(crtc_state)) 1609 return PTR_ERR(crtc_state); 1610 crtc_state->plane_mask |= drm_plane_mask(plane); 1611 } 1612 1613 if (crtc) 1614 DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n", 1615 plane->base.id, plane->name, plane_state, 1616 crtc->base.id, crtc->name); 1617 else 1618 DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n", 1619 plane->base.id, plane->name, plane_state); 1620 1621 return 0; 1622 } 1623 EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane); 1624 1625 /** 1626 * drm_atomic_set_fb_for_plane - set framebuffer for plane 1627 * @plane_state: atomic state object for the plane 1628 * @fb: fb to use for the plane 1629 * 1630 * Changing the assigned framebuffer for a plane requires us to grab a reference 1631 * to the new fb and drop the reference to the old fb, if there is one. This 1632 * function takes care of all these details besides updating the pointer in the 1633 * state object itself. 1634 */ 1635 void 1636 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, 1637 struct drm_framebuffer *fb) 1638 { 1639 struct drm_plane *plane = plane_state->plane; 1640 1641 if (fb) 1642 DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n", 1643 fb->base.id, plane->base.id, plane->name, 1644 plane_state); 1645 else 1646 DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n", 1647 plane->base.id, plane->name, plane_state); 1648 1649 drm_framebuffer_assign(&plane_state->fb, fb); 1650 } 1651 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); 1652 1653 /** 1654 * drm_atomic_set_fence_for_plane - set fence for plane 1655 * @plane_state: atomic state object for the plane 1656 * @fence: dma_fence to use for the plane 1657 * 1658 * Helper to setup the plane_state fence in case it is not set yet. 1659 * By using this drivers doesn't need to worry if the user choose 1660 * implicit or explicit fencing. 1661 * 1662 * This function will not set the fence to the state if it was set 1663 * via explicit fencing interfaces on the atomic ioctl. In that case it will 1664 * drop the reference to the fence as we are not storing it anywhere. 1665 * Otherwise, if &drm_plane_state.fence is not set this function we just set it 1666 * with the received implicit fence. In both cases this function consumes a 1667 * reference for @fence. 
1668 *
1669 * This way explicit fencing can be used to overrule implicit fencing, which is
1670 * important to make explicit fencing use-cases work: One example is using one
1671 * buffer for 2 screens with different refresh rates. Implicit fencing will
1672 * clamp rendering to the refresh rate of the slower screen, whereas an explicit
1673 * fence allows 2 independent render and display loops on a single buffer. If a
1674 * driver obeys both implicit and explicit fences for plane updates, it will
1675 * break all the benefits of explicit fencing.
1676 */
1677 void
1678 drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
1679 struct dma_fence *fence)
1680 {
1681 if (plane_state->fence) {
1682 dma_fence_put(fence);
1683 return;
1684 }
1685
1686 plane_state->fence = fence;
1687 }
1688 EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
1689
1690 /**
1691 * drm_atomic_set_crtc_for_connector - set crtc for connector
1692 * @conn_state: atomic state object for the connector
1693 * @crtc: crtc to use for the connector
1694 *
1695 * Changing the assigned crtc for a connector requires us to grab the lock and
1696 * state for the new crtc, as needed. This function takes care of all these
1697 * details besides updating the pointer in the state object itself.
1698 *
1699 * Returns:
1700 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1701 * then the w/w mutex code has detected a deadlock and the entire atomic
1702 * sequence must be restarted. All other errors are fatal.
1703 */
1704 int
1705 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
1706 struct drm_crtc *crtc)
1707 {
1708 struct drm_connector *connector = conn_state->connector;
1709 struct drm_crtc_state *crtc_state;
1710
1711 if (conn_state->crtc == crtc)
1712 return 0;
1713
1714 if (conn_state->crtc) {
1715 crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
1716 conn_state->crtc);
1717
1718 crtc_state->connector_mask &=
1719 ~drm_connector_mask(conn_state->connector);
1720
1721 drm_connector_put(conn_state->connector);
1722 conn_state->crtc = NULL;
1723 }
1724
1725 if (crtc) {
1726 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
1727 if (IS_ERR(crtc_state))
1728 return PTR_ERR(crtc_state);
1729
1730 crtc_state->connector_mask |=
1731 drm_connector_mask(conn_state->connector);
1732
1733 drm_connector_get(conn_state->connector);
1734 conn_state->crtc = crtc;
1735
1736 DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
1737 connector->base.id, connector->name,
1738 conn_state, crtc->base.id, crtc->name);
1739 } else {
1740 DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
1741 connector->base.id, connector->name,
1742 conn_state);
1743 }
1744
1745 return 0;
1746 }
1747 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
1748
1749 /*
1750 * drm_atomic_get_writeback_job - return or allocate a writeback job
1751 * @conn_state: Connector state to get the job for
1752 *
1753 * Writeback jobs have a different lifetime to the atomic state they are
1754 * associated with. This convenience function takes care of allocating a job
1755 * if there isn't yet one associated with the connector state, otherwise
1756 * it just returns the existing job.
1757 *
1758 * Returns: The writeback job for the given connector state
1759 */
1760 static struct drm_writeback_job *
1761 drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
1762 {
1763 WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1764
1765 if (!conn_state->writeback_job)
1766 conn_state->writeback_job =
1767 kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
1768
1769 return conn_state->writeback_job;
1770 }
1771
1772 /**
1773 * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
1774 * @conn_state: atomic state object for the connector
1775 * @fb: fb to use for the connector
1776 *
1777 * This is used to set the framebuffer for a writeback connector, which outputs
1778 * to a buffer instead of an actual physical connector.
1779 * Changing the assigned framebuffer requires us to grab a reference to the new
1780 * fb and drop the reference to the old fb, if there is one. This function
1781 * takes care of all these details besides updating the pointer in the
1782 * state object itself.
1783 *
1784 * Note: The only way conn_state can already have an fb set is if the commit
1785 * sets the property more than once.
1786 *
1787 * See also: drm_writeback_connector_init()
1788 *
1789 * Returns: 0 on success
1790 */
1791 int drm_atomic_set_writeback_fb_for_connector(
1792 struct drm_connector_state *conn_state,
1793 struct drm_framebuffer *fb)
1794 {
1795 struct drm_writeback_job *job =
1796 drm_atomic_get_writeback_job(conn_state);
1797 if (!job)
1798 return -ENOMEM;
1799
1800 drm_framebuffer_assign(&job->fb, fb);
1801
1802 if (fb)
1803 DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
1804 fb->base.id, conn_state);
1805 else
1806 DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
1807 conn_state);
1808
1809 return 0;
1810 }
1811 EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
1812
1813 /**
1814 * drm_atomic_add_affected_connectors - add connectors for crtc
1815 * @state: atomic state
1816 * @crtc: DRM crtc
1817 *
1818 * This function walks the current configuration and adds all connectors
1819 * currently using @crtc to the atomic configuration @state. Note that this
1820 * function must acquire the connection mutex. This can potentially cause
1821 * unneeded serialization if the update is just for the planes on one crtc. Hence
1822 * drivers and helpers should only call this when really needed (e.g. when a
1823 * full modeset needs to happen due to some change).
1824 *
1825 * Returns:
1826 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1827 * then the w/w mutex code has detected a deadlock and the entire atomic
1828 * sequence must be restarted. All other errors are fatal.
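 *
 * As an illustration only (a minimal sketch, not taken from any driver): a
 * driver's &drm_mode_config_funcs.atomic_check implementation might pull in
 * the connectors of every CRTC that needs a full modeset like this:
 *
 *     for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 *         if (!drm_atomic_crtc_needs_modeset(crtc_state))
 *             continue;
 *
 *         ret = drm_atomic_add_affected_connectors(state, crtc);
 *         if (ret)
 *             return ret;
 *     }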
1829 */
1830 int
1831 drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
1832 struct drm_crtc *crtc)
1833 {
1834 struct drm_mode_config *config = &state->dev->mode_config;
1835 struct drm_connector *connector;
1836 struct drm_connector_state *conn_state;
1837 struct drm_connector_list_iter conn_iter;
1838 struct drm_crtc_state *crtc_state;
1839 int ret;
1840
1841 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1842 if (IS_ERR(crtc_state))
1843 return PTR_ERR(crtc_state);
1844
1845 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
1846 if (ret)
1847 return ret;
1848
1849 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
1850 crtc->base.id, crtc->name, state);
1851
1852 /*
1853 * Changed connectors are already in @state, so only need to look
1854 * at the connector_mask in crtc_state.
1855 */
1856 drm_connector_list_iter_begin(state->dev, &conn_iter);
1857 drm_for_each_connector_iter(connector, &conn_iter) {
1858 if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
1859 continue;
1860
1861 conn_state = drm_atomic_get_connector_state(state, connector);
1862 if (IS_ERR(conn_state)) {
1863 drm_connector_list_iter_end(&conn_iter);
1864 return PTR_ERR(conn_state);
1865 }
1866 }
1867 drm_connector_list_iter_end(&conn_iter);
1868
1869 return 0;
1870 }
1871 EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
1872
1873 /**
1874 * drm_atomic_add_affected_planes - add planes for crtc
1875 * @state: atomic state
1876 * @crtc: DRM crtc
1877 *
1878 * This function walks the current configuration and adds all planes
1879 * currently used by @crtc to the atomic configuration @state. This is useful
1880 * when an atomic commit also needs to check all currently enabled planes on
1881 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
1882 * to avoid special code to force-enable all planes.
1883 *
1884 * Since acquiring a plane state will always also acquire the w/w mutex of the
1885 * current CRTC for that plane (if there is any), adding all the plane states for
1886 * a CRTC will not reduce parallelism of atomic updates.
1887 *
1888 * Returns:
1889 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1890 * then the w/w mutex code has detected a deadlock and the entire atomic
1891 * sequence must be restarted. All other errors are fatal.
1892 */
1893 int
1894 drm_atomic_add_affected_planes(struct drm_atomic_state *state,
1895 struct drm_crtc *crtc)
1896 {
1897 struct drm_plane *plane;
1898
1899 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
1900
1901 DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
1902 crtc->base.id, crtc->name, state);
1903
1904 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
1905 struct drm_plane_state *plane_state =
1906 drm_atomic_get_plane_state(state, plane);
1907
1908 if (IS_ERR(plane_state))
1909 return PTR_ERR(plane_state);
1910 }
1911 return 0;
1912 }
1913 EXPORT_SYMBOL(drm_atomic_add_affected_planes);
1914
1915 /**
1916 * drm_atomic_check_only - check whether a given config would work
1917 * @state: atomic configuration to check
1918 *
1919 * Note that this function can return -EDEADLK if the driver needed to acquire
1920 * more locks but encountered a deadlock. The caller must then do the usual w/w
1921 * backoff dance and restart. All other errors are fatal.
1922 *
1923 * Returns:
1924 * 0 on success, negative error code on failure.
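 *
 * The backoff dance usually follows the pattern used by drm_mode_atomic_ioctl()
 * at the end of this file; roughly (sketch only, with the acquire context and
 * state setup omitted):
 *
 *     retry:
 *     ret = drm_atomic_check_only(state);
 *     if (ret == -EDEADLK) {
 *         drm_atomic_state_clear(state);
 *         if (!drm_modeset_backoff(&ctx))
 *             goto retry;
 *     }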
1925 */ 1926 int drm_atomic_check_only(struct drm_atomic_state *state) 1927 { 1928 struct drm_device *dev = state->dev; 1929 struct drm_mode_config *config = &dev->mode_config; 1930 struct drm_plane *plane; 1931 struct drm_plane_state *plane_state; 1932 struct drm_crtc *crtc; 1933 struct drm_crtc_state *crtc_state; 1934 struct drm_connector *conn; 1935 struct drm_connector_state *conn_state; 1936 int i, ret = 0; 1937 1938 DRM_DEBUG_ATOMIC("checking %p\n", state); 1939 1940 for_each_new_plane_in_state(state, plane, plane_state, i) { 1941 ret = drm_atomic_plane_check(plane, plane_state); 1942 if (ret) { 1943 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", 1944 plane->base.id, plane->name); 1945 return ret; 1946 } 1947 } 1948 1949 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1950 ret = drm_atomic_crtc_check(crtc, crtc_state); 1951 if (ret) { 1952 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", 1953 crtc->base.id, crtc->name); 1954 return ret; 1955 } 1956 } 1957 1958 for_each_new_connector_in_state(state, conn, conn_state, i) { 1959 ret = drm_atomic_connector_check(conn, conn_state); 1960 if (ret) { 1961 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n", 1962 conn->base.id, conn->name); 1963 return ret; 1964 } 1965 } 1966 1967 if (config->funcs->atomic_check) { 1968 ret = config->funcs->atomic_check(state->dev, state); 1969 1970 if (ret) { 1971 DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n", 1972 state, ret); 1973 return ret; 1974 } 1975 } 1976 1977 if (!state->allow_modeset) { 1978 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1979 if (drm_atomic_crtc_needs_modeset(crtc_state)) { 1980 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", 1981 crtc->base.id, crtc->name); 1982 return -EINVAL; 1983 } 1984 } 1985 } 1986 1987 return 0; 1988 } 1989 EXPORT_SYMBOL(drm_atomic_check_only); 1990 1991 /** 1992 * drm_atomic_commit - commit configuration atomically 1993 * @state: atomic configuration to check 1994 * 1995 * Note that this function can return -EDEADLK if the driver needed to acquire 1996 * more locks but encountered a deadlock. The caller must then do the usual w/w 1997 * backoff dance and restart. All other errors are fatal. 1998 * 1999 * This function will take its own reference on @state. 2000 * Callers should always release their reference with drm_atomic_state_put(). 2001 * 2002 * Returns: 2003 * 0 on success, negative error code on failure. 2004 */ 2005 int drm_atomic_commit(struct drm_atomic_state *state) 2006 { 2007 struct drm_mode_config *config = &state->dev->mode_config; 2008 int ret; 2009 2010 ret = drm_atomic_check_only(state); 2011 if (ret) 2012 return ret; 2013 2014 DRM_DEBUG_ATOMIC("committing %p\n", state); 2015 2016 return config->funcs->atomic_commit(state->dev, state, false); 2017 } 2018 EXPORT_SYMBOL(drm_atomic_commit); 2019 2020 /** 2021 * drm_atomic_nonblocking_commit - atomic nonblocking commit 2022 * @state: atomic configuration to check 2023 * 2024 * Note that this function can return -EDEADLK if the driver needed to acquire 2025 * more locks but encountered a deadlock. The caller must then do the usual w/w 2026 * backoff dance and restart. All other errors are fatal. 2027 * 2028 * This function will take its own reference on @state. 2029 * Callers should always release their reference with drm_atomic_state_put(). 2030 * 2031 * Returns: 2032 * 0 on success, negative error code on failure. 
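 *
 * A minimal calling sequence, assuming @state was obtained from
 * drm_atomic_state_alloc() and already populated, could look like this
 * (sketch only; -EDEADLK backoff and error handling omitted):
 *
 *     ret = drm_atomic_nonblocking_commit(state);
 *     drm_atomic_state_put(state);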
2033 */
2034 int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
2035 {
2036 struct drm_mode_config *config = &state->dev->mode_config;
2037 int ret;
2038
2039 ret = drm_atomic_check_only(state);
2040 if (ret)
2041 return ret;
2042
2043 DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
2044
2045 return config->funcs->atomic_commit(state->dev, state, true);
2046 }
2047 EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
2048
2049 static void drm_atomic_print_state(const struct drm_atomic_state *state)
2050 {
2051 struct drm_printer p = drm_info_printer(state->dev->dev);
2052 struct drm_plane *plane;
2053 struct drm_plane_state *plane_state;
2054 struct drm_crtc *crtc;
2055 struct drm_crtc_state *crtc_state;
2056 struct drm_connector *connector;
2057 struct drm_connector_state *connector_state;
2058 int i;
2059
2060 DRM_DEBUG_ATOMIC("checking %p\n", state);
2061
2062 for_each_new_plane_in_state(state, plane, plane_state, i)
2063 drm_atomic_plane_print_state(&p, plane_state);
2064
2065 for_each_new_crtc_in_state(state, crtc, crtc_state, i)
2066 drm_atomic_crtc_print_state(&p, crtc_state);
2067
2068 for_each_new_connector_in_state(state, connector, connector_state, i)
2069 drm_atomic_connector_print_state(&p, connector_state);
2070 }
2071
2072 static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
2073 bool take_locks)
2074 {
2075 struct drm_mode_config *config = &dev->mode_config;
2076 struct drm_plane *plane;
2077 struct drm_crtc *crtc;
2078 struct drm_connector *connector;
2079 struct drm_connector_list_iter conn_iter;
2080
2081 if (!drm_drv_uses_atomic_modeset(dev))
2082 return;
2083
2084 list_for_each_entry(plane, &config->plane_list, head) {
2085 if (take_locks)
2086 drm_modeset_lock(&plane->mutex, NULL);
2087 drm_atomic_plane_print_state(p, plane->state);
2088 if (take_locks)
2089 drm_modeset_unlock(&plane->mutex);
2090 }
2091
2092 list_for_each_entry(crtc, &config->crtc_list, head) {
2093 if (take_locks)
2094 drm_modeset_lock(&crtc->mutex, NULL);
2095 drm_atomic_crtc_print_state(p, crtc->state);
2096 if (take_locks)
2097 drm_modeset_unlock(&crtc->mutex);
2098 }
2099
2100 drm_connector_list_iter_begin(dev, &conn_iter);
2101 if (take_locks)
2102 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2103 drm_for_each_connector_iter(connector, &conn_iter)
2104 drm_atomic_connector_print_state(p, connector->state);
2105 if (take_locks)
2106 drm_modeset_unlock(&dev->mode_config.connection_mutex);
2107 drm_connector_list_iter_end(&conn_iter);
2108 }
2109
2110 /**
2111 * drm_state_dump - dump entire device atomic state
2112 * @dev: the drm device
2113 * @p: where to print the state to
2114 *
2115 * Just for debugging. Drivers might want an option to dump state
2116 * to dmesg in case of error IRQs. (Hint: you probably want to
2117 * ratelimit this!)
2118 *
2119 * The caller must call drm_modeset_lock_all(), or, if this is called
2120 * from an error IRQ handler, it should not be enabled by default.
2121 * (I.e. if you are debugging errors you might not care that this
2122 * is racy. But calling this without all modeset locks held is
2123 * not inherently safe.)
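 *
 * A ratelimited dump from such an error handler could look roughly like the
 * following sketch (the ratelimit state is local to the hypothetical driver):
 *
 *     static DEFINE_RATELIMIT_STATE(dump_rs, DEFAULT_RATELIMIT_INTERVAL,
 *                                   DEFAULT_RATELIMIT_BURST);
 *     struct drm_printer p = drm_info_printer(dev->dev);
 *
 *     if (__ratelimit(&dump_rs))
 *         drm_state_dump(dev, &p);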
2124 */ 2125 void drm_state_dump(struct drm_device *dev, struct drm_printer *p) 2126 { 2127 __drm_state_dump(dev, p, false); 2128 } 2129 EXPORT_SYMBOL(drm_state_dump); 2130 2131 #ifdef CONFIG_DEBUG_FS 2132 static int drm_state_info(struct seq_file *m, void *data) 2133 { 2134 struct drm_info_node *node = (struct drm_info_node *) m->private; 2135 struct drm_device *dev = node->minor->dev; 2136 struct drm_printer p = drm_seq_file_printer(m); 2137 2138 __drm_state_dump(dev, &p, true); 2139 2140 return 0; 2141 } 2142 2143 /* any use in debugfs files to dump individual planes/crtc/etc? */ 2144 static const struct drm_info_list drm_atomic_debugfs_list[] = { 2145 {"state", drm_state_info, 0}, 2146 }; 2147 2148 int drm_atomic_debugfs_init(struct drm_minor *minor) 2149 { 2150 return drm_debugfs_create_files(drm_atomic_debugfs_list, 2151 ARRAY_SIZE(drm_atomic_debugfs_list), 2152 minor->debugfs_root, minor); 2153 } 2154 #endif 2155 2156 /* 2157 * The big monster ioctl 2158 */ 2159 2160 static struct drm_pending_vblank_event *create_vblank_event( 2161 struct drm_crtc *crtc, uint64_t user_data) 2162 { 2163 struct drm_pending_vblank_event *e = NULL; 2164 2165 e = kzalloc(sizeof *e, GFP_KERNEL); 2166 if (!e) 2167 return NULL; 2168 2169 e->event.base.type = DRM_EVENT_FLIP_COMPLETE; 2170 e->event.base.length = sizeof(e->event); 2171 e->event.vbl.crtc_id = crtc->base.id; 2172 e->event.vbl.user_data = user_data; 2173 2174 return e; 2175 } 2176 2177 int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, 2178 struct drm_connector *connector, 2179 int mode) 2180 { 2181 struct drm_connector *tmp_connector; 2182 struct drm_connector_state *new_conn_state; 2183 struct drm_crtc *crtc; 2184 struct drm_crtc_state *crtc_state; 2185 int i, ret, old_mode = connector->dpms; 2186 bool active = false; 2187 2188 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, 2189 state->acquire_ctx); 2190 if (ret) 2191 return ret; 2192 2193 if (mode != DRM_MODE_DPMS_ON) 2194 mode = DRM_MODE_DPMS_OFF; 2195 connector->dpms = mode; 2196 2197 crtc = connector->state->crtc; 2198 if (!crtc) 2199 goto out; 2200 ret = drm_atomic_add_affected_connectors(state, crtc); 2201 if (ret) 2202 goto out; 2203 2204 crtc_state = drm_atomic_get_crtc_state(state, crtc); 2205 if (IS_ERR(crtc_state)) { 2206 ret = PTR_ERR(crtc_state); 2207 goto out; 2208 } 2209 2210 for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) { 2211 if (new_conn_state->crtc != crtc) 2212 continue; 2213 if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { 2214 active = true; 2215 break; 2216 } 2217 } 2218 2219 crtc_state->active = active; 2220 ret = drm_atomic_commit(state); 2221 out: 2222 if (ret != 0) 2223 connector->dpms = old_mode; 2224 return ret; 2225 } 2226 2227 int drm_atomic_set_property(struct drm_atomic_state *state, 2228 struct drm_mode_object *obj, 2229 struct drm_property *prop, 2230 uint64_t prop_value) 2231 { 2232 struct drm_mode_object *ref; 2233 int ret; 2234 2235 if (!drm_property_change_valid_get(prop, prop_value, &ref)) 2236 return -EINVAL; 2237 2238 switch (obj->type) { 2239 case DRM_MODE_OBJECT_CONNECTOR: { 2240 struct drm_connector *connector = obj_to_connector(obj); 2241 struct drm_connector_state *connector_state; 2242 2243 connector_state = drm_atomic_get_connector_state(state, connector); 2244 if (IS_ERR(connector_state)) { 2245 ret = PTR_ERR(connector_state); 2246 break; 2247 } 2248 2249 ret = drm_atomic_connector_set_property(connector, 2250 connector_state, prop, prop_value); 2251 break; 2252 } 2253 case 
DRM_MODE_OBJECT_CRTC: {
2254 struct drm_crtc *crtc = obj_to_crtc(obj);
2255 struct drm_crtc_state *crtc_state;
2256
2257 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2258 if (IS_ERR(crtc_state)) {
2259 ret = PTR_ERR(crtc_state);
2260 break;
2261 }
2262
2263 ret = drm_atomic_crtc_set_property(crtc,
2264 crtc_state, prop, prop_value);
2265 break;
2266 }
2267 case DRM_MODE_OBJECT_PLANE: {
2268 struct drm_plane *plane = obj_to_plane(obj);
2269 struct drm_plane_state *plane_state;
2270
2271 plane_state = drm_atomic_get_plane_state(state, plane);
2272 if (IS_ERR(plane_state)) {
2273 ret = PTR_ERR(plane_state);
2274 break;
2275 }
2276
2277 ret = drm_atomic_plane_set_property(plane,
2278 plane_state, prop, prop_value);
2279 break;
2280 }
2281 default:
2282 ret = -EINVAL;
2283 break;
2284 }
2285
2286 drm_property_change_valid_put(prop, ref);
2287 return ret;
2288 }
2289
2290 /**
2291 * DOC: explicit fencing properties
2292 *
2293 * Explicit fencing allows userspace to control the buffer synchronization
2294 * between devices. A fence or a group of fences is transferred to/from
2295 * userspace using Sync File fds, and there are two DRM properties for that:
2296 * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
2297 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
2298 *
2299 * By contrast, with implicit fencing the kernel keeps track of any
2300 * ongoing rendering and automatically ensures that the atomic update waits
2301 * for any pending rendering to complete. For shared buffers represented with
2302 * a &struct dma_buf this is tracked in &struct reservation_object.
2303 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
2304 * whereas explicit fencing is what Android wants.
2305 *
2306 * "IN_FENCE_FD":
2307 * Use this property to pass a fence that DRM should wait on before
2308 * proceeding with the Atomic Commit request and show the framebuffer for
2309 * the plane on the screen. The fence can be either a normal fence or a
2310 * merged one; the sync_file framework will handle both cases and use a
2311 * fence_array if a merged fence is received. Passing -1 here means no
2312 * fences to wait on.
2313 *
2314 * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag,
2315 * it will only check whether the Sync File is a valid one.
2316 *
2317 * On the driver side the fence is stored in the @fence parameter of
2318 * &struct drm_plane_state. Drivers which also support implicit fencing
2319 * should set the implicit fence using drm_atomic_set_fence_for_plane(),
2320 * to make sure there's consistent behaviour between drivers in precedence
2321 * of implicit vs. explicit fencing.
2322 *
2323 * "OUT_FENCE_PTR":
2324 * Use this property to pass a file descriptor pointer to DRM. Once the
2325 * Atomic Commit request call returns, OUT_FENCE_PTR will be filled with
2326 * the file descriptor number of a Sync File. This Sync File contains the
2327 * CRTC fence that will be signaled when all framebuffers present on the
2328 * Atomic Commit request for that given CRTC are scanned out on the
2329 * screen.
2330 *
2331 * The Atomic Commit request fails if an invalid pointer is passed. If the
2332 * Atomic Commit request fails for any other reason, the out fence fd
2333 * returned will be -1. On an Atomic Commit with the
2334 * DRM_MODE_ATOMIC_TEST_ONLY flag, the out fence will also be set to -1.
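 *
 * From userspace both properties are set like any other property in an atomic
 * request. A rough libdrm sketch (the property IDs are assumed to have been
 * looked up beforehand; the variable names are illustrative only):
 *
 *     int out_fence_fd = -1;
 *
 *     drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop_id,
 *                              render_done_fd);
 *     drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop_id,
 *                              (uint64_t)(uintptr_t)&out_fence_fd);
 *     ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);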
2335 * 2336 * Note that out-fences don't have a special interface to drivers and are 2337 * internally represented by a &struct drm_pending_vblank_event in struct 2338 * &drm_crtc_state, which is also used by the nonblocking atomic commit 2339 * helpers and for the DRM event handling for existing userspace. 2340 */ 2341 2342 struct drm_out_fence_state { 2343 s32 __user *out_fence_ptr; 2344 struct sync_file *sync_file; 2345 int fd; 2346 }; 2347 2348 static int setup_out_fence(struct drm_out_fence_state *fence_state, 2349 struct dma_fence *fence) 2350 { 2351 fence_state->fd = get_unused_fd_flags(O_CLOEXEC); 2352 if (fence_state->fd < 0) 2353 return fence_state->fd; 2354 2355 if (put_user(fence_state->fd, fence_state->out_fence_ptr)) 2356 return -EFAULT; 2357 2358 fence_state->sync_file = sync_file_create(fence); 2359 if (!fence_state->sync_file) 2360 return -ENOMEM; 2361 2362 return 0; 2363 } 2364 2365 static int prepare_signaling(struct drm_device *dev, 2366 struct drm_atomic_state *state, 2367 struct drm_mode_atomic *arg, 2368 struct drm_file *file_priv, 2369 struct drm_out_fence_state **fence_state, 2370 unsigned int *num_fences) 2371 { 2372 struct drm_crtc *crtc; 2373 struct drm_crtc_state *crtc_state; 2374 struct drm_connector *conn; 2375 struct drm_connector_state *conn_state; 2376 int i, c = 0, ret; 2377 2378 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) 2379 return 0; 2380 2381 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2382 s32 __user *fence_ptr; 2383 2384 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); 2385 2386 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { 2387 struct drm_pending_vblank_event *e; 2388 2389 e = create_vblank_event(crtc, arg->user_data); 2390 if (!e) 2391 return -ENOMEM; 2392 2393 crtc_state->event = e; 2394 } 2395 2396 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { 2397 struct drm_pending_vblank_event *e = crtc_state->event; 2398 2399 if (!file_priv) 2400 continue; 2401 2402 ret = drm_event_reserve_init(dev, file_priv, &e->base, 2403 &e->event.base); 2404 if (ret) { 2405 kfree(e); 2406 crtc_state->event = NULL; 2407 return ret; 2408 } 2409 } 2410 2411 if (fence_ptr) { 2412 struct dma_fence *fence; 2413 struct drm_out_fence_state *f; 2414 2415 f = kmalloc(sizeof(**fence_state) * 2416 (*num_fences + 1), GFP_KERNEL); 2417 if (!f) 2418 return -ENOMEM; 2419 memcpy(f, *fence_state, 2420 sizeof(**fence_state) * (*num_fences)); 2421 kfree(*fence_state); 2422 2423 memset(&f[*num_fences], 0, sizeof(*f)); 2424 2425 f[*num_fences].out_fence_ptr = fence_ptr; 2426 *fence_state = f; 2427 2428 fence = drm_crtc_create_fence(crtc); 2429 if (!fence) 2430 return -ENOMEM; 2431 2432 ret = setup_out_fence(&f[(*num_fences)++], fence); 2433 if (ret) { 2434 dma_fence_put(fence); 2435 return ret; 2436 } 2437 2438 crtc_state->event->base.fence = fence; 2439 } 2440 2441 c++; 2442 } 2443 2444 for_each_new_connector_in_state(state, conn, conn_state, i) { 2445 struct drm_writeback_connector *wb_conn; 2446 struct drm_writeback_job *job; 2447 struct drm_out_fence_state *f; 2448 struct dma_fence *fence; 2449 s32 __user *fence_ptr; 2450 2451 fence_ptr = get_out_fence_for_connector(state, conn); 2452 if (!fence_ptr) 2453 continue; 2454 2455 job = drm_atomic_get_writeback_job(conn_state); 2456 if (!job) 2457 return -ENOMEM; 2458 2459 f = kmalloc(sizeof(**fence_state) * 2460 (*num_fences + 1), GFP_KERNEL); 2461 if (!f) 2462 return -ENOMEM; 2463 memcpy(f, *fence_state, 2464 sizeof(**fence_state) * (*num_fences)); 2465 kfree(*fence_state); 2466 2467 
memset(&f[*num_fences], 0, sizeof(*f)); 2468 2469 f[*num_fences].out_fence_ptr = fence_ptr; 2470 *fence_state = f; 2471 2472 wb_conn = drm_connector_to_writeback(conn); 2473 fence = drm_writeback_get_out_fence(wb_conn); 2474 if (!fence) 2475 return -ENOMEM; 2476 2477 ret = setup_out_fence(&f[(*num_fences)++], fence); 2478 if (ret) { 2479 dma_fence_put(fence); 2480 return ret; 2481 } 2482 2483 job->out_fence = fence; 2484 } 2485 2486 /* 2487 * Having this flag means user mode pends on event which will never 2488 * reach due to lack of at least one CRTC for signaling 2489 */ 2490 if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2491 return -EINVAL; 2492 2493 return 0; 2494 } 2495 2496 static void complete_signaling(struct drm_device *dev, 2497 struct drm_atomic_state *state, 2498 struct drm_out_fence_state *fence_state, 2499 unsigned int num_fences, 2500 bool install_fds) 2501 { 2502 struct drm_crtc *crtc; 2503 struct drm_crtc_state *crtc_state; 2504 int i; 2505 2506 if (install_fds) { 2507 #ifdef __linux__ 2508 for (i = 0; i < num_fences; i++) 2509 fd_install(fence_state[i].fd, 2510 fence_state[i].sync_file->file); 2511 #endif 2512 2513 kfree(fence_state); 2514 return; 2515 } 2516 2517 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2518 struct drm_pending_vblank_event *event = crtc_state->event; 2519 /* 2520 * Free the allocated event. drm_atomic_helper_setup_commit 2521 * can allocate an event too, so only free it if it's ours 2522 * to prevent a double free in drm_atomic_state_clear. 2523 */ 2524 if (event && (event->base.fence || event->base.file_priv)) { 2525 drm_event_cancel_free(dev, &event->base); 2526 crtc_state->event = NULL; 2527 } 2528 } 2529 2530 if (!fence_state) 2531 return; 2532 2533 for (i = 0; i < num_fences; i++) { 2534 #ifdef __linux__ 2535 if (fence_state[i].sync_file) 2536 fput(fence_state[i].sync_file->file); 2537 if (fence_state[i].fd >= 0) 2538 put_unused_fd(fence_state[i].fd); 2539 #endif 2540 2541 /* If this fails log error to the user */ 2542 if (fence_state[i].out_fence_ptr && 2543 put_user(-1, fence_state[i].out_fence_ptr)) 2544 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n"); 2545 } 2546 2547 kfree(fence_state); 2548 } 2549 2550 int drm_mode_atomic_ioctl(struct drm_device *dev, 2551 void *data, struct drm_file *file_priv) 2552 { 2553 struct drm_mode_atomic *arg = data; 2554 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr); 2555 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr); 2556 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); 2557 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr); 2558 unsigned int copied_objs, copied_props; 2559 struct drm_atomic_state *state; 2560 struct drm_modeset_acquire_ctx ctx; 2561 struct drm_out_fence_state *fence_state; 2562 int ret = 0; 2563 unsigned int i, j, num_fences; 2564 2565 /* disallow for drivers not supporting atomic: */ 2566 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 2567 return -EINVAL; 2568 2569 /* disallow for userspace that has not enabled atomic cap (even 2570 * though this may be a bit overkill, since legacy userspace 2571 * wouldn't know how to call this ioctl) 2572 */ 2573 if (!file_priv->atomic) 2574 return -EINVAL; 2575 2576 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) 2577 return -EINVAL; 2578 2579 if (arg->reserved) 2580 return -EINVAL; 2581 2582 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) && 2583 !dev->mode_config.async_page_flip) 2584 return 
-EINVAL; 2585 2586 /* can't test and expect an event at the same time. */ 2587 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && 2588 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2589 return -EINVAL; 2590 2591 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 2592 2593 state = drm_atomic_state_alloc(dev); 2594 if (!state) 2595 return -ENOMEM; 2596 2597 state->acquire_ctx = &ctx; 2598 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 2599 2600 retry: 2601 copied_objs = 0; 2602 copied_props = 0; 2603 fence_state = NULL; 2604 num_fences = 0; 2605 2606 for (i = 0; i < arg->count_objs; i++) { 2607 uint32_t obj_id, count_props; 2608 struct drm_mode_object *obj; 2609 2610 if (get_user(obj_id, objs_ptr + copied_objs)) { 2611 ret = -EFAULT; 2612 goto out; 2613 } 2614 2615 obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); 2616 if (!obj) { 2617 ret = -ENOENT; 2618 goto out; 2619 } 2620 2621 if (!obj->properties) { 2622 drm_mode_object_put(obj); 2623 ret = -ENOENT; 2624 goto out; 2625 } 2626 2627 if (get_user(count_props, count_props_ptr + copied_objs)) { 2628 drm_mode_object_put(obj); 2629 ret = -EFAULT; 2630 goto out; 2631 } 2632 2633 copied_objs++; 2634 2635 for (j = 0; j < count_props; j++) { 2636 uint32_t prop_id; 2637 uint64_t prop_value; 2638 struct drm_property *prop; 2639 2640 if (get_user(prop_id, props_ptr + copied_props)) { 2641 drm_mode_object_put(obj); 2642 ret = -EFAULT; 2643 goto out; 2644 } 2645 2646 prop = drm_mode_obj_find_prop_id(obj, prop_id); 2647 if (!prop) { 2648 drm_mode_object_put(obj); 2649 ret = -ENOENT; 2650 goto out; 2651 } 2652 2653 if (copy_from_user(&prop_value, 2654 prop_values_ptr + copied_props, 2655 sizeof(prop_value))) { 2656 drm_mode_object_put(obj); 2657 ret = -EFAULT; 2658 goto out; 2659 } 2660 2661 ret = drm_atomic_set_property(state, obj, prop, 2662 prop_value); 2663 if (ret) { 2664 drm_mode_object_put(obj); 2665 goto out; 2666 } 2667 2668 copied_props++; 2669 } 2670 2671 drm_mode_object_put(obj); 2672 } 2673 2674 ret = prepare_signaling(dev, state, arg, file_priv, &fence_state, 2675 &num_fences); 2676 if (ret) 2677 goto out; 2678 2679 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { 2680 ret = drm_atomic_check_only(state); 2681 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { 2682 ret = drm_atomic_nonblocking_commit(state); 2683 } else { 2684 if (unlikely(drm_debug & DRM_UT_STATE)) 2685 drm_atomic_print_state(state); 2686 2687 ret = drm_atomic_commit(state); 2688 } 2689 2690 out: 2691 complete_signaling(dev, state, fence_state, num_fences, !ret); 2692 2693 if (ret == -EDEADLK) { 2694 drm_atomic_state_clear(state); 2695 ret = drm_modeset_backoff(&ctx); 2696 if (!ret) 2697 goto retry; 2698 } 2699 2700 drm_atomic_state_put(state); 2701 2702 drm_modeset_drop_locks(&ctx); 2703 drm_modeset_acquire_fini(&ctx); 2704 2705 return ret; 2706 } 2707