/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
 * persistent objects that contain an optional fence. The fence can be
 * replaced with a new fence, or set to NULL.
 *
 * A syncobj can be waited upon, in which case the wait is performed on
 * the underlying fence.
 *
 * A syncobj can be exported to a file descriptor and imported back again;
 * these fds are opaque and have no use other than passing the syncobj
 * between processes.
 *
 * The primary use-case of syncobjs is to implement Vulkan fences and
 * semaphores.
 *
 * A syncobj is reference-counted with a kref and also has an optional
 * file. The file is only created once the syncobj is exported, and it
 * takes a reference on the kref.
 */

#include <drm/drmP.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
#include <linux/sched/signal.h>

#include "drm_internal.h"
#include <drm/drm_syncobj.h>
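/*
 * Example (illustrative sketch, not used by this file): a driver-side
 * consumer typically creates a syncobj, installs the fence of a submitted
 * job, and drops its local reference once it is done with the object.
 * "foo_job" and its "done_fence" member are hypothetical names.
 *
 *	struct drm_syncobj *syncobj;
 *	int ret;
 *
 *	ret = drm_syncobj_create(&syncobj, 0, NULL);
 *	if (ret)
 *		return ret;
 *
 *	// later, when a job is submitted
 *	drm_syncobj_replace_fence(syncobj, foo_job->done_fence);
 *
 *	// when no longer needed locally
 *	drm_syncobj_put(syncobj);
 */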
/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by @handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}

static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						 struct dma_fence **fence,
						 struct drm_syncobj_cb *cb,
						 drm_syncobj_func_t func)
{
	int ret;

	WARN_ON(*fence);

	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence)
		return 1;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence) {
		*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
								 lockdep_is_held(&syncobj->lock)));
		ret = 1;
	} else {
		*fence = NULL;
		drm_syncobj_add_callback_locked(syncobj, cb, func);
		ret = 0;
	}
	spin_unlock(&syncobj->lock);

	return ret;
}

/**
 * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
 * @syncobj: Sync object to which to add the callback
 * @cb: Callback to add
 * @func: Func to use when initializing the drm_syncobj_cb struct
 *
 * This adds a callback to be called next time the fence is replaced.
 */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
			      struct drm_syncobj_cb *cb,
			      drm_syncobj_func_t func)
{
	spin_lock(&syncobj->lock);
	drm_syncobj_add_callback_locked(syncobj, cb, func);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_add_callback);

/**
 * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
 * @syncobj: Sync object from which to remove the callback
 * @cb: Callback to remove
 */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
				 struct drm_syncobj_cb *cb)
{
	spin_lock(&syncobj->lock);
	list_del_init(&cb->node);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_remove_callback);
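/*
 * Example (illustrative sketch): the callback machinery above lets a
 * caller be notified when a fence is first installed.  The pattern used
 * by the wait code later in this file looks roughly like this; "foo_cb"
 * and "foo_func" are hypothetical names.
 *
 *	struct dma_fence *fence = NULL;
 *	struct drm_syncobj_cb foo_cb;
 *
 *	// returns 1 and sets fence if one is already installed,
 *	// otherwise registers foo_func and returns 0
 *	if (!drm_syncobj_fence_get_or_add_callback(syncobj, &fence,
 *						   &foo_cb, foo_func)) {
 *		// sleep until foo_func fires, then remove the callback
 *	}
 */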
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

struct drm_syncobj_null_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
{
	return "syncobjnull";
}

static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
{
	dma_fence_enable_sw_signaling(fence);
	return !dma_fence_is_signaled(fence);
}

static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
	.get_driver_name = drm_syncobj_null_fence_get_name,
	.get_timeline_name = drm_syncobj_null_fence_get_name,
	.enable_signaling = drm_syncobj_null_fence_enable_signaling,
	.release = NULL,
};

static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct drm_syncobj_null_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	mtx_init(&fence->lock, IPL_NONE);
	dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
		       &fence->lock, 0, 0);
	dma_fence_signal(&fence->base);

	drm_syncobj_replace_fence(syncobj, &fence->base);

	dma_fence_put(&fence->base);

	return 0;
}

/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret = 0;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	if (!*fence)
		ret = -EINVAL;

	drm_syncobj_put(syncobj);
	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
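/*
 * Example (illustrative sketch): a hypothetical driver submit path that
 * must wait on a syncobj handle supplied by userspace could resolve it to
 * a dma_fence like this; "foo_job_add_dependency" is a made-up name
 * standing in for whatever the driver does with the fence.
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_syncobj_find_fence(file_private, handle, &fence);
 *	if (ret)
 *		return ret;
 *
 *	foo_job_add_dependency(job, fence);
 *	dma_fence_put(fence);
 */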
/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	int ret;
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	mtx_init(&syncobj->lock, IPL_NONE);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
		ret = drm_syncobj_assign_null_handle(syncobj);
		if (ret < 0) {
			drm_syncobj_put(syncobj);
			return ret;
		}
	}

	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

#ifdef notyet
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
#endif
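/*
 * Example (illustrative sketch): drm_syncobj_create_as_handle() above is
 * the canonical pairing of the two exported helpers; a driver doing the
 * same by hand would look like this.  Note that the idr holds its own
 * reference, so the local one is dropped afterwards.
 *
 *	struct drm_syncobj *syncobj;
 *	u32 handle;
 *	int ret;
 *
 *	ret = drm_syncobj_create(&syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_syncobj_get_handle(file_private, syncobj, &handle);
 *	drm_syncobj_put(syncobj);
 *	return ret;
 */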
/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file
 * descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
#endif
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct drm_syncobj *syncobj;
	struct file *file;
	int ret;

	file = fget(fd);
	if (!file)
		return -EINVAL;

	if (file->f_op != &drm_syncobj_file_fops) {
		fput(file);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fput(file);
	return ret;
#endif
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
#endif
}
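/*
 * Example (illustrative sketch): a syncobj fd (opaque, carries the syncobj
 * itself) is distinct from a sync_file fd (carries a snapshot of a single
 * fence).  In-kernel code holding a sync_file fd can merge it into an
 * existing syncobj much like the import helper above does:
 *
 *	struct dma_fence *fence = sync_file_get_fence(fd);
 *
 *	if (!fence)
 *		return -EINVAL;
 *	drm_syncobj_replace_fence(syncobj, fence);
 *	dma_fence_put(fence);	// the syncobj holds its own reference
 */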
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init_base(&file_private->syncobj_idr, 1);
	mtx_init(&file_private->syncobj_table_lock, IPL_NONE);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* no valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}
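/*
 * Example (illustrative sketch): from userspace the ioctls above are
 * normally reached through libdrm; the raw flow for turning a syncobj
 * handle into a sync_file fd looks roughly like this.  Error handling
 * omitted.
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = handle,
 *		.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
 *	// args.fd now holds a sync_file fd snapshotting the current fence
 */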
struct syncobj_wait_entry {
#ifdef __linux__
	struct task_struct *task;
#else
	struct proc *task;
#endif
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	struct drm_syncobj_cb syncobj_cb;
};

static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct drm_syncobj_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, syncobj_cb);

	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
							      lockdep_is_held(&syncobj->lock)));
	wake_up_process(wait->task);
}

static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	signed long ret;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
#ifdef __linux__
		entries[i].task = current;
#else
		entries[i].task = curproc;
#endif
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				ret = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Initialize ret to the max of timeout and 1.  That way, the
	 * default return value indicates a successful wait and not a
	 * timeout.
	 */
	ret = max_t(signed long, timeout, 1);

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i) {
			if (entries[i].fence)
				continue;

			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			/* If we are doing a 0 timeout wait and we got
			 * here, then we just timed out.
			 */
			ret = 0;
			goto done_waiting;
		}

		ret = schedule_timeout(ret);

		if (ret > 0 && signal_pending(current))
			ret = -ERESTARTSYS;
	} while (ret > 0);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return ret;
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: timeout nsec component in ns, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
 */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* a timeout of 0 means poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}

static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj **syncobjs)
{
	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
	signed long ret = 0;
	uint32_t first = ~0;

	ret = drm_syncobj_array_wait_timeout(syncobjs,
					     wait->count_handles,
					     wait->flags,
					     timeout, &first);
	if (ret < 0)
		return ret;

	wait->first_signaled = first;
	if (ret == 0)
		return -ETIME;
	return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}
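/*
 * Example (illustrative sketch): DRM_IOCTL_SYNCOBJ_WAIT takes an
 * *absolute* CLOCK_MONOTONIC deadline, not a relative timeout.  A caller
 * wanting "wait up to one second" therefore computes roughly:
 *
 *	struct timespec ts;
 *	struct drm_syncobj_wait wait = { ... };
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	wait.timeout_nsec = (int64_t)ts.tv_sec * 1000000000ll +
 *			    ts.tv_nsec + 1000000000ll;	// now + 1s
 *
 * drm_timeout_abs_to_jiffies() above converts that deadline into the
 * relative jiffies count that schedule_timeout() expects.
 */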
int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, syncobjs);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
		if (ret < 0)
			break;
	}

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}