/*	$NetBSD: drm_syncobj.c,v 1.7 2021/12/19 12:35:45 riastradh Exp $	*/

/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
 * container for a synchronization primitive which can be used by userspace
 * to explicitly synchronize GPU commands, can be shared between userspace
 * processes, and can be shared between different DRM drivers.
 * Their primary use-case is to implement Vulkan fences and semaphores.
 * The syncobj userspace API provides ioctls for several operations:
 *
 * - Creation and destruction of syncobjs
 * - Import and export of syncobjs to/from a syncobj file descriptor
 * - Import and export a syncobj's underlying fence to/from a sync file
 * - Reset a syncobj (set its fence to NULL)
 * - Signal a syncobj (set a trivially signaled fence)
 * - Wait for a syncobj's fence to appear and be signaled
 *
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
 * &dma_fence which may be NULL.
 * When a syncobj is first created, its pointer is either NULL or a pointer
 * to an already signaled fence, depending on whether the
 * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
 * &DRM_IOCTL_SYNCOBJ_CREATE.
 * When GPU work which signals a syncobj is enqueued in a DRM driver,
 * the syncobj fence is replaced with a fence which will be signaled by the
 * completion of that work.
 * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
 * driver retrieves the syncobj's current fence at the time the work is
 * enqueued and waits on that fence before submitting the work to hardware.
 * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
 * All manipulation of the syncobj's fence happens in terms of the current
 * fence at the time the ioctl is called by userspace, regardless of whether
 * that operation is an immediate host-side operation (signal or reset) or
 * an operation which is enqueued in some driver queue.
 * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
 * manipulate a syncobj from the host by resetting its pointer to NULL or
 * setting its pointer to a fence which is already signaled.
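 *
 * As a concrete illustration, a minimal userspace sketch of creating,
 * signaling, and resetting a syncobj with these ioctls might look as
 * follows (the open DRM device fd and error handling are assumptions of
 * the example):
 *
 *	struct drm_syncobj_create create = { .flags = 0 };
 *	struct drm_syncobj_array array;
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
 *
 *	array.handles = (uintptr_t)&create.handle;
 *	array.count_handles = 1;
 *	array.pad = 0;
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);	// signaled fence
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_RESET, &array);	// fence = NULL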
 *
 *
 * Host-side wait on syncobjs
 * --------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
 * host-side wait on all of the syncobj fences simultaneously.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
 * all of the syncobj fences to be signaled before it returns.
 * Otherwise, it returns once at least one syncobj fence has been signaled
 * and the index of a signaled fence is written back to the client.
 *
 * Unlike the enqueued GPU work dependencies, which fail if they see a NULL
 * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
 * the host-side wait will first wait for the syncobj to receive a non-NULL
 * fence and then wait on that fence.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
 * syncobjs in the array has a NULL fence, -EINVAL will be returned.
 * Assuming the syncobj starts off with a NULL fence, this allows a client
 * to do a host wait in one thread (or process) which waits on GPU work
 * submitted in another thread (or process) without having to manually
 * synchronize between the two.
 * This requirement is inherited from the Vulkan fence API.
 *
 *
 * Import/export of syncobjs
 * -------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
 * provide two mechanisms for import/export of syncobjs.
 *
 * The first lets the client import or export an entire syncobj to a file
 * descriptor.
 * These fds are opaque and have no other use case, except passing the
 * syncobj between processes.
 * All exported file descriptors and any syncobj handles created as a
 * result of importing those file descriptors own a reference to the
 * same underlying struct &drm_syncobj and the syncobj can be used
 * persistently across all the processes with which it is shared.
 * The syncobj is freed only once the last reference is dropped.
 * Unlike dma-buf, importing a syncobj creates a new handle (with its own
 * reference) for every import instead of de-duplicating.
 * The primary use-case of this persistent import/export is for shared
 * Vulkan fences and semaphores.
 *
 * The second import/export mechanism, which is indicated by
 * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
 * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, lets the client
 * import/export the syncobj's current fence from/to a &sync_file.
 * When a syncobj is exported to a sync file, that sync file wraps the
 * syncobj's fence at the time of export and any later signal or reset
 * operations on the syncobj will not affect the exported sync file.
 * When a sync file is imported into a syncobj, the syncobj's fence is set
 * to the fence wrapped by that sync file.
 * Because sync files are immutable, resetting or signaling the syncobj
 * will not affect any sync files whose fences have been imported into the
 * syncobj.
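 *
 * For example, a host-side wait on a single syncobj as described above
 * might be issued from userspace roughly as follows (a sketch; drm_fd,
 * handle, and the absolute timeout computation are assumptions of the
 * example, and error handling is elided):
 *
 *	struct drm_syncobj_wait wait = {
 *		.handles = (uintptr_t)&handle,
 *		.count_handles = 1,
 *		.timeout_nsec = now_ns + 1000000000,	// absolute, ~1s out
 *		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 *	// on success, wait.first_signaled indexes a signaled syncobj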
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_syncobj.c,v 1.7 2021/12/19 12:35:45 riastradh Exp $");

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "drm_internal.h"

struct syncobj_wait_entry {
	struct list_head node;
#ifdef __NetBSD__
	/*
	 * Lock order:
	 *	syncobj->lock then fence lock
	 *	syncobj->lock then wait->lock
	 *	fence lock then wait->lock
	 *
	 * syncobj->lock serializes wait->node and wait->fence.
	 * wait->lock serializes wait->signalledp, and, by
	 * interlocking with syncobj->lock, coordinates wakeups on
	 * wait->cv for wait->fence.
	 */
	kmutex_t *lock;
	kcondvar_t *cv;
	bool *signalledp;
#else
	struct task_struct *task;
#endif
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	u64 point;
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait);

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		dma_fence_put(fence);
		list_add_tail(&wait->node, &syncobj->cb_list);
	} else if (!fence) {
		/*
		 * dma_fence_chain_find_seqno() succeeded but replaced
		 * the fence with NULL: the point is already signaled.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}
	spin_unlock(&syncobj->lock);
}

static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
				    struct syncobj_wait_entry *wait)
{
	if (!wait->node.next)
		return;

	spin_lock(&syncobj->lock);
	list_del_init(&wait->node);
	spin_unlock(&syncobj->lock);
}

/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as new timeline point to the syncobj.
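 *
 * A driver-side sketch of attaching a new point, mirroring the chain
 * allocation done by drm_syncobj_transfer_to_timeline() below (job_fence
 * and point are hypothetical; error handling elided):
 *
 *	struct dma_fence_chain *chain;
 *
 *	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 *	if (chain)
 *		drm_syncobj_add_point(syncobj, chain, job_fence, point);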
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
			   struct dma_fence_chain *chain,
			   struct dma_fence *fence,
			   uint64_t point)
{
	struct syncobj_wait_entry *cur, *tmp;
	struct dma_fence *prev;

	dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	prev = drm_syncobj_fence_get(syncobj);
	/* Adding an out-of-order point to the timeline could cause the
	 * payload returned from query_ioctl to be 0! */
	if (prev && prev->seqno >= point)
		DRM_ERROR("You are adding an out-of-order point to the timeline!\n");
	dma_fence_chain_init(chain, prev, fence, point);
	rcu_assign_pointer(syncobj->fence, &chain->base);

	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
		syncobj_wait_syncobj_func(syncobj, cur);
	spin_unlock(&syncobj->lock);

	/* Walk the chain once to trigger garbage collection */
	dma_fence_chain_for_each(fence, prev);
	dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
			syncobj_wait_syncobj_func(syncobj, cur);
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
}

/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL

/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
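 *
 * A sketch of typical driver use when resolving a syncobj wait dependency
 * at submit time (binary case, point 0; the job-dependency step is
 * hypothetical):
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
 *	if (ret)
 *		return ret;
 *	// ... make the submitted job wait on fence ...
 *	dma_fence_put(fence);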
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle, u64 point, u64 flags,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	struct syncobj_wait_entry wait;
	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
	int ret;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	drm_syncobj_put(syncobj);

	if (*fence) {
		ret = dma_fence_chain_find_seqno(fence, point);
		if (!ret)
			return 0;
		dma_fence_put(*fence);
	} else {
		ret = -EINVAL;
	}

	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return ret;

	memset(&wait, 0, sizeof(wait));
#ifdef __NetBSD__
	kmutex_t lock;
	kcondvar_t cv;
	mutex_init(&lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&cv, "drmfnfnc");
	wait.lock = &lock;	/* syncobj_wait_syncobj_func takes wait->lock */
	wait.cv = &cv;
#else
	wait.task = current;
#endif
	wait.point = point;
	drm_syncobj_fence_add_wait(syncobj, &wait);

#ifdef __NetBSD__
	spin_lock(&syncobj->lock);
	ret = 0;
	while (wait.fence == NULL) {
		unsigned start, end;

		if (timeout == 0) {
			ret = -ETIME;
			break;
		}
		mutex_spin_enter(&lock);
		spin_unlock(&syncobj->lock);
		start = getticks();
		/* XXX errno NetBSD->Linux */
		ret = -cv_timedwait_sig(&cv, &lock, MIN(timeout, INT_MAX/2));
		end = getticks();
		timeout -= MIN(timeout, end - start);
		mutex_spin_exit(&lock);
		spin_lock(&syncobj->lock);
		KASSERTMSG((ret == 0 || ret == -EINTR || ret == -ERESTART ||
			ret == -EWOULDBLOCK), "ret=%d", ret);
		if (ret == -EINTR || ret == -ERESTART) {
			ret = -ERESTARTSYS;
			break;
		} else if (ret == -EWOULDBLOCK) {
			/* Check once more, then give up. */
			ret = 0;
			timeout = 0;
		} else {
			KASSERT(ret == 0);
		}
	}
	*fence = wait.fence;
	if (wait.node.next)
		list_del_init(&wait.node);
	spin_unlock(&syncobj->lock);
	cv_destroy(&cv);
	mutex_destroy(&lock);
#else
	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (wait.fence) {
			ret = 0;
			break;
		}
		if (timeout == 0) {
			ret = -ETIME;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

	__set_current_state(TASK_RUNNING);
	*fence = wait.fence;

	if (wait.node.next)
		drm_syncobj_remove_wait(syncobj, &wait);
#endif

	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	spin_lock_destroy(&syncobj->lock);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
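 *
 * A sketch of creation followed by handle export, mirroring
 * drm_syncobj_create_as_handle() below:
 *
 *	struct drm_syncobj *syncobj;
 *	u32 handle;
 *	int ret;
 *
 *	ret = drm_syncobj_create(&syncobj, 0, NULL);
 *	if (ret)
 *		return ret;
 *	ret = drm_syncobj_get_handle(file_private, syncobj, &handle);
 *	drm_syncobj_put(syncobj);	// the handle now owns a reference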
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
		drm_syncobj_assign_null_handle(syncobj);

	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

#ifdef __NetBSD__
static int drm_syncobj_fop_close(struct file *file)
#else
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
#endif
{
#ifdef __NetBSD__
	struct drm_syncobj *syncobj = file->f_data;
#else
	struct drm_syncobj *syncobj = file->private_data;
#endif

	drm_syncobj_put(syncobj);
	return 0;
}

#ifdef __NetBSD__
static const struct fileops drm_syncobj_file_ops = {
	.fo_name = "drm_syncobj",
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = fbadop_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = drm_syncobj_fop_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
};
#else
static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
#endif
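
/*
 * For reference, the userspace view of the persistent import/export path
 * implemented below is roughly (a sketch; drm_fd, other_drm_fd, and
 * handle are assumptions of the example, error handling elided):
 *
 *	struct drm_syncobj_handle args = { .handle = handle };
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
 *	// args.fd may now be passed to another process, which turns it
 *	// back into a handle of its own:
 *	ioctl(other_drm_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
 */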

/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file
 * descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;
#ifdef __NetBSD__
	int ret;
#endif

#ifdef __NetBSD__
	fd = -1;
	/* XXX errno NetBSD->Linux */
	ret = -fd_allocfile(&file, &fd);
	if (ret)
		return ret;
	file->f_data = syncobj;
	file->f_ops = &drm_syncobj_file_ops;
#else
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
#endif

	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EINVAL;

#ifdef __NetBSD__
	if (f.file->f_ops != &drm_syncobj_file_ops)
#else
	if (f.file->f_op != &drm_syncobj_file_fops)
#endif
	{
		fdput(f);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
#ifdef __NetBSD__
	syncobj = f.file->f_data;
#else
	syncobj = f.file->private_data;
#endif
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fdput(f);
	return ret;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
#ifdef __NetBSD__
	struct dma_fence *fence;
	struct sync_file *sync_file;
	struct file *fp = NULL;
	int fd = -1;
	int ret;

	/* Allocate a file and descriptor. */
	/* XXX errno NetBSD->Linux */
	ret = -fd_allocfile(&fp, &fd);
	if (ret)
		goto out;

	/* Find the fence. */
	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto out;

	/* Create the sync file. */
	sync_file = sync_file_create(fence, fp);

	/* Release the fence. */
	dma_fence_put(fence);

	/* If the sync file creation failed, bail. */
	if (sync_file == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Success! */
	fd_affix(curproc, fp, fd);
	fp = NULL;		/* sync_file consumes */
	ret = 0;

out:
	/* If anything went wrong and we still have an unused file, abort. */
	if (fp != NULL) {
		fd_abort(curproc, fp, fd);
		fd = -1;
	}

	/* Return the descriptor or -1. */
	*p_fd = fd;
	return ret;
#else
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
#endif
}
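
/*
 * For reference, a userspace sketch of the sync-file export path
 * implemented above (drm_fd and handle are assumptions of the example;
 * error handling elided):
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = handle,
 *		.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
 *	// args.fd now wraps the syncobj's fence as of this moment; a
 *	// later reset or signal of the syncobj does not change it.
 */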

/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects held by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	spin_lock_destroy(&file_private->syncobj_table_lock);
	idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* DRM_SYNCOBJ_CREATE_SIGNALED is the only valid flag so far */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}

static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
					    struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence *fence;
	struct dma_fence_chain *chain;
	int ret;

	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!timeline_syncobj) {
		return -ENOENT;
	}
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags,
				     &fence);
	if (ret)
		goto err;
	chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
	if (!chain) {
		ret = -ENOMEM;
		goto err1;
	}
	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err1:
	dma_fence_put(fence);
err:
	drm_syncobj_put(timeline_syncobj);

	return ret;
}

static int
drm_syncobj_transfer_to_binary(struct drm_file *file_private,
			       struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *binary_syncobj = NULL;
	struct dma_fence *fence;
	int ret;

	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!binary_syncobj)
		return -ENOENT;
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags, &fence);
	if (ret)
		goto err;
	drm_syncobj_replace_fence(binary_syncobj, fence);
	dma_fence_put(fence);
err:
	drm_syncobj_put(binary_syncobj);

	return ret;
}

int
drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_private)
{
	struct drm_syncobj_transfer *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->dst_point)
		ret = drm_syncobj_transfer_to_timeline(file_private, args);
	else
		ret = drm_syncobj_transfer_to_binary(file_private, args);

	return ret;
}
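
/*
 * For reference, the userspace shape of a transfer as implemented above,
 * copying the fence at src_point of one timeline into dst_point of
 * another (a sketch; the fd, handles, and points are assumptions of the
 * example):
 *
 *	struct drm_syncobj_transfer xfer = {
 *		.src_handle = src_handle,
 *		.dst_handle = dst_handle,
 *		.src_point = 3,		// 0 for a binary source syncobj
 *		.dst_point = 7,		// 0 to transfer to a binary syncobj
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &xfer);
 */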

static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

#ifdef __NetBSD__
	mutex_enter(wait->lock);
	*wait->signalledp = true;
	cv_broadcast(wait->cv);
	mutex_exit(wait->lock);
#else
	wake_up_process(wait->task);
#endif
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* This happens inside the syncobj lock */
	fence = rcu_dereference_protected(syncobj->fence,
					  lockdep_is_held(&syncobj->lock));
	dma_fence_get(fence);
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		dma_fence_put(fence);
		return;
	}

	if (!fence) {
		/* The point is already signaled; use a stub fence. */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}

#ifdef __NetBSD__
	KASSERT(spin_is_locked(&syncobj->lock));
	mutex_enter(wait->lock);
	cv_broadcast(wait->cv);
	mutex_exit(wait->lock);
#else
	wake_up_process(wait->task);
#endif
	list_del_init(&wait->node);
}

static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  void __user *user_points,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint64_t *points;
	uint32_t signaled_count, i;
#ifdef __NetBSD__
	kmutex_t lock;
	kcondvar_t cv;
	bool signalled = false;
	int ret;
	mutex_init(&lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&cv, "drmsynco");
#endif

	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
	if (points == NULL) {
		/* Route through the cleanup path so the NetBSD lock and
		 * cv are destroyed; kfree(NULL) is harmless. */
		timeout = -ENOMEM;
		goto err_free_points;
	}

	if (!user_points) {
		memset(points, 0, count * sizeof(uint64_t));

	} else if (copy_from_user(points, user_points,
				  sizeof(uint64_t) * count)) {
		timeout = -EFAULT;
		goto err_free_points;
	}

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		timeout = -ENOMEM;
		goto err_free_points;
	}
	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
#ifdef __NetBSD__
		entries[i].lock = &lock;
		entries[i].cv = &cv;
		entries[i].signalledp = &signalled;
#else
		entries[i].task = current;
#endif
		entries[i].point = points[i];
		fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
			dma_fence_put(fence);
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (fence)
			entries[i].fence = fence;
		else
			entries[i].fence = dma_fence_get_stub();

		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
		    dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
#ifndef __NetBSD__
		set_current_state(TASK_INTERRUPTIBLE);
#endif

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
			    dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

#ifdef __NetBSD__
		mutex_spin_enter(&lock);
		if (signalled) {
			ret = 0;
		} else {
			unsigned start, end;

			start = getticks();
			/* XXX errno NetBSD->Linux */
			ret = -cv_timedwait_sig(&cv, &lock,
			    MIN(timeout, INT_MAX/2));
			end = getticks();
			timeout -= MIN(timeout, end - start);
		}
		mutex_spin_exit(&lock);
		KASSERTMSG((ret == 0 || ret == -EINTR || ret == -ERESTART ||
			ret == -EWOULDBLOCK), "ret=%d", ret);
		if (ret == -EINTR || ret == -ERESTART) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		} else if (ret == -EWOULDBLOCK) {
			/* Poll fences once more, then exit. */
			timeout = 0;
		} else {
			KASSERT(ret == 0);
		}
#else
		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
#endif
	} while (1);

done_waiting:
#ifndef __NetBSD__
	__set_current_state(TASK_RUNNING);
#endif

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

err_free_points:
	kfree(points);
#ifdef __NetBSD__
	cv_destroy(&cv);
	mutex_destroy(&lock);
#endif

	return timeout;
}
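
/*
 * The wait ioctls take an absolute timeout in nanoseconds, converted to
 * a relative jiffy count by drm_timeout_abs_to_jiffies() below.  A
 * userspace sketch of computing such a timeout (CLOCK_MONOTONIC is
 * assumed here to match the kernel's ktime_get() clock):
 *
 *	struct timespec ts;
 *	int64_t timeout_nsec;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	timeout_nsec = (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec
 *	    + 1000000000;	// one second from now; 0 would mean poll
 */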

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in ns, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
 */
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* make a 0 timeout mean poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);

static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj_timeline_wait *timeline_wait,
				  struct drm_syncobj **syncobjs, bool timeline)
{
	signed long timeout = 0;
	uint32_t first = ~0;

	if (!timeline) {
		timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 NULL,
							 wait->count_handles,
							 wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		wait->first_signaled = first;
	} else {
		timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 u64_to_user_ptr(timeline_wait->points),
							 timeline_wait->count_handles,
							 timeline_wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		timeline_wait->first_signaled = first;
	}
	return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, NULL, syncobjs, false);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_private)
{
	struct drm_syncobj_timeline_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     NULL, args, syncobjs, true);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_assign_null_handle(syncobjs[i]);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	struct dma_fence_chain **chains;
	uint64_t *points;
	uint32_t i, j;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	points = kmalloc_array(args->count_handles, sizeof(*points),
			       GFP_KERNEL);
	if (!points) {
		ret = -ENOMEM;
		goto out;
	}
	if (!u64_to_user_ptr(args->points)) {
		memset(points, 0, args->count_handles * sizeof(uint64_t));
	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
				  sizeof(uint64_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_points;
	}

	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
	if (!chains) {
		ret = -ENOMEM;
		goto err_points;
	}
	for (i = 0; i < args->count_handles; i++) {
		chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
		if (!chains[i]) {
			for (j = 0; j < i; j++)
				kfree(chains[j]);
			ret = -ENOMEM;
			goto err_chains;
		}
	}

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence *fence = dma_fence_get_stub();

		drm_syncobj_add_point(syncobjs[i], chains[i],
				      fence, points[i]);
		dma_fence_put(fence);
	}
err_chains:
	kfree(chains);
err_points:
	kfree(points);
out:
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
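
/*
 * For reference, a userspace sketch of querying the last signaled (or,
 * with DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED, last submitted) timeline
 * payload, as implemented below (drm_fd and handle are assumptions of
 * the example):
 *
 *	uint64_t point;
 *	struct drm_syncobj_timeline_array query = {
 *		.handles = (uintptr_t)&handle,
 *		.points = (uintptr_t)&point,
 *		.count_handles = 1,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_QUERY, &query);
 *	// point now holds the syncobj's timeline payload
 */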

int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	uint64_t __user *points = u64_to_user_ptr(args->points);
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence_chain *chain;
		struct dma_fence *fence;
		uint64_t point;

		fence = drm_syncobj_fence_get(syncobjs[i]);
		chain = to_dma_fence_chain(fence);
		if (chain) {
			struct dma_fence *iter, *last_signaled =
				dma_fence_get(fence);

			if (args->flags &
			    DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
				point = fence->seqno;
			} else {
				dma_fence_chain_for_each(iter, fence) {
					if (iter->context != fence->context) {
						dma_fence_put(iter);
						/* It is most likely that the
						 * timeline has out-of-order
						 * points. */
						break;
					}
					dma_fence_put(last_signaled);
					last_signaled = dma_fence_get(iter);
				}
				point = dma_fence_is_signaled(last_signaled) ?
					last_signaled->seqno :
					to_dma_fence_chain(last_signaled)->prev_seqno;
			}
			dma_fence_put(last_signaled);
		} else {
			point = 0;
		}
		dma_fence_put(fence);
		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
		ret = ret ? -EFAULT : 0;
		if (ret)
			break;
	}
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}