/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

#include <sys/conf.h>
#include <uvm/uvm.h>

void	drm_unref(struct uvm_object *);
void	drm_ref(struct uvm_object *);
boolean_t	drm_flush(struct uvm_object *, voff_t, voff_t, int);
int	drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
	    vm_fault_t, vm_prot_t, int);

const struct uvm_pagerops drm_pgops = {
	.pgo_reference = drm_ref,
	.pgo_detach = drm_unref,
	.pgo_fault = drm_fault,
	.pgo_flush = drm_flush,
};

void
drm_ref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_get(obj);
}

void
drm_unref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_put(obj);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * We do not allow device mappings to be mapped copy-on-write,
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return (VM_PAGER_ERROR);
	}

	/*
	 * We could end up here as the result of a copyin(9) or
	 * copyout(9) while handling an ioctl.  So we must be careful
	 * not to deadlock.  Therefore we only block if the quiesce
	 * count is zero, which guarantees we didn't enter from within
	 * an ioctl code path.
	 */
	mtx_enter(&dev->quiesce_mtx);
	if (dev->quiesce && dev->quiesce_count == 0) {
		mtx_leave(&dev->quiesce_mtx);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		mtx_enter(&dev->quiesce_mtx);
		while (dev->quiesce) {
			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
			    PZERO, "drmflt", INFSLP);
		}
		mtx_leave(&dev->quiesce_mtx);
		return (VM_PAGER_REFAULT);
	}
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* Call down into driver to do the magic */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (ret);
}

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

struct uvm_object *
udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_device *dev = drm_get_device_from_kdev(device);
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	struct drm_file *priv;
	struct file *filp;

	if (cdevsw[major(device)].d_mmap != drmmmap)
		return NULL;

	if (dev == NULL)
		return NULL;

	mutex_lock(&dev->filelist_mutex);
	priv = drm_find_file_by_minor(dev, minor(device));
	if (priv == NULL) {
		mutex_unlock(&dev->filelist_mutex);
		return NULL;
	}
	filp = priv->filp;
	mutex_unlock(&dev->filelist_mutex);

	if (dev->driver->mmap)
		return dev->driver->mmap(filp, accessprot, off, size);

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
	    off >> PAGE_SHIFT,
	    atop(round_page(size)));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return NULL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return NULL;
	}

	return &obj->uobj;
}

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	rw_init(&dev->object_name_lock, "drmonl");
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
	    GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
	    DRM_FILE_PAGE_OFFSET_START,
	    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

#ifdef __linux__

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

#else

int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	drm_gem_private_object_init(dev, obj, size);

	if (size > (512 * 1024 * 1024)) {
		printf("%s size too big %zu\n", __func__, size);
		return -ENOMEM;
	}

	obj->uao = uao_create(size, 0);
	uvm_obj_init(&obj->uobj, &drm_pgops, 1);

	return 0;
}

#endif

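/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically embeds struct drm_gem_object in its own buffer structure and
 * initializes it with drm_gem_object_init().  "my_obj" is a hypothetical
 * driver type; note that the size must be page-aligned or
 * drm_gem_private_object_init() will BUG_ON():
 *
 *	struct my_obj {
 *		struct drm_gem_object base;
 *	};
 *
 *	struct my_obj *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	int ret;
 *
 *	if (bo == NULL)
 *		return ERR_PTR(-ENOMEM);
 *	ret = drm_gem_object_init(dev, &bo->base, round_page(size));
 *	if (ret) {
 *		kfree(bo);
 *		return ERR_PTR(ret);
 *	}
 *	return &bo->base;
 */
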
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store.  Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __linux__
	obj->filp = NULL;
#else
	obj->uao = NULL;
	obj->uobj.pgops = NULL;
#endif

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
		    obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object.  Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create().  If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 u32 handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning.  Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete().  This is
 * done when userspace closes @file_priv for all attached handles, or through
 * the GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object.  This adds a handle reference to the
 * object, which includes a regular reference count.  Callers will likely want
 * to dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point, drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

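/*
 * Usage sketch (illustrative only; my_driver_dumb_create() and
 * my_obj_create() are hypothetical): publishing the object to userspace is
 * the last step of a creation callback, and the creation reference is
 * dropped afterwards so the handle holds the only reference:
 *
 *	int my_driver_dumb_create(struct drm_file *file_priv,
 *	    struct drm_device *dev, struct drm_mode_create_dumb *args)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = my_obj_create(dev, round_page(args->size));
 *		if (IS_ERR(obj))
 *			return PTR_ERR(obj);
 *		ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *		drm_gem_object_put(obj);
 *		return ret;
 *	}
 */
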
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently.  Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
	    size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

#ifdef notyet
/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}
#endif

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object.  An array of pages is returned.  If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages.  Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime.  That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization.  If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask().  shmem-core takes
 * care to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init().
 */
struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	STUB();
	return ERR_PTR(-ENOSYS);
#ifdef notyet
	struct address_space *mapping;
	struct vm_page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin.  Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		    (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
#endif
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
		       bool dirty, bool accessed)
{
	STUB();
#ifdef notyet
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
#endif
}
EXPORT_SYMBOL(drm_gem_put_pages);

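/*
 * Usage sketch (illustrative only; these helpers are stubbed in this port,
 * but on Linux a shmem-backed driver pairs them as follows, "bo" being a
 * hypothetical driver object):
 *
 *	struct vm_page **pages;
 *
 *	pages = drm_gem_get_pages(&bo->base);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... pin/bind the pages and point the hardware at them ...
 *	drm_gem_put_pages(&bo->base, pages, true, true);
 */
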
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}

		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers.  Returned GEM objects need to be
 * released with drm_gem_object_put().  -ENOENT is returned on a lookup
 * failure.  0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
	    GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

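/*
 * Usage sketch (illustrative only; "args" stands for a hypothetical ioctl
 * argument struct carrying a userspace handle array).  The returned array
 * is zero-initialized before the lookup, so every non-NULL entry holds a
 * reference and must be put even when the lookup failed part-way:
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *	    (void *)(uintptr_t)args->bo_handles, args->bo_count, &objs);
 *	if (ret == 0)
 *		... submit work referencing objs[0..bo_count-1] ...
 *	for (i = 0; objs && i < args->bo_count; i++)
 *		if (objs[i])
 *			drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */
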
/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns 0 on success, -ETIME if the wait timed out, -EINVAL if the handle
 * lookup failed, or another negative errno (e.g. -ERESTARTSYS when
 * interrupted) propagated from dma_resv_wait_timeout().
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);

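/*
 * Usage sketch (illustrative only; a hypothetical wait ioctl whose timeout
 * has already been converted to jiffies).  Here ret is 0 once all fences
 * have signalled, -ETIME if the wait timed out, or another negative errno
 * on error:
 *
 *	long ret;
 *
 *	ret = drm_gem_dma_resv_wait(file_priv, args->handle, true, timeout);
 *	if (ret == -ETIME)
 *		ret = -EBUSY;
 *	return ret;
 */
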
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t)obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int)args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	mtx_init(&file_private->table_lock, IPL_NONE);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
	    &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

#ifdef __linux__
	if (obj->filp)
		fput(obj->filp);
#else
	if (obj->uao)
		uao_detach(obj->uao);
	if (obj->uobj.pgops)
		uvm_obj_destroy(&obj->uobj);
#endif

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
	    container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

#ifdef __linux__
/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers.  This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers.  This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops.  Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMA-BUF mmap operation,
 * when the GEM object is not looked up based on its fake offset.  To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects.  So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will
 * fail with EACCES.  Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
	    vma->vm_pgoff,
	    vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
	    vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
#else /* !__linux__ */

int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     vm_prot_t accessprot, voff_t off, vsize_t size)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < size)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

#ifdef __linux__
	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;
#else
	if (obj->uobj.pgops == NULL)
		uvm_obj_init(&obj->uobj, obj->funcs->vm_ops, 1);
#endif

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, accessprot, off, size);
		if (ret)
			goto err_drm_gem_object_put;
#ifdef notyet
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
#endif
	} else {
#ifdef notyet
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
#else
		ret = -EINVAL;
		goto err_drm_gem_object_put;
#endif
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}

struct uvm_object *
drm_gem_mmap(struct file *filp, vm_prot_t accessprot, voff_t off,
    vsize_t size)
{
	struct drm_file *priv = (void *)filp;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return NULL;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
	    off >> PAGE_SHIFT,
	    atop(round_page(size)));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return NULL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return NULL;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
	    accessprot, off, size);
	if (ret) {
		/* The mapping reference was already dropped on failure;
		 * drop the lookup reference and report the error. */
		drm_gem_object_put(obj);
		return NULL;
	}

	drm_gem_object_put(obj);

	return &obj->uobj;
}

#endif /* __linux__ */

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
	    kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
	    drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
	    obj->import_attach ? "yes" : "no");

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	int ret;

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (dma_buf_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	if (dma_buf_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL.  Callers may rely on this. */
	dma_buf_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);

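/*
 * Usage sketch (illustrative only): a kernel mapping obtained with
 * drm_gem_vmap() must always be balanced by drm_gem_vunmap(), and the
 * mapping may live in I/O memory, so it should be accessed through the
 * dma_buf_map helpers rather than a raw pointer ("data" and "len" are
 * hypothetical):
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *	dma_buf_map_memcpy_to(&map, data, len);
 *	drm_gem_vunmap(obj, &map);
 */
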
"yes" : "no"); 1458 1459 if (obj->funcs->print_info) 1460 obj->funcs->print_info(p, indent, obj); 1461 } 1462 1463 int drm_gem_pin(struct drm_gem_object *obj) 1464 { 1465 if (obj->funcs->pin) 1466 return obj->funcs->pin(obj); 1467 else 1468 return 0; 1469 } 1470 1471 void drm_gem_unpin(struct drm_gem_object *obj) 1472 { 1473 if (obj->funcs->unpin) 1474 obj->funcs->unpin(obj); 1475 } 1476 1477 int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 1478 { 1479 int ret; 1480 1481 if (!obj->funcs->vmap) 1482 return -EOPNOTSUPP; 1483 1484 ret = obj->funcs->vmap(obj, map); 1485 if (ret) 1486 return ret; 1487 else if (dma_buf_map_is_null(map)) 1488 return -ENOMEM; 1489 1490 return 0; 1491 } 1492 EXPORT_SYMBOL(drm_gem_vmap); 1493 1494 void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) 1495 { 1496 if (dma_buf_map_is_null(map)) 1497 return; 1498 1499 if (obj->funcs->vunmap) 1500 obj->funcs->vunmap(obj, map); 1501 1502 /* Always set the mapping to NULL. Callers may rely on this. */ 1503 dma_buf_map_clear(map); 1504 } 1505 EXPORT_SYMBOL(drm_gem_vunmap); 1506 1507 /** 1508 * drm_gem_lock_reservations - Sets up the ww context and acquires 1509 * the lock on an array of GEM objects. 1510 * 1511 * Once you've locked your reservations, you'll want to set up space 1512 * for your shared fences (if applicable), submit your job, then 1513 * drm_gem_unlock_reservations(). 1514 * 1515 * @objs: drm_gem_objects to lock 1516 * @count: Number of objects in @objs 1517 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as 1518 * part of tracking this set of locked reservations. 1519 */ 1520 int 1521 drm_gem_lock_reservations(struct drm_gem_object **objs, int count, 1522 struct ww_acquire_ctx *acquire_ctx) 1523 { 1524 int contended = -1; 1525 int i, ret; 1526 1527 ww_acquire_init(acquire_ctx, &reservation_ww_class); 1528 1529 retry: 1530 if (contended != -1) { 1531 struct drm_gem_object *obj = objs[contended]; 1532 1533 ret = dma_resv_lock_slow_interruptible(obj->resv, 1534 acquire_ctx); 1535 if (ret) { 1536 ww_acquire_done(acquire_ctx); 1537 return ret; 1538 } 1539 } 1540 1541 for (i = 0; i < count; i++) { 1542 if (i == contended) 1543 continue; 1544 1545 ret = dma_resv_lock_interruptible(objs[i]->resv, 1546 acquire_ctx); 1547 if (ret) { 1548 int j; 1549 1550 for (j = 0; j < i; j++) 1551 dma_resv_unlock(objs[j]->resv); 1552 1553 if (contended != -1 && contended >= i) 1554 dma_resv_unlock(objs[contended]->resv); 1555 1556 if (ret == -EDEADLK) { 1557 contended = i; 1558 goto retry; 1559 } 1560 1561 ww_acquire_done(acquire_ctx); 1562 return ret; 1563 } 1564 } 1565 1566 ww_acquire_done(acquire_ctx); 1567 1568 return 0; 1569 } 1570 EXPORT_SYMBOL(drm_gem_lock_reservations); 1571 1572 void 1573 drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, 1574 struct ww_acquire_ctx *acquire_ctx) 1575 { 1576 int i; 1577 1578 for (i = 0; i < count; i++) 1579 dma_resv_unlock(objs[i]->resv); 1580 1581 ww_acquire_fini(acquire_ctx); 1582 } 1583 EXPORT_SYMBOL(drm_gem_unlock_reservations); 1584 1585 #ifdef notyet 1586 /** 1587 * drm_gem_fence_array_add - Adds the fence to an array of fences to be 1588 * waited on, deduplicating fences from the same context. 1589 * 1590 * @fence_array: array of dma_fence * for the job to block on. 1591 * @fence: the dma_fence to add to the list of dependencies. 1592 * 1593 * This functions consumes the reference for @fence both on success and error 1594 * cases. 
#ifdef notyet
/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * This function consumes the reference for @fence both on success and error
 * cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
		    dma_resv_get_excl_unlocked(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences(obj->resv, NULL,
	    &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);

#endif /* notyet */