/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <asm/bug.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#ifdef __NetBSD__
#include <uvm/uvm_extern.h>
#endif

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
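
/*
 * For illustration only: from userspace, the handle scheme described above
 * is driven entirely through ioctls on the DRM device fd.  Assuming "handle"
 * came back from a driver-specific create ioctl, the generic calls look
 * roughly like this (error handling omitted):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);		(publish a global name)
 *
 *	struct drm_gem_open oargs = { .name = flink.name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &oargs);		(import it elsewhere)
 *
 *	struct drm_gem_close cargs = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &cargs);		(drop the handle)
 *
 * Handles are small per-file integers, so unlike real fds they do not eat
 * into the process's file-descriptor limits.
 */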

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

#ifdef __NetBSD__
        linux_mutex_init(&dev->object_name_lock);
#else
        mutex_init(&dev->object_name_lock);
#endif
        idr_init(&dev->object_name_idr);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;

        idr_destroy(&dev->object_name_idr);
#ifdef __NetBSD__
        linux_mutex_destroy(&dev->object_name_lock);
#endif
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
#ifndef __NetBSD__
        struct file *filp;
#endif

        drm_gem_private_object_init(dev, obj, size);

#ifdef __NetBSD__
        obj->gemo_shm_uao = uao_create(size, 0);
        /*
         * XXX This is gross.  We ought to do it the other way around:
         * set the uao to have the main uvm object's lock.  However,
         * uvm_obj_setlock is not safe on uvm_aobjs.
         */
        mutex_obj_hold(obj->gemo_shm_uao->vmobjlock);
        uvm_obj_setlock(&obj->gemo_uvmobj, obj->gemo_shm_uao->vmobjlock);
#else
        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;
#endif

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
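
/*
 * Illustrative sketch (not part of this file): a driver's object constructor
 * typically wraps drm_gem_object_init() roughly as below, where "struct
 * foo_obj" and its "base" member are hypothetical driver-side names:
 *
 *	struct foo_obj *fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *	if (fobj == NULL)
 *		return ERR_PTR(-ENOMEM);
 *	ret = drm_gem_object_init(dev, &fobj->base, roundup(size, PAGE_SIZE));
 *	if (ret) {
 *		kfree(fobj);
 *		return ERR_PTR(ret);
 *	}
 *	return fobj;
 *
 * The size must be page-aligned (hence the roundup); see the BUG_ON in
 * drm_gem_private_object_init() below.
 */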

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store.  Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
#ifdef __NetBSD__
        obj->gemo_shm_uao = NULL;
        KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
        KASSERT(dev->driver->gem_uvm_ops != NULL);
        uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops, true, 1);
#else
        obj->filp = NULL;
#endif

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
#ifdef __NetBSD__
        drm_vma_node_init(&obj->vma_node);
#else
        drm_vma_node_reset(&obj->vma_node);
#endif
}
EXPORT_SYMBOL(drm_gem_private_object_init);
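
/*
 * Illustrative note: drivers whose buffers are not shmem-backed (for example
 * objects wrapping driver-managed VRAM or pages imported from a dma-buf)
 * would call drm_gem_private_object_init() instead of drm_gem_object_init()
 * and then attach their own backing store, e.g. (hypothetical field):
 *
 *	drm_gem_private_object_init(dev, &fobj->base, size);
 *	fobj->vram_node = NULL;		(backing filled in by the driver later)
 */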

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
#ifndef __NetBSD__              /* XXX drm prime */
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
#endif
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object.  Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
#endif
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */

        mutex_lock(&obj->dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
        }
        mutex_unlock(&obj->dev->object_name_lock);

        drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross.  The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, filp);
        drm_vma_node_revoke(&obj->vma_node, filp->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning.  Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
        drm_gem_object_reference(obj);
        obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        if (ret < 0) {
                drm_gem_object_handle_unreference_unlocked(obj);
                return ret;
        }
        *handlep = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
        if (ret) {
                drm_gem_handle_delete(file_priv, *handlep);
                return ret;
        }

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret) {
                        drm_gem_handle_delete(file_priv, *handlep);
                        return ret;
                }
        }

        return 0;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object.  This adds a handle reference
 * to the object, which includes a regular reference count.  Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                      struct drm_gem_object *obj,
                      u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
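
/*
 * Illustrative sketch of the usual calling pattern in a driver's create
 * ioctl (names hypothetical): once the handle exists it keeps the object
 * alive, so the caller drops its own reference before returning the handle
 * to userspace, exactly as the comment above suggests.
 *
 *	ret = drm_gem_handle_create(file_priv, &fobj->base, &args->handle);
 *	drm_gem_object_unreference_unlocked(&fobj->base);
 *	return ret;
 */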

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
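
/*
 * Illustrative sketch only: a driver's mmap-offset ioctl (for instance a
 * ->dumb_map_offset implementation) usually combines the helpers above with
 * drm_vma_node_offset_addr() from <drm/drm_vma_manager.h>:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret == 0)
 *		*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *	drm_gem_object_unreference_unlocked(obj);
 *	return ret;
 *
 * Userspace then passes that offset straight to mmap(2) on the DRM fd, which
 * lands in drm_gem_mmap() below.
 */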

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
#ifdef __NetBSD__
struct page **
drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
        struct pglist pglist;
        struct vm_page *vm_page;
        struct page **pages;
        unsigned i;
        int ret;

        KASSERT((obj->size & (PAGE_SIZE - 1)) == 0);

        pages = drm_malloc_ab(obj->size >> PAGE_SHIFT, sizeof(*pages));
        if (pages == NULL) {
                ret = -ENOMEM;
                goto fail0;
        }

        TAILQ_INIT(&pglist);
        /* XXX errno NetBSD->Linux */
        ret = -uvm_obj_wirepages(obj->gemo_shm_uao, 0, obj->size, &pglist);
        if (ret)
                goto fail1;

        i = 0;
        TAILQ_FOREACH(vm_page, &pglist, pageq.queue)
                pages[i++] = container_of(vm_page, struct page, p_vmp);

        return pages;

fail1:  drm_free_large(pages);
fail0:  return ERR_PTR(ret);
}
#else
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
        struct inode *inode;
        struct address_space *mapping;
        struct page *p, **pages;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        inode = file_inode(obj->filp);
        mapping = inode->i_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        gfpmask |= mapping_gfp_mask(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* There is a hypothetical issue w/ drivers that require
                 * buffer memory in the low 4GB.. if the pages are un-
                 * pinned, and swapped out, they can end up swapped back
                 * in above 4GB.  If pages are already in memory, then
                 * shmem_read_mapping_page_gfp will ignore the gfpmask,
                 * even if the already in-memory page disobeys the mask.
                 *
                 * It is only a theoretical issue today, because none of
                 * the devices with this limitation can be populated with
                 * enough memory to trigger the issue.  But this BUG_ON()
                 * is here as a reminder in case the problem with
                 * shmem_read_mapping_page_gfp() isn't solved by the time
                 * it does become a real issue.
                 *
                 * See this thread: http://lkml.org/lkml/2011/7/11/238
                 */
                BUG_ON((gfpmask & __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        while (i--)
                page_cache_release(pages[i]);

        drm_free_large(pages);
        return ERR_CAST(p);
}
#endif
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
#ifdef __NetBSD__
void
drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty,
    bool accessed __unused /* XXX */)
{
        unsigned i;

        for (i = 0; i < (obj->size >> PAGE_SHIFT); i++) {
                if (dirty)
                        pages[i]->p_vmp.flags &= ~PG_CLEAN;
        }

        uvm_obj_unwirepages(obj->gemo_shm_uao, 0, obj->size);
}
#else
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed)
{
        int i, npages;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}
#endif
EXPORT_SYMBOL(drm_gem_put_pages);
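
/*
 * Illustrative pairing of the two helpers above in a hypothetical driver
 * pin/unpin path: the array returned by drm_gem_get_pages() is later handed
 * back to drm_gem_put_pages(), with "dirty" set if the device may have
 * written to the pages in the meantime.
 *
 *	pages = drm_gem_get_pages(obj, GFP_KERNEL);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... map the pages into the device's domain, do work ...
 *	drm_gem_put_pages(obj, pages, true, false);
 */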

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
                      u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return NULL;
        }

        drm_gem_object_reference(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        idr_preload(GFP_KERNEL);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_reference(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

#ifndef __NetBSD__              /* XXX drm prime */
        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, file_priv);
#endif
        drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
#ifdef __NetBSD__
        spin_lock_destroy(&file_private->table_lock);
#endif
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
        WARN_ON(obj->dma_buf);
#endif

#ifdef __NetBSD__
        drm_vma_node_destroy(&obj->vma_node);
        if (obj->gemo_shm_uao)
                uao_detach(obj->gemo_shm_uao);
        uvm_obj_destroy(&obj->gemo_uvmobj, true);
#else
        if (obj->filp)
                fput(obj->filp);
#endif

        drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj = (struct drm_gem_object *) kref;
        struct drm_device *dev = obj->dev;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
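
/*
 * Illustrative sketch (hypothetical driver type): a driver's
 * ->gem_free_object callback normally releases the common GEM state with
 * drm_gem_object_release() and then frees its own wrapper:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_obj *fobj = container_of(obj, struct foo_obj, base);
 *
 *		drm_gem_object_release(obj);
 *		kfree(fobj);
 *	}
 */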

#ifndef __NetBSD__

void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);

        mutex_lock(&obj->dev->struct_mutex);
        drm_vm_open_locked(obj->dev, vma);
        mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(obj->dev, vma);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver.  Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset.  To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects.  So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;

        lockdep_assert_held(&dev->struct_mutex);

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!dev->driver->gem_vm_ops)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = dev->driver->gem_vm_ops;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(dev, vma);
        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
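
/*
 * Illustrative sketch only: the dma-buf export path is the main user of
 * drm_gem_mmap_obj().  A driver's dma-buf mmap callback (names hypothetical)
 * would check access itself and then call the helper under struct_mutex:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *	mutex_unlock(&dev->struct_mutex);
 *	return ret;
 *
 * The fake-offset path in drm_gem_mmap() below does the equivalent lookup
 * and access check automatically.
 */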

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES.  Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);

        node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
                                           vma->vm_pgoff,
                                           vma_pages(vma));
        if (!node) {
                mutex_unlock(&dev->struct_mutex);
                return drm_mmap(filp, vma);
        } else if (!drm_vma_node_is_allowed(node, filp)) {
                mutex_unlock(&dev->struct_mutex);
                return -EACCES;
        }

        obj = container_of(node, struct drm_gem_object, vma_node);
        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

#endif  /* !defined(__NetBSD__) */