/*	$NetBSD: drm_drv.c,v 1.14 2020/04/19 17:19:13 maya Exp $	*/

/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_drv.c,v 1.14 2020/04/19 17:19:13 maya Exp $");

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"

#include <linux/nbsd-namespace.h>

unsigned int drm_debug = 0;	/* bitmask of DRM_UT_x */
EXPORT_SYMBOL(drm_debug);

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);

#ifdef __NetBSD__
spinlock_t drm_minor_lock;
struct idr drm_minors_idr;
#else
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
#endif

#ifndef __NetBSD__
static struct dentry *drm_debugfs_root;
#endif

#ifdef __NetBSD__
void
drm_err(const char *file, int line, const char *func, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	printf(KERN_ERR "[" DRM_NAME ":(%s:%d)%s] *ERROR* ", file, line, func);
	vprintf(format, args);
	va_end(args);
}
#else
void drm_err(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
	       __builtin_return_address(0), &vaf);

	va_end(args);
}
#endif
EXPORT_SYMBOL(drm_err);
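
/*
 * Example (illustrative note only): the DRM_DEBUG() family of macros in
 * <drm/drmP.h> only calls drm_ut_debug_printk() below when the corresponding
 * DRM_UT_* bit is set in drm_debug.  On Linux the bitmask can be set through
 * the "debug" module parameter declared above, e.g.
 *
 *	modprobe drm debug=0x1
 *
 * or at runtime via /sys/module/drm/parameters/debug; the exact DRM_UT_* bit
 * values live in <drm/drmP.h>.
 */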

void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
#ifdef __NetBSD__
	va_list args;

	va_start(args, format);
	printf("DRM debug in %s: ", function_name);
	vprintf(format, args);
	va_end(args);
#else
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
#endif
}
EXPORT_SYMBOL(drm_ut_debug_printk);

struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	kref_init(&master->refcount);
	spin_lock_init(&master->lock.spinlock);
#ifdef __NetBSD__
	DRM_INIT_WAITQUEUE(&master->lock.lock_queue, "drmlockq");
#else
	init_waitqueue_head(&master->lock.lock_queue);
#endif
	idr_init(&master->magic_map);
	master->minor = minor;

	return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	idr_destroy(&master->magic_map);
#ifdef __NetBSD__
	DRM_DESTROY_WAITQUEUE(&master->lock.lock_queue);
	spin_lock_destroy(&master->lock.spinlock);
#endif
	kfree(master->unique);
	kfree(master);
}

void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);

int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	if (file_priv->is_master)
		goto out_unlock;

	if (file_priv->minor->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!file_priv->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!file_priv->allowed_master) {
		ret = drm_new_set_master(dev, file_priv);
		goto out_unlock;
	}

	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		if (unlikely(ret != 0)) {
			file_priv->is_master = 0;
			drm_master_put(&file_priv->minor->master);
		}
	}

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	int ret = -EINVAL;

	mutex_lock(&dev->master_mutex);
	if (!file_priv->is_master)
		goto out_unlock;

	if (!file_priv->minor->master)
		goto out_unlock;

	ret = 0;
	if (dev->driver->master_drop)
		dev->driver->master_drop(dev, file_priv, false);
	drm_master_put(&file_priv->minor->master);
	file_priv->is_master = 0;

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_LEGACY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_CONTROL:
		return &dev->control;
	default:
		return NULL;
	}
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

#ifndef __NetBSD__	/* XXX drm sysfs */
	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}
#endif

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index: __unused
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot, *minor;
	unsigned long flags;

	slot = drm_minor_get_slot(dev, type);
	minor = *slot;
	if (!minor)
		return;

#ifndef __NetBSD__	/* XXX drm sysfs */
	put_device(minor->kdev);
#endif

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	kfree(minor);
	*slot = NULL;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
#ifndef __NetBSD__
	int ret;
#endif

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

#ifndef __NetBSD__
	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;
#endif

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

#ifndef __NetBSD__
err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
#endif
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
#ifdef __NetBSD__
	if (!minor)
#else
	if (!minor || !device_is_registered(minor->kdev))
#endif
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

#ifndef __NetBSD__
	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
#endif
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_unref(minor->dev);
}

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by struct &drm_device. This
 * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
 * callbacks implemented by the driver. The driver then needs to initialize all
 * the various subsystems for the drm device like memory management, vblank
 * handling, modesetting support and initial output configuration plus obviously
 * initialize all the corresponding hardware bits. An important part of this is
 * also calling drm_dev_set_unique() to set the userspace-visible unique name of
 * this device instance. Finally when everything is up and running and ready for
 * userspace the device instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the ->load() callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_unref().
 *
 * Note that the lifetime rules for &drm_device instances still carry a lot of
 * historical baggage. Hence use the reference counting provided by
 * drm_dev_ref() and drm_dev_unref() only carefully.
 *
 * Also note that embedding of &drm_device is currently not (yet) supported (but
 * it would be easy to add). Drivers can store driver-private data in the
 * dev_priv field of &drm_device.
 */
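
/*
 * Example of the bring-up/teardown flow described above (illustrative sketch
 * only; the "foo" driver, foo_drm_driver, foo_probe() and foo_remove() names
 * are hypothetical):
 *
 *	static int foo_probe(struct device *parent)
 *	{
 *		struct drm_device *ddev;
 *		int ret;
 *
 *		ddev = drm_dev_alloc(&foo_drm_driver, parent);
 *		if (!ddev)
 *			return -ENOMEM;
 *
 *		// set up hardware, memory management, vblank, modeset ...
 *
 *		ret = drm_dev_set_unique(ddev, "%s", "foo");
 *		if (ret)
 *			goto err_unref;
 *
 *		ret = drm_dev_register(ddev, 0);
 *		if (ret)
 *			goto err_unref;
 *
 *		return 0;
 *
 *	err_unref:
 *		drm_dev_unref(ddev);
 *		return ret;
 *	}
 *
 *	static void foo_remove(struct drm_device *ddev)
 *	{
 *		drm_dev_unregister(ddev);
 *		// tear down hardware and driver-private state ...
 *		drm_dev_unref(ddev);
 *	}
 */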

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_unref() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);

#ifdef __NetBSD__

static void *
drm_fs_inode_new(void)
{
	return NULL;
}

static void
drm_fs_inode_free(void *inode)
{
	KASSERT(inode == NULL);
}

#else

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname	= simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs		= simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

#endif

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems. This should be done last in the device
 * initialization sequence to make sure userspace can't access an inconsistent
 * state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL if out of memory.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;
#ifdef __NetBSD__
	dev->sc_monitor_hotplug.smpsw_name = PSWITCH_HK_DISPLAY_CYCLE;
	dev->sc_monitor_hotplug.smpsw_type = PSWITCH_TYPE_HOTKEY;

	ret = sysmon_pswitch_register(&dev->sc_monitor_hotplug);
	if (ret)
		goto err_pswitch;
#endif

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;

		WARN_ON(driver->suspend || driver->resume);
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

err_ctxbitmap:
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	spin_lock_destroy(&dev->event_lock);
	spin_lock_destroy(&dev->buf_lock);
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->ctxlist_mutex);
	mutex_destroy(&dev->struct_mutex);
#ifdef __NetBSD__
err_pswitch:
	sysmon_pswitch_unregister(&dev->sc_monitor_hotplug);
#endif
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

#ifdef __NetBSD__
	sysmon_pswitch_unregister(&dev->sc_monitor_hotplug);
#endif

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	spin_lock_destroy(&dev->event_lock);
	spin_lock_destroy(&dev->buf_lock);
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->ctxlist_mutex);
	mutex_destroy(&dev->struct_mutex);
	kfree(dev->unique);
	kfree(dev);
}
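
/*
 * Example (illustrative sketch only; foo_async_work() is hypothetical): code
 * that hands a struct drm_device to asynchronous work keeps it alive by
 * taking an extra reference with drm_dev_ref() and dropping it with
 * drm_dev_unref() when the work is done:
 *
 *	drm_dev_ref(dev);	// caller must already own a reference
 *	foo_async_work(dev);	// the work calls drm_dev_unref(dev) when finished
 *
 * As documented below, holding a reference only keeps the object and its
 * memory valid; it does not guarantee that the device is still registered.
 */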

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the ->load() method after registering the device nodes, creating race
 * conditions. Usage of the ->load() methods is therefore deprecated, drivers
 * must perform all initialization before calling drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

#ifndef __NetBSD__
	mutex_lock(&drm_global_mutex);
#endif

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	ret = 0;
	goto out_unlock;

err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
#ifndef __NetBSD__
	mutex_unlock(&drm_global_mutex);
#endif
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

#ifndef __NetBSD__	/* Moved to drm_pci. */
	if (dev->agp)
		drm_pci_agp_destroy(dev);
#endif

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @fmt: format string for unique name
 *
 * Sets the unique name of a DRM device using the specified format string and
 * a variable list of arguments. Drivers can use this at driver probe time if
 * the unique name of the devices they drive is static.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
	va_list ap;

	kfree(dev->unique);

	va_start(ap, fmt);
	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);

	return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);
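
/*
 * Example (illustrative sketch only; assumes a Linux-style parent struct
 * device is available): a driver can derive the unique name from its parent
 * device at probe time using the format-string interface above:
 *
 *	ret = drm_dev_set_unique(ddev, "%s", dev_name(ddev->dev));
 *	if (ret)
 *		goto err_unref;
 */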

#ifndef __NetBSD__

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	mutex_lock(&drm_global_mutex);
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor)) {
		err = PTR_ERR(minor);
		goto out_unlock;
	}

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out_release;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out_release:
	drm_minor_release(minor);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static int __init drm_core_init(void)
{
	int ret = -ENOMEM;

	drm_global_init();
	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
		goto err_p1;

	ret = drm_sysfs_init();
	if (ret < 0) {
		printk(KERN_ERR "DRM: Error creating drm class.\n");
		goto err_p2;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);
	if (!drm_debugfs_root) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
		ret = -1;
		goto err_p3;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
err_p3:
	drm_sysfs_destroy();
err_p2:
	unregister_chrdev(DRM_MAJOR, "drm");

	idr_destroy(&drm_minors_idr);
err_p1:
	return ret;
}

static void __exit drm_core_exit(void)
{
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();

	unregister_chrdev(DRM_MAJOR, "drm");

	drm_connector_ida_destroy();
	idr_destroy(&drm_minors_idr);
}

module_init(drm_core_init);
module_exit(drm_core_exit);

#endif