Lines Matching defs:dev

37 #include <dev/acpi/acpidev.h>
38 #include <dev/acpi/acpivar.h>
39 #include <dev/acpi/dsdt.h>
136 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
140 * Minors can be accessed via dev->$minor_name. This pointer is either
159 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
164 return &dev->primary;
166 return &dev->render;
168 return &dev->accel;
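
The comment at 136-140 and drm_minor_get_slot() above describe how each char-dev interface type occupies its own minor slot on the device. A minimal sketch, assuming a hypothetical "mydrv" driver, of reaching those slots only through the public struct drm_device pointers:

    #include <drm/drm_drv.h>
    #include <drm/drm_print.h>

    /*
     * Hedged sketch: "mydrv" is hypothetical. It only illustrates that the
     * primary/render/accel minors hang off struct drm_device and carry the
     * index used for the corresponding char-dev node.
     */
    static void mydrv_log_minors(struct drm_device *dev)
    {
            if (dev->primary)
                    drm_info(dev, "primary (card) minor: %d\n", dev->primary->index);
            if (dev->render)
                    drm_info(dev, "render node minor: %d\n", dev->render->index);
            if (dev->accel)
                    drm_info(dev, "accel node minor: %d\n", dev->accel->index);
    }
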
174 static void drm_minor_alloc_release(struct drm_device *dev, void *data)
178 WARN_ON(dev != minor->dev);
201 static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
206 minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
211 minor->dev = dev;
221 r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
231 *drm_minor_get_slot(dev, type) = minor;
235 static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
243 minor = *drm_minor_get_slot(dev, type);
283 static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
287 minor = *drm_minor_get_slot(dev, type);
311 * minor->dev pointer will stay valid! However, the device may get unplugged and
321 drm_dev_get(minor->dev);
326 } else if (drm_dev_is_unplugged(minor->dev)) {
327 drm_dev_put(minor->dev);
336 drm_dev_put(minor->dev);
397 * priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
411 * priv->pclk = devm_clk_get(dev, "PCLK");
447 * static int __maybe_unused driver_pm_suspend(struct device *dev)
449 * return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
452 * static int __maybe_unused driver_pm_resume(struct device *dev)
454 * drm_mode_config_helper_resume(dev_get_drvdata(dev));
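
The fragments at 397, 411 and 447-454 are excerpts of the kerneldoc example built around devm_drm_dev_alloc() and the mode-config PM helpers. Re-assembled below as a hedged sketch: driver_device, driver_drm_driver, driver_probe and the "PCLK" clock come from that example, while the fops, feature flags and trimmed error handling are assumptions of mine, not the canonical implementation.

    #include <drm/drm_drv.h>
    #include <drm/drm_gem.h>
    #include <drm/drm_modeset_helper.h>
    #include <linux/clk.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>

    struct driver_device {
            struct drm_device drm;
            struct clk *pclk;
    };

    DEFINE_DRM_GEM_FOPS(driver_fops);

    /* Trimmed driver structure; a real driver fills in many more hooks. */
    static const struct drm_driver driver_drm_driver = {
            .driver_features = DRIVER_MODESET | DRIVER_GEM,
            .fops = &driver_fops,
            .name = "driver",
    };

    static int driver_probe(struct platform_device *pdev)
    {
            struct driver_device *priv;
            int ret;

            /* Allocates and initializes the embedded drm_device via devres. */
            priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
                                      struct driver_device, drm);
            if (IS_ERR(priv))
                    return PTR_ERR(priv);

            priv->pclk = devm_clk_get(&pdev->dev, "PCLK");
            if (IS_ERR(priv->pclk))
                    return PTR_ERR(priv->pclk);

            /* Let dev_get_drvdata() in the PM callbacks find the drm_device. */
            platform_set_drvdata(pdev, &priv->drm);

            ret = drm_dev_register(&priv->drm, 0);
            if (ret)
                    return ret;

            return 0;
    }

    static int __maybe_unused driver_pm_suspend(struct device *dev)
    {
            return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
    }

    static int __maybe_unused driver_pm_resume(struct device *dev)
    {
            drm_mode_config_helper_resume(dev_get_drvdata(dev));
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(driver_pm_ops, driver_pm_suspend, driver_pm_resume);
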
486 * @dev: DRM device
498 void drm_put_dev(struct drm_device *dev)
502 if (!dev) {
503 DRM_ERROR("cleanup called no dev\n");
507 drm_dev_unregister(dev);
508 drm_dev_put(dev);
514 * @dev: DRM device
524 bool drm_dev_enter(struct drm_device *dev, int *idx)
529 if (dev->unplugged) {
556 * @dev: DRM device
562 * called while there are still open users of @dev.
564 void drm_dev_unplug(struct drm_device *dev)
574 dev->unplugged = true;
577 drm_dev_unregister(dev);
580 unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
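
drm_dev_enter() around 524-529 and drm_dev_unplug() at 564-580 are what make hot-unplug safe: once the device is marked unplugged, new enter sections fail and hardware access is skipped. A minimal sketch, with a hypothetical mydrv_priv, register offset and remove callback:

    #include <drm/drm_drv.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    struct mydrv_priv {
            struct drm_device drm;
            void __iomem *mmio;
    };

    /* Hedged sketch: guard every hardware touch with drm_dev_enter(). */
    static u32 mydrv_read_counter(struct mydrv_priv *priv)
    {
            u32 val = 0;
            int idx;

            if (!drm_dev_enter(&priv->drm, &idx))
                    return 0;       /* device already unplugged: return a benign value */

            val = readl(priv->mmio + 0x40); /* hypothetical register offset */

            drm_dev_exit(idx);
            return val;
    }

    /* On hot-unplug, the bus remove callback marks the device gone first. */
    static void mydrv_remove(struct platform_device *pdev)
    {
            struct drm_device *drm = platform_get_drvdata(pdev);

            drm_dev_unplug(drm);    /* subsequent drm_dev_enter() calls now fail */
    }
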
672 static void drm_dev_init_release(struct drm_device *dev, void *res)
674 drm_legacy_ctxbitmap_cleanup(dev);
675 drm_legacy_remove_map_hash(dev);
677 drm_fs_inode_free(dev->anon_inode);
679 put_device(dev->dev);
683 dev->dev = NULL;
684 mutex_destroy(&dev->master_mutex);
685 mutex_destroy(&dev->clientlist_mutex);
686 mutex_destroy(&dev->filelist_mutex);
687 mutex_destroy(&dev->struct_mutex);
688 mutex_destroy(&dev->debugfs_mutex);
689 drm_legacy_destroy_members(dev);
694 static int drm_dev_init(struct drm_device *dev,
709 kref_init(&dev->ref);
710 dev->dev = get_device(parent);
711 dev->driver = driver;
713 INIT_LIST_HEAD(&dev->managed.resources);
714 spin_lock_init(&dev->managed.lock);
717 dev->driver_features = ~0u;
719 if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL) &&
720 (drm_core_check_feature(dev, DRIVER_RENDER) ||
721 drm_core_check_feature(dev, DRIVER_MODESET))) {
726 drm_legacy_init_members(dev);
727 INIT_LIST_HEAD(&dev->filelist);
728 INIT_LIST_HEAD(&dev->filelist_internal);
729 INIT_LIST_HEAD(&dev->clientlist);
730 INIT_LIST_HEAD(&dev->vblank_event_list);
731 INIT_LIST_HEAD(&dev->debugfs_list);
733 spin_lock_init(&dev->event_lock);
734 mutex_init(&dev->struct_mutex);
735 mutex_init(&dev->filelist_mutex);
736 mutex_init(&dev->clientlist_mutex);
737 mutex_init(&dev->master_mutex);
738 mutex_init(&dev->debugfs_mutex);
740 ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
751 dev->anon_inode = inode;
753 if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) {
754 ret = drm_minor_alloc(dev, DRM_MINOR_ACCEL);
758 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
759 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
764 ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
769 ret = drm_legacy_create_map_hash(dev);
773 drm_legacy_ctxbitmap_init(dev);
775 if (drm_core_check_feature(dev, DRIVER_GEM)) {
776 ret = drm_gem_init(dev);
783 dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
784 if (!dev->unique) {
792 drm_managed_release(dev);
803 struct drm_device *dev,
808 ret = drm_dev_init(dev, driver, parent);
813 devm_drm_dev_init_release, dev);
863 struct drm_device *dev;
866 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
867 if (!dev)
870 ret = drm_dev_init(dev, driver, parent);
872 kfree(dev);
876 drmm_add_final_kfree(dev, dev);
878 return dev;
886 struct drm_device *dev = container_of(ref, struct drm_device, ref);
888 if (dev->driver->release)
889 dev->driver->release(dev);
891 drm_managed_release(dev);
893 kfree(dev->managed.final_kfree);
898 * @dev: device to take reference of or NULL
900 * This increases the ref-count of @dev by one. You *must* already own a
908 void drm_dev_get(struct drm_device *dev)
910 if (dev)
911 kref_get(&dev->ref);
917 * @dev: device to drop reference of or NULL
919 * This decreases the ref-count of @dev by one. The device is destroyed if the
922 void drm_dev_put(struct drm_device *dev)
924 if (dev)
925 kref_put(&dev->ref, drm_dev_release);
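
The kerneldoc at 898-925 spells out the reference-counting contract: drm_dev_get() requires that the caller already holds a reference, and drm_dev_put() may free the device on the final drop. A hedged sketch, with a hypothetical deferred-work structure, of carrying a reference across asynchronous work:

    #include <drm/drm_drv.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct mydrv_work {
            struct work_struct base;
            struct drm_device *dev;
    };

    static void mydrv_work_fn(struct work_struct *w)
    {
            struct mydrv_work *work = container_of(w, struct mydrv_work, base);

            /* ... use work->dev ... */

            drm_dev_put(work->dev); /* balances the get taken when queuing */
            kfree(work);
    }

    static int mydrv_queue_work(struct drm_device *dev)
    {
            struct mydrv_work *work = kzalloc(sizeof(*work), GFP_KERNEL);

            if (!work)
                    return -ENOMEM;

            drm_dev_get(dev);       /* legal: the caller already owns a reference */
            work->dev = dev;
            INIT_WORK(&work->base, mydrv_work_fn);
            schedule_work(&work->base);
            return 0;
    }
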
929 static int create_compat_control_link(struct drm_device *dev)
935 if (!drm_core_check_feature(dev, DRIVER_MODESET))
938 minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
964 static void remove_compat_control_link(struct drm_device *dev)
969 if (!drm_core_check_feature(dev, DRIVER_MODESET))
972 minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
987 * @dev: Device to register
990 * Register the DRM device @dev with the system, advertise device to user-space
991 * and start normal device operation. @dev must be initialized via drm_dev_init()
1005 int drm_dev_register(struct drm_device *dev, unsigned long flags)
1007 const struct drm_driver *driver = dev->driver;
1011 drm_mode_config_validate(dev);
1013 WARN_ON(!dev->managed.final_kfree);
1015 if (drm_dev_needs_global_mutex(dev))
1018 ret = drm_minor_register(dev, DRM_MINOR_RENDER);
1022 ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
1026 ret = drm_minor_register(dev, DRM_MINOR_ACCEL);
1030 ret = create_compat_control_link(dev);
1034 dev->registered = true;
1037 ret = driver->load(dev, flags);
1042 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1043 ret = drm_modeset_register_all(dev);
1051 dev->dev ? dev_name(dev->dev) : "virtual device",
1052 dev->primary ? dev->primary->index : dev->accel->index);
1057 if (dev->driver->unload)
1058 dev->driver->unload(dev);
1060 remove_compat_control_link(dev);
1061 drm_minor_unregister(dev, DRM_MINOR_ACCEL);
1062 drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
1063 drm_minor_unregister(dev, DRM_MINOR_RENDER);
1065 if (drm_dev_needs_global_mutex(dev))
1073 * @dev: Device to unregister
1082 * which can be called while there are still open users of @dev.
1087 void drm_dev_unregister(struct drm_device *dev)
1089 if (drm_core_check_feature(dev, DRIVER_LEGACY))
1090 drm_lastclose(dev);
1092 dev->registered = false;
1094 drm_client_dev_unregister(dev);
1096 if (drm_core_check_feature(dev, DRIVER_MODESET))
1097 drm_modeset_unregister_all(dev);
1099 if (dev->driver->unload)
1100 dev->driver->unload(dev);
1102 drm_legacy_pci_agp_destroy(dev);
1103 drm_legacy_rmmaps(dev);
1105 remove_compat_control_link(dev);
1106 drm_minor_unregister(dev, DRM_MINOR_ACCEL);
1107 drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
1108 drm_minor_unregister(dev, DRM_MINOR_RENDER);
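
For devices that cannot vanish underneath userspace, drm_dev_unregister() at 1087-1108 is called from the driver's remove/unbind path; hotpluggable devices use drm_dev_unplug() as sketched earlier. A short hedged sketch of the conventional teardown order (names hypothetical; the final drm_dev_put() is left to devres when devm_drm_dev_alloc() was used):

    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_drv.h>
    #include <linux/platform_device.h>

    static void mydrv_unbind(struct platform_device *pdev)
    {
            struct drm_device *drm = platform_get_drvdata(pdev);

            drm_dev_unregister(drm);                /* stop userspace access first */
            drm_atomic_helper_shutdown(drm);        /* then quiesce the display hardware */
    }
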
1125 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
1127 * core. DRM core takes care of major-number management and char-dev
1145 new_fops = fops_get(minor->dev->driver->fops);
1230 bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm)
1240 arg.busid = dev->dv_xname;
1241 arg.busid_len = strlen(dev->dv_xname) + 1;
1242 config_found_sm(dev, &arg, drmprint, drmsubmatch);
1247 int is_agp, int primary, struct device *dev, struct drm_device *drm)
1263 printf("%s: no memory for drm\n", dev->dv_xname);
1269 sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
1338 struct drm_device *dev = da->drm;
1348 if (dev == NULL) {
1349 dev = malloc(sizeof(struct drm_device), M_DRM,
1354 sc->sc_drm = dev;
1356 kref_init(&dev->ref);
1357 dev->dev = self;
1358 dev->dev_private = parent;
1359 dev->driver = da->driver;
1361 INIT_LIST_HEAD(&dev->managed.resources);
1362 mtx_init(&dev->managed.lock, IPL_TTY);
1365 dev->driver_features = ~0u;
1367 dev->dmat = da->dmat;
1368 dev->bst = da->bst;
1369 dev->unique = da->busid;
1378 dev->pdev = &dev->_pdev;
1379 dev->pdev->vendor = PCI_VENDOR(pa->pa_id);
1380 dev->pdev->device = PCI_PRODUCT(pa->pa_id);
1381 dev->pdev->subsystem_vendor = PCI_VENDOR(subsys);
1382 dev->pdev->subsystem_device = PCI_PRODUCT(subsys);
1383 dev->pdev->revision = PCI_REVISION(pa->pa_class);
1384 dev->pdev->class = (PCI_CLASS(pa->pa_class) << 16) |
1388 dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
1389 dev->pdev->bus = &dev->pdev->_bus;
1390 dev->pdev->bus->pc = pa->pa_pc;
1391 dev->pdev->bus->number = pa->pa_bus;
1392 dev->pdev->bus->domain_nr = pa->pa_domain;
1393 dev->pdev->bus->bridgetag = pa->pa_bridgetag;
1396 dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
1398 dev->pdev->bus->self->pc = pa->pa_pc;
1399 dev->pdev->bus->self->tag = *pa->pa_bridgetag;
1402 dev->pdev->pc = pa->pa_pc;
1403 dev->pdev->tag = pa->pa_tag;
1404 dev->pdev->pci = (struct pci_softc *)parent->dv_parent;
1405 dev->pdev->_dev = parent;
1408 dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
1409 aml_register_notify(dev->pdev->dev.node, NULL,
1414 mtx_init(&dev->quiesce_mtx, IPL_NONE);
1415 mtx_init(&dev->event_lock, IPL_TTY);
1416 rw_init(&dev->struct_mutex, "drmdevlk");
1417 rw_init(&dev->filelist_mutex, "drmflist");
1418 rw_init(&dev->clientlist_mutex, "drmclist");
1419 rw_init(&dev->master_mutex, "drmmast");
1421 ret = drmm_add_action(dev, drm_dev_init_release, NULL);
1425 SPLAY_INIT(&dev->files);
1426 INIT_LIST_HEAD(&dev->filelist_internal);
1427 INIT_LIST_HEAD(&dev->clientlist);
1428 INIT_LIST_HEAD(&dev->vblank_event_list);
1430 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
1431 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
1436 ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
1441 if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
1444 dev->agp = drm_agp_init();
1446 if (dev->agp != NULL) {
1447 if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
1448 dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
1449 dev->agp->mtrr = 1;
1454 if (dev->driver->gem_size > 0) {
1455 KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
1457 pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
1461 if (drm_core_check_feature(dev, DRIVER_GEM)) {
1462 ret = drm_gem_init(dev);
1469 drmm_add_final_kfree(dev, dev);
1475 drm_managed_release(dev);
1476 dev->dev_private = NULL;
1483 struct drm_device *dev = sc->sc_drm;
1492 drm_lastclose(dev);
1494 if (drm_core_check_feature(dev, DRIVER_GEM)) {
1495 if (dev->driver->gem_size > 0)
1496 pool_destroy(&dev->objpl);
1500 if (dev->agp && dev->agp->mtrr) {
1503 retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
1504 dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
1508 free(dev->agp, M_DRM, 0);
1510 if (dev->pdev && dev->pdev->bus)
1511 free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));
1514 free(dev, M_DRM, sizeof(struct drm_device));
1520 drm_quiesce(struct drm_device *dev)
1522 mtx_enter(&dev->quiesce_mtx);
1523 dev->quiesce = 1;
1524 while (dev->quiesce_count > 0) {
1525 msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
1528 mtx_leave(&dev->quiesce_mtx);
1532 drm_wakeup(struct drm_device *dev)
1534 mtx_enter(&dev->quiesce_mtx);
1535 dev->quiesce = 0;
1536 wakeup(&dev->quiesce);
1537 mtx_leave(&dev->quiesce_mtx);
1544 struct drm_device *dev = sc->sc_drm;
1562 drm_quiesce(dev);
1565 drm_wakeup(dev);
1607 drm_find_file_by_minor(struct drm_device *dev, int minor)
1612 return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
1636 struct drm_device *dev = kn->kn_hook;
1640 klist_remove_locked(&dev->note, kn);
1670 mtx_enter(&file_priv->minor->dev->event_lock);
1673 mtx_leave(&file_priv->minor->dev->event_lock);
1694 struct drm_device *dev = NULL;
1698 dev = drm_get_device_from_kdev(kdev);
1699 if (dev == NULL || dev->dev_private == NULL)
1704 mutex_lock(&dev->struct_mutex);
1705 file_priv = drm_find_file_by_minor(dev, minor(kdev));
1706 mutex_unlock(&dev->struct_mutex);
1719 kn->kn_hook = dev;
1722 klist_insert_locked(&dev->note, kn);
1735 struct drm_device *dev = NULL;
1742 dev = drm_get_device_from_kdev(kdev);
1743 if (dev == NULL || dev->dev_private == NULL)
1746 DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
1751 if (drm_dev_needs_global_mutex(dev))
1754 if (!atomic_fetch_inc(&dev->open_count))
1768 dm = *drm_minor_get_slot(dev, minor_type);
1791 mutex_lock(&dev->filelist_mutex);
1792 SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
1793 mutex_unlock(&dev->filelist_mutex);
1796 ret = drm_legacy_setup(dev);
1801 if (drm_dev_needs_global_mutex(dev))
1809 atomic_dec(&dev->open_count);
1810 if (drm_dev_needs_global_mutex(dev))
1818 struct drm_device *dev = drm_get_device_from_kdev(kdev);
1822 if (dev == NULL)
1825 if (drm_dev_needs_global_mutex(dev))
1828 DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
1830 mutex_lock(&dev->filelist_mutex);
1831 file_priv = drm_find_file_by_minor(dev, minor(kdev));
1835 mutex_unlock(&dev->filelist_mutex);
1839 SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
1840 mutex_unlock(&dev->filelist_mutex);
1843 if (atomic_dec_and_test(&dev->open_count))
1844 drm_lastclose(dev);
1846 if (drm_dev_needs_global_mutex(dev))
1855 struct drm_device *dev = drm_get_device_from_kdev(kdev);
1860 if (dev == NULL)
1863 mutex_lock(&dev->filelist_mutex);
1864 file_priv = drm_find_file_by_minor(dev, minor(kdev));
1865 mutex_unlock(&dev->filelist_mutex);
1876 mtx_enter(&dev->event_lock);
1879 mtx_leave(&dev->event_lock);
1882 error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
1886 mtx_leave(&dev->event_lock);
1889 while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
1890 MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
1896 mtx_enter(&dev->event_lock);
1898 MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
1913 drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
1919 MUTEX_ASSERT_LOCKED(&dev->event_lock);
1935 mtx_leave(&dev->event_lock);
2007 drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
2012 dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
2025 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
2030 drm_dmamem_free(dev->dmat, dmah->mem);
2053 drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
2057 if (dev->pdev == NULL)
2060 info->domain = dev->pdev->bus->domain_nr;
2061 info->bus = dev->pdev->bus->number;
2062 info->dev = PCI_SLOT(dev->pdev->devfn);
2063 info->func = PCI_FUNC(dev->pdev->devfn);
2064 info->vendor_id = dev->pdev->vendor;
2065 info->device_id = dev->pdev->device;
2066 info->subvendor_id = dev->pdev->subsystem_vendor;
2067 info->subdevice_id = dev->pdev->subsystem_device;