xref: /openbsd-src/sys/dev/pci/drm/drm_drv.c (revision 1a8dbaac879b9f3335ad7fb25429ce63ac1d6bac)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/fcntl.h>
31 #include <sys/poll.h>
32 #include <sys/specdev.h>
33 #include <sys/vnode.h>
34 
35 #include <machine/bus.h>
36 
37 #ifdef __HAVE_ACPI
38 #include <dev/acpi/acpidev.h>
39 #include <dev/acpi/acpivar.h>
40 #include <dev/acpi/dsdt.h>
41 #endif
42 
43 #include <linux/debugfs.h>
44 #include <linux/fs.h>
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/mount.h>
48 #include <linux/pseudo_fs.h>
49 #include <linux/slab.h>
50 #include <linux/srcu.h>
51 
52 #include <drm/drm_client.h>
53 #include <drm/drm_color_mgmt.h>
54 #include <drm/drm_drv.h>
55 #include <drm/drm_file.h>
56 #include <drm/drm_mode_object.h>
57 #include <drm/drm_print.h>
58 
59 #include <drm/drm_gem.h>
60 #include <drm/drm_agpsupport.h>
61 #include <drm/drm_irq.h>
62 
63 #include "drm_crtc_internal.h"
64 #include "drm_internal.h"
65 #include "drm_legacy.h"
66 
67 MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
68 MODULE_DESCRIPTION("DRM shared core routines");
69 MODULE_LICENSE("GPL and additional rights");
70 
71 static DEFINE_SPINLOCK(drm_minor_lock);
72 static struct idr drm_minors_idr;
73 
74 /*
75  * If the drm core fails to init for whatever reason,
76  * we should prevent any drivers from registering with it.
77  * It's best to check this at drm_dev_init(), as some drivers
78  * prefer to embed struct drm_device into their own device
79  * structure and call drm_dev_init() themselves.
80  */
81 static bool drm_core_init_complete = false;
82 
83 static struct dentry *drm_debugfs_root;
84 
85 #ifdef notyet
86 DEFINE_STATIC_SRCU(drm_unplug_srcu);
87 #endif
88 
89 /*
90  * Some functions are only called once on init regardless of how many times
91  * drm attaches.  In linux this is handled via module_init()/module_exit()
92  */
93 int drm_refcnt;
94 
/*
 * Per-attachment state tying the autoconf(9) device to the shared
 * drm_device.
 */
struct drm_softc {
	struct device		sc_dev;		/* generic autoconf device */
	struct drm_device 	*sc_drm;	/* DRM device for this attachment */
	/* NOTE(review): presumably nonzero when sc_drm was allocated by
	 * this softc (and must be freed on detach) — confirm in attach. */
	int			sc_allocated;
};

/*
 * Arguments passed from the bus parent to the drm match/attach hooks.
 */
struct drm_attach_args {
	struct drm_device		*drm;	/* preallocated device, if any */
	struct drm_driver		*driver;
	char				*busid;
	bus_dma_tag_t			 dmat;
	bus_space_tag_t			 bst;
	size_t				 busid_len;
	int				 is_agp;
	struct pci_attach_args		*pa;
	int				 primary;	/* primary graphics device? */
};
112 
113 void	drm_linux_init(void);
114 int	drm_linux_acpi_notify(struct aml_node *, int, void *);
115 
116 int	drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
117 	    struct drm_pending_event **);
118 
119 int	drmprint(void *, const char *);
120 int	drmsubmatch(struct device *, void *, void *);
121 const struct pci_device_id *
122 	drm_find_description(int, int, const struct pci_device_id *);
123 
124 int	drm_file_cmp(struct drm_file *, struct drm_file *);
125 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
126 
127 #define DRMDEVCF_PRIMARY	0
128 #define drmdevcf_primary	cf_loc[DRMDEVCF_PRIMARY]	/* spec'd as primary? */
129 #define DRMDEVCF_PRIMARY_UNK	-1
130 
131 /*
132  * DRM Minors
133  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
134  * of them is represented by a drm_minor object. Depending on the capabilities
135  * of the device-driver, different interfaces are registered.
136  *
137  * Minors can be accessed via dev->$minor_name. This pointer is either
138  * NULL or a valid drm_minor pointer and stays valid as long as the device is
139  * valid. This means, DRM minors have the same life-time as the underlying
140  * device. However, this doesn't mean that the minor is active. Minors are
141  * registered and unregistered dynamically according to device-state.
142  */
143 
144 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
145 					     unsigned int type)
146 {
147 	switch (type) {
148 	case DRM_MINOR_PRIMARY:
149 		return &dev->primary;
150 	case DRM_MINOR_RENDER:
151 		return &dev->render;
152 	default:
153 		BUG();
154 	}
155 }
156 
/*
 * Allocate a minor of the given type for @dev and reserve an index for
 * it in the global minors IDR.  The IDR slot is reserved with a NULL
 * value, so lookups fail until drm_minor_register() publishes the
 * minor.  Each minor type owns a disjoint 64-entry index range
 * (64*type .. 64*(type+1)-1).
 *
 * Returns 0 on success or a negative errno (-ENOMEM, or whatever
 * idr_alloc() failed with).
 */
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/* preload outside the spinlock so idr_alloc() can use GFP_NOWAIT */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

#ifdef __linux__
	/* the sysfs chardev node only exists on Linux */
	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}
#endif

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

#ifdef __linux__
err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
#endif
err_free:
	kfree(minor);
	return r;
}
206 
/*
 * Free the minor of the given type on @dev and release its IDR index.
 * Counterpart of drm_minor_alloc().  Safe to call when the minor was
 * never allocated (the slot is NULL).
 */
static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot, *minor;
	unsigned long flags;

	slot = drm_minor_get_slot(dev, type);
	minor = *slot;
	if (!minor)
		return;

#ifdef __linux__
	/* drop the sysfs device allocated by drm_sysfs_minor_alloc() */
	put_device(minor->kdev);
#endif

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	kfree(minor);
	*slot = NULL;
}
228 
/*
 * Publish the minor of the given type.  On Linux this sets up debugfs
 * and adds the sysfs device node; on OpenBSD neither exists, so only
 * the IDR entry is activated.  The final idr_replace() makes the minor
 * visible to drm_minor_acquire().
 *
 * Returns 0 on success (including when no minor of this type exists),
 * or a negative errno from the Linux-only registration steps.
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
#ifdef __linux__
	int ret;
#endif

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

#ifdef __linux__
	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;
#else
	/* no debugfs on OpenBSD; keep the root pointer NULL */
	drm_debugfs_root = NULL;
#endif

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

#ifdef __linux__
err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
#endif
}
271 
/*
 * Hide the minor of the given type again: reset the IDR entry to NULL
 * so drm_minor_acquire() fails from now on, then tear down the Linux
 * device node and debugfs.  The minor itself stays allocated until
 * drm_minor_free().
 */
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
#ifdef __linux__
	if (!minor || !device_is_registered(minor->kdev))
#else
	if (!minor)
#endif
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

#ifdef __linux__
	device_del(minor->kdev);
#endif
	/* NOTE(review): this runs on the non-Linux build too — confirm
	 * minor->kdev is valid there as well. */
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
296 
297 /*
298  * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
300  * object with drm_minor_release().
301  *
302  * As long as you hold this minor, it is guaranteed that the object and the
303  * minor->dev pointer will stay valid! However, the device may get unplugged and
304  * unregistered while you hold the minor.
305  */
306 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
307 {
308 	struct drm_minor *minor;
309 	unsigned long flags;
310 
311 	spin_lock_irqsave(&drm_minor_lock, flags);
312 	minor = idr_find(&drm_minors_idr, minor_id);
313 	if (minor)
314 		drm_dev_get(minor->dev);
315 	spin_unlock_irqrestore(&drm_minor_lock, flags);
316 
317 	if (!minor) {
318 		return ERR_PTR(-ENODEV);
319 	} else if (drm_dev_is_unplugged(minor->dev)) {
320 		drm_dev_put(minor->dev);
321 		return ERR_PTR(-ENODEV);
322 	}
323 
324 	return minor;
325 }
326 
/*
 * Drop the device reference taken by drm_minor_acquire().  Minors are
 * not individually refcounted; only the underlying drm_device is.
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}
331 
332 /**
333  * DOC: driver instance overview
334  *
335  * A device instance for a drm driver is represented by &struct drm_device. This
336  * is initialized with drm_dev_init(), usually from bus-specific ->probe()
337  * callbacks implemented by the driver. The driver then needs to initialize all
338  * the various subsystems for the drm device like memory management, vblank
 * handling, modesetting support and initial output configuration plus obviously
340  * initialize all the corresponding hardware bits. Finally when everything is up
341  * and running and ready for userspace the device instance can be published
342  * using drm_dev_register().
343  *
 * There is also deprecated support for initializing device instances using
345  * bus-specific helpers and the &drm_driver.load callback. But due to
346  * backwards-compatibility needs the device instance have to be published too
347  * early, which requires unpretty global locking to make safe and is therefore
348  * only support for existing drivers not yet converted to the new scheme.
349  *
350  * When cleaning up a device instance everything needs to be done in reverse:
351  * First unpublish the device instance with drm_dev_unregister(). Then clean up
352  * any other resources allocated at device initialization and drop the driver's
353  * reference to &drm_device using drm_dev_put().
354  *
355  * Note that the lifetime rules for &drm_device instance has still a lot of
356  * historical baggage. Hence use the reference counting provided by
357  * drm_dev_get() and drm_dev_put() only carefully.
358  *
359  * Display driver example
360  * ~~~~~~~~~~~~~~~~~~~~~~
361  *
362  * The following example shows a typical structure of a DRM display driver.
363  * The example focus on the probe() function and the other functions that is
364  * almost always present and serves as a demonstration of devm_drm_dev_init()
365  * usage with its accompanying drm_driver->release callback.
366  *
367  * .. code-block:: c
368  *
369  *	struct driver_device {
370  *		struct drm_device drm;
371  *		void *userspace_facing;
372  *		struct clk *pclk;
373  *	};
374  *
375  *	static void driver_drm_release(struct drm_device *drm)
376  *	{
377  *		struct driver_device *priv = container_of(...);
378  *
379  *		drm_mode_config_cleanup(drm);
380  *		drm_dev_fini(drm);
381  *		kfree(priv->userspace_facing);
382  *		kfree(priv);
383  *	}
384  *
385  *	static struct drm_driver driver_drm_driver = {
386  *		[...]
387  *		.release = driver_drm_release,
388  *	};
389  *
390  *	static int driver_probe(struct platform_device *pdev)
391  *	{
392  *		struct driver_device *priv;
393  *		struct drm_device *drm;
394  *		int ret;
395  *
396  *		// devm_kzalloc() can't be used here because the drm_device '
397  *		// lifetime can exceed the device lifetime if driver unbind
398  *		// happens when userspace still has open file descriptors.
399  *		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
400  *		if (!priv)
401  *			return -ENOMEM;
402  *
403  *		drm = &priv->drm;
404  *
405  *		ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
406  *		if (ret) {
407  *			kfree(drm);
408  *			return ret;
409  *		}
410  *
411  *		drm_mode_config_init(drm);
412  *
413  *		priv->userspace_facing = kzalloc(..., GFP_KERNEL);
414  *		if (!priv->userspace_facing)
415  *			return -ENOMEM;
416  *
417  *		priv->pclk = devm_clk_get(dev, "PCLK");
418  *		if (IS_ERR(priv->pclk))
419  *			return PTR_ERR(priv->pclk);
420  *
421  *		// Further setup, display pipeline etc
422  *
423  *		platform_set_drvdata(pdev, drm);
424  *
425  *		drm_mode_config_reset(drm);
426  *
427  *		ret = drm_dev_register(drm);
428  *		if (ret)
429  *			return ret;
430  *
431  *		drm_fbdev_generic_setup(drm, 32);
432  *
433  *		return 0;
434  *	}
435  *
436  *	// This function is called before the devm_ resources are released
437  *	static int driver_remove(struct platform_device *pdev)
438  *	{
439  *		struct drm_device *drm = platform_get_drvdata(pdev);
440  *
441  *		drm_dev_unregister(drm);
442  *		drm_atomic_helper_shutdown(drm)
443  *
444  *		return 0;
445  *	}
446  *
447  *	// This function is called on kernel restart and shutdown
448  *	static void driver_shutdown(struct platform_device *pdev)
449  *	{
450  *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
451  *	}
452  *
453  *	static int __maybe_unused driver_pm_suspend(struct device *dev)
454  *	{
455  *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
456  *	}
457  *
458  *	static int __maybe_unused driver_pm_resume(struct device *dev)
459  *	{
460  *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
461  *
462  *		return 0;
463  *	}
464  *
465  *	static const struct dev_pm_ops driver_pm_ops = {
466  *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
467  *	};
468  *
469  *	static struct platform_driver driver_driver = {
470  *		.driver = {
471  *			[...]
472  *			.pm = &driver_pm_ops,
473  *		},
474  *		.probe = driver_probe,
475  *		.remove = driver_remove,
476  *		.shutdown = driver_shutdown,
477  *	};
478  *	module_platform_driver(driver_driver);
479  *
480  * Drivers that want to support device unplugging (USB, DT overlay unload) should
481  * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
482  * regions that is accessing device resources to prevent use after they're
483  * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
484  * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
485  * drm_atomic_helper_shutdown() is called. This means that if the disable code
486  * paths are protected, they will not run on regular driver module unload,
 * possibly leaving the hardware enabled.
488  */
489 
490 /**
491  * drm_put_dev - Unregister and release a DRM device
492  * @dev: DRM device
493  *
494  * Called at module unload time or when a PCI device is unplugged.
495  *
496  * Cleans up all DRM device, calling drm_lastclose().
497  *
498  * Note: Use of this function is deprecated. It will eventually go away
499  * completely.  Please use drm_dev_unregister() and drm_dev_put() explicitly
500  * instead to make sure that the device isn't userspace accessible any more
501  * while teardown is in progress, ensuring that userspace can't access an
502  * inconsistent state.
503  */
504 void drm_put_dev(struct drm_device *dev)
505 {
506 	DRM_DEBUG("\n");
507 
508 	if (!dev) {
509 		DRM_ERROR("cleanup called no dev\n");
510 		return;
511 	}
512 
513 	drm_dev_unregister(dev);
514 	drm_dev_put(dev);
515 }
516 EXPORT_SYMBOL(drm_put_dev);
517 
518 /**
519  * drm_dev_enter - Enter device critical section
520  * @dev: DRM device
521  * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
522  *
523  * This function marks and protects the beginning of a section that should not
524  * be entered after the device has been unplugged. The section end is marked
525  * with drm_dev_exit(). Calls to this function can be nested.
526  *
527  * Returns:
528  * True if it is OK to enter the section, false otherwise.
529  */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
#ifdef notyet
	/* SRCU-based unplug protection is not wired up on OpenBSD yet */
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}
#endif

	/* without SRCU support the device is always considered present */
	return true;
}
EXPORT_SYMBOL(drm_dev_enter);
544 
545 /**
546  * drm_dev_exit - Exit device critical section
547  * @idx: index returned from drm_dev_enter()
548  *
549  * This function marks the end of a section that should not be entered after
550  * the device has been unplugged.
551  */
void drm_dev_exit(int idx)
{
#ifdef notyet
	srcu_read_unlock(&drm_unplug_srcu, idx);
#endif
	/* no-op while SRCU unplug protection is disabled (see drm_dev_enter) */
}
EXPORT_SYMBOL(drm_dev_exit);
559 
560 /**
561  * drm_dev_unplug - unplug a DRM device
562  * @dev: DRM device
563  *
564  * This unplugs a hotpluggable DRM device, which makes it inaccessible to
565  * userspace operations. Entry-points can use drm_dev_enter() and
566  * drm_dev_exit() to protect device resources in a race free manner. This
567  * essentially unregisters the device like drm_dev_unregister(), but can be
568  * called while there are still open users of @dev.
569  */
void drm_dev_unplug(struct drm_device *dev)
{
	/* hot-unplug support is not implemented on OpenBSD yet */
	STUB();
#ifdef notyet
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);
#endif
}
EXPORT_SYMBOL(drm_dev_unplug);
587 
588 #ifdef __linux__
589 /*
590  * DRM internal mount
591  * We want to be able to allocate our own "struct address_space" to control
592  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
593  * stand-alone address_space objects, so we need an underlying inode. As there
594  * is no way to allocate an independent inode easily, we need a fake internal
595  * VFS mount-point.
596  *
597  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
598  * frees it again. You are allowed to use iget() and iput() to get references to
599  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
600  * drm_fs_inode_free() call (which does not have to be the last iput()).
601  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
602  * between multiple inode-users. You could, technically, call
603  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
604  * iput(), but this way you'd end up with a new vfsmount for each inode.
605  */
/* refcount and mountpoint of the shared internal "drm" pseudo fs */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;
609 
610 static int drm_fs_init_fs_context(struct fs_context *fc)
611 {
612 	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
613 }
614 
/* filesystem type backing the internal anonymous inodes (Linux only) */
static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
621 
622 static struct inode *drm_fs_inode_new(void)
623 {
624 	struct inode *inode;
625 	int r;
626 
627 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
628 	if (r < 0) {
629 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
630 		return ERR_PTR(r);
631 	}
632 
633 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
634 	if (IS_ERR(inode))
635 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
636 
637 	return inode;
638 }
639 
640 static void drm_fs_inode_free(struct inode *inode)
641 {
642 	if (inode) {
643 		iput(inode);
644 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
645 	}
646 }
647 
648 #endif /* __linux__ */
649 
650 /**
651  * DOC: component helper usage recommendations
652  *
653  * DRM drivers that drive hardware where a logical device consists of a pile of
654  * independent hardware blocks are recommended to use the :ref:`component helper
655  * library<component>`. For consistency and better options for code reuse the
656  * following guidelines apply:
657  *
658  *  - The entire device initialization procedure should be run from the
659  *    &component_master_ops.master_bind callback, starting with drm_dev_init(),
660  *    then binding all components with component_bind_all() and finishing with
661  *    drm_dev_register().
662  *
663  *  - The opaque pointer passed to all components through component_bind_all()
664  *    should point at &struct drm_device of the device instance, not some driver
665  *    specific private structure.
666  *
667  *  - The component helper fills the niche where further standardization of
668  *    interfaces is not practical. When there already is, or will be, a
669  *    standardized interface like &drm_bridge or &drm_panel, providing its own
670  *    functions to find such components at driver load time, like
671  *    drm_of_find_panel_or_bridge(), then the component helper should not be
672  *    used.
673  */
674 
675 /**
676  * drm_dev_init - Initialise new DRM device
677  * @dev: DRM device
678  * @driver: DRM driver
679  * @parent: Parent device object
680  *
681  * Initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
683  * with other core subsystems. This should be done last in the device
684  * initialization sequence to make sure userspace can't access an inconsistent
685  * state.
686  *
687  * The initial ref-count of the object is 1. Use drm_dev_get() and
688  * drm_dev_put() to take and drop further ref-counts.
689  *
690  * It is recommended that drivers embed &struct drm_device into their own device
691  * structure.
692  *
693  * Drivers that do not want to allocate their own device struct
694  * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
695  * that do embed &struct drm_device it must be placed first in the overall
696  * structure, and the overall structure must be allocated using kmalloc(): The
697  * drm core's release function unconditionally calls kfree() on the @dev pointer
698  * when the final reference is released. To override this behaviour, and so
699  * allow embedding of the drm_device inside the driver's device struct at an
700  * arbitrary offset, you must supply a &drm_driver.release callback and control
701  * the finalization explicitly.
702  *
703  * RETURNS:
704  * 0 on success, or error code on failure.
705  */
706 int drm_dev_init(struct drm_device *dev,
707 		 struct drm_driver *driver,
708 		 struct device *parent)
709 {
710 	int ret;
711 
712 	if (!drm_core_init_complete) {
713 		DRM_ERROR("DRM core is not initialized\n");
714 		return -ENODEV;
715 	}
716 
717 	if (WARN_ON(!parent))
718 		return -EINVAL;
719 
720 	kref_init(&dev->ref);
721 #ifdef __linux__
722 	dev->dev = get_device(parent);
723 #endif
724 	dev->driver = driver;
725 
726 	/* no per-device feature limits by default */
727 	dev->driver_features = ~0u;
728 
729 	drm_legacy_init_members(dev);
730 #ifdef notyet
731 	INIT_LIST_HEAD(&dev->filelist);
732 #else
733 	SPLAY_INIT(&dev->files);
734 #endif
735 	INIT_LIST_HEAD(&dev->filelist_internal);
736 	INIT_LIST_HEAD(&dev->clientlist);
737 	INIT_LIST_HEAD(&dev->vblank_event_list);
738 
739 	mtx_init(&dev->event_lock, IPL_TTY);
740 	mtx_init(&dev->event_lock, IPL_TTY);
741 	rw_init(&dev->struct_mutex, "drmdevlk");
742 	rw_init(&dev->filelist_mutex, "drmflist");
743 	rw_init(&dev->clientlist_mutex, "drmclist");
744 	rw_init(&dev->master_mutex, "drmmast");
745 
746 #ifdef __linux__
747 	dev->anon_inode = drm_fs_inode_new();
748 	if (IS_ERR(dev->anon_inode)) {
749 		ret = PTR_ERR(dev->anon_inode);
750 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
751 		goto err_free;
752 	}
753 #endif
754 
755 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
756 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
757 		if (ret)
758 			goto err_minors;
759 	}
760 
761 	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
762 	if (ret)
763 		goto err_minors;
764 
765 	ret = drm_legacy_create_map_hash(dev);
766 	if (ret)
767 		goto err_minors;
768 
769 	drm_legacy_ctxbitmap_init(dev);
770 
771 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
772 		ret = drm_gem_init(dev);
773 		if (ret) {
774 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
775 			goto err_ctxbitmap;
776 		}
777 	}
778 
779 	ret = drm_dev_set_unique(dev, dev_name(parent));
780 	if (ret)
781 		goto err_setunique;
782 
783 	return 0;
784 
785 err_setunique:
786 	if (drm_core_check_feature(dev, DRIVER_GEM))
787 		drm_gem_destroy(dev);
788 err_ctxbitmap:
789 	drm_legacy_ctxbitmap_cleanup(dev);
790 	drm_legacy_remove_map_hash(dev);
791 err_minors:
792 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
793 	drm_minor_free(dev, DRM_MINOR_RENDER);
794 #ifdef __linux__
795 	drm_fs_inode_free(dev->anon_inode);
796 err_free:
797 	put_device(dev->dev);
798 #endif
799 	mutex_destroy(&dev->master_mutex);
800 	mutex_destroy(&dev->clientlist_mutex);
801 	mutex_destroy(&dev->filelist_mutex);
802 	mutex_destroy(&dev->struct_mutex);
803 	drm_legacy_destroy_members(dev);
804 	return ret;
805 }
806 EXPORT_SYMBOL(drm_dev_init);
807 
#ifdef notyet
/* devres action: drop the devm-managed device reference on detach */
static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}
#endif
814 
815 /**
816  * devm_drm_dev_init - Resource managed drm_dev_init()
817  * @parent: Parent device object
818  * @dev: DRM device
819  * @driver: DRM driver
820  *
821  * Managed drm_dev_init(). The DRM device initialized with this function is
822  * automatically put on driver detach using drm_dev_put(). You must supply a
823  * &drm_driver.release callback to control the finalization explicitly.
824  *
825  * RETURNS:
826  * 0 on success, or error code on failure.
827  */
int devm_drm_dev_init(struct device *parent,
		      struct drm_device *dev,
		      struct drm_driver *driver)
{
	/* devres is not available on OpenBSD; always fail for now */
	STUB();
	return -ENOSYS;
#ifdef notyet
	int ret;

	/* a managed device must finalize itself via its release callback */
	if (WARN_ON(!driver->release))
		return -EINVAL;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	/* arrange for drm_dev_put() when @parent detaches */
	ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
	if (ret)
		devm_drm_dev_init_release(dev);

	return ret;
#endif
}
EXPORT_SYMBOL(devm_drm_dev_init);
852 
853 /**
854  * drm_dev_fini - Finalize a dead DRM device
855  * @dev: DRM device
856  *
857  * Finalize a dead DRM device. This is the converse to drm_dev_init() and
858  * frees up all data allocated by it. All driver private data should be
859  * finalized first. Note that this function does not free the @dev, that is
860  * left to the caller.
861  *
862  * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
863  * from a &drm_driver.release callback.
864  */
void drm_dev_fini(struct drm_device *dev)
{
	/* teardown mirrors drm_dev_init() in reverse order */
	drm_vblank_cleanup(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
#ifdef __linux__
	drm_fs_inode_free(dev->anon_inode);
#endif

	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);

#ifdef __linux__
	put_device(dev->dev);
#endif

	/* locks last, once nothing above can still take them */
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
	kfree(dev->unique);
}
EXPORT_SYMBOL(drm_dev_fini);
893 
894 /**
895  * drm_dev_alloc - Allocate new DRM device
896  * @driver: DRM driver to allocate device for
897  * @parent: Parent device object
898  *
899  * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
901  * with other core subsystems. This should be done last in the device
902  * initialization sequence to make sure userspace can't access an inconsistent
903  * state.
904  *
905  * The initial ref-count of the object is 1. Use drm_dev_get() and
906  * drm_dev_put() to take and drop further ref-counts.
907  *
908  * Note that for purely virtual devices @parent can be NULL.
909  *
910  * Drivers that wish to subclass or embed &struct drm_device into their
911  * own struct should look at using drm_dev_init() instead.
912  *
913  * RETURNS:
914  * Pointer to new DRM device, or ERR_PTR on failure.
915  */
916 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
917 				 struct device *parent)
918 {
919 	struct drm_device *dev;
920 	int ret;
921 
922 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
923 	if (!dev)
924 		return ERR_PTR(-ENOMEM);
925 
926 	ret = drm_dev_init(dev, driver, parent);
927 	if (ret) {
928 		kfree(dev);
929 		return ERR_PTR(ret);
930 	}
931 
932 	return dev;
933 }
934 EXPORT_SYMBOL(drm_dev_alloc);
935 
936 static void drm_dev_release(struct kref *ref)
937 {
938 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
939 
940 	if (dev->driver->release) {
941 		dev->driver->release(dev);
942 	} else {
943 		drm_dev_fini(dev);
944 		kfree(dev);
945 	}
946 }
947 
948 /**
949  * drm_dev_get - Take reference of a DRM device
950  * @dev: device to take reference of or NULL
951  *
952  * This increases the ref-count of @dev by one. You *must* already own a
953  * reference when calling this. Use drm_dev_put() to drop this reference
954  * again.
955  *
956  * This function never fails. However, this function does not provide *any*
957  * guarantee whether the device is alive or running. It only provides a
958  * reference to the object and the memory associated with it.
959  */
960 void drm_dev_get(struct drm_device *dev)
961 {
962 	if (dev)
963 		kref_get(&dev->ref);
964 }
965 EXPORT_SYMBOL(drm_dev_get);
966 
967 /**
968  * drm_dev_put - Drop reference of a DRM device
969  * @dev: device to drop reference of or NULL
970  *
971  * This decreases the ref-count of @dev by one. The device is destroyed if the
972  * ref-count drops to zero.
973  */
974 void drm_dev_put(struct drm_device *dev)
975 {
976 	if (dev)
977 		kref_put(&dev->ref, drm_dev_release);
978 }
979 EXPORT_SYMBOL(drm_dev_put);
980 
981 static int create_compat_control_link(struct drm_device *dev)
982 {
983 	struct drm_minor *minor;
984 	char *name;
985 	int ret;
986 
987 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
988 		return 0;
989 
990 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
991 	if (!minor)
992 		return 0;
993 
994 	/*
995 	 * Some existing userspace out there uses the existing of the controlD*
996 	 * sysfs files to figure out whether it's a modeset driver. It only does
997 	 * readdir, hence a symlink is sufficient (and the least confusing
998 	 * option). Otherwise controlD* is entirely unused.
999 	 *
1000 	 * Old controlD chardev have been allocated in the range
1001 	 * 64-127.
1002 	 */
1003 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
1004 	if (!name)
1005 		return -ENOMEM;
1006 
1007 	ret = sysfs_create_link(minor->kdev->kobj.parent,
1008 				&minor->kdev->kobj,
1009 				name);
1010 
1011 	kfree(name);
1012 
1013 	return ret;
1014 }
1015 
1016 static void remove_compat_control_link(struct drm_device *dev)
1017 {
1018 	struct drm_minor *minor;
1019 	char *name;
1020 
1021 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
1022 		return;
1023 
1024 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
1025 	if (!minor)
1026 		return;
1027 
1028 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
1029 	if (!name)
1030 		return;
1031 
1032 	sysfs_remove_link(minor->kdev->kobj.parent, name);
1033 
1034 	kfree(name);
1035 }
1036 
1037 /**
1038  * drm_dev_register - Register DRM device
1039  * @dev: Device to register
1040  * @flags: Flags passed to the driver's .load() function
1041  *
1042  * Register the DRM device @dev with the system, advertise device to user-space
1043  * and start normal device operation. @dev must be initialized via drm_dev_init()
1044  * previously.
1045  *
1046  * Never call this twice on any device!
1047  *
1048  * NOTE: To ensure backward compatibility with existing drivers method this
1049  * function calls the &drm_driver.load method after registering the device
1050  * nodes, creating race conditions. Usage of the &drm_driver.load methods is
1051  * therefore deprecated, drivers must perform all initialization before calling
1052  * drm_dev_register().
1053  *
1054  * RETURNS:
1055  * 0 on success, negative error code on failure.
1056  */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	struct drm_driver *driver = dev->driver;
	int ret;

	/* Drivers without per-device locking serialize on the global mutex. */
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	/* Legacy controlD* symlink some userspace uses for modeset detection. */
	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	/* Visible before ->load runs; see the race note in the header comment. */
	dev->registered = true;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	ret = 0;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary->index);

	goto out_unlock;

err_minors:
	/*
	 * Unwind everything registered above.  NOTE(review): this path
	 * assumes the unregister/remove calls tolerate minors and links
	 * that were never set up -- confirm against their definitions.
	 */
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
1107 EXPORT_SYMBOL(drm_dev_register);
1108 
1109 /**
1110  * drm_dev_unregister - Unregister DRM device
1111  * @dev: Device to unregister
1112  *
1113  * Unregister the DRM device from the system. This does the reverse of
1114  * drm_dev_register() but does not deallocate the device. The caller must call
1115  * drm_dev_put() to drop their final reference.
1116  *
1117  * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
1118  * which can be called while there are still open users of @dev.
1119  *
1120  * This should be called first in the device teardown code to make sure
1121  * userspace can't access the device instance any more.
1122  */
void drm_dev_unregister(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	/* Stop new ioctls/opens from reaching the device. */
	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

#if IS_ENABLED(CONFIG_AGP)
	if (dev->agp)
		drm_agp_takedown(dev);
#endif

	drm_legacy_rmmaps(dev);

	/* Reverse of the minor/compat-link setup done in drm_dev_register. */
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
1149 EXPORT_SYMBOL(drm_dev_unregister);
1150 
1151 /**
1152  * drm_dev_set_unique - Set the unique name of a DRM device
1153  * @dev: device of which to set the unique name
1154  * @name: unique name
1155  *
1156  * Sets the unique name of a DRM device using the specified string. This is
1157  * already done by drm_dev_init(), drivers should only override the default
1158  * unique name for backwards compatibility reasons.
1159  *
1160  * Return: 0 on success or a negative error code on failure.
1161  */
1162 int drm_dev_set_unique(struct drm_device *dev, const char *name)
1163 {
1164 	kfree(dev->unique);
1165 	dev->unique = kstrdup(name, GFP_KERNEL);
1166 
1167 	return dev->unique ? 0 : -ENOMEM;
1168 }
1169 EXPORT_SYMBOL(drm_dev_set_unique);
1170 
1171 /*
1172  * DRM Core
1173  * The DRM core module initializes all global DRM objects and makes them
1174  * available to drivers. Once setup, drivers can probe their respective
1175  * devices.
1176  * Currently, core management includes:
1177  *  - The "DRM-Global" key/value database
1178  *  - Global ID management for connectors
1179  *  - DRM major number allocation
1180  *  - DRM minor management
1181  *  - DRM sysfs class
1182  *  - DRM debugfs root
1183  *
1184  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
1185  * interface registered on a DRM device, you can request minor numbers from DRM
1186  * core. DRM core takes care of major-number management and char-dev
1187  * registration. A stub ->open() callback forwards any open() requests to the
1188  * registered minor.
1189  */
1190 
1191 #ifdef __linux__
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	/* Resolve the chardev minor number to its registered DRM minor. */
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	/* Swap in the driver's real fops for all subsequent file calls. */
	replace_fops(filp, new_fops);
	/* Forward this open() to the driver's own open hook, if any. */
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}
1221 
/*
 * Default fops installed on the DRM major; open() resolves the minor
 * and replaces these with the driver's real file_operations.
 */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
1227 #endif /* __linux__ */
1228 
static void drm_core_exit(void)
{
#ifdef __linux__
	/* Tear down the chardev/debugfs/sysfs plumbing (Linux only). */
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
#endif
	/* Release the global minor and connector-ID tables. */
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}
1239 
static int __init drm_core_init(void)
{
#ifdef __linux__
	int ret;
#endif

	/* Global tables shared by all DRM devices. */
	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

#ifdef __linux__
	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	/* All minors share DRM_MAJOR; opens are demuxed by drm_stub_open. */
	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;
#endif

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;
#ifdef __linux__
error:
	drm_core_exit();
	return ret;
#endif
}
1273 
1274 #ifdef __linux__
1275 module_init(drm_core_init);
1276 module_exit(drm_core_exit);
1277 #endif
1278 
1279 void
1280 drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot,
1281     bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm)
1282 {
1283 	struct drm_attach_args arg;
1284 
1285 	memset(&arg, 0, sizeof(arg));
1286 	arg.driver = driver;
1287 	arg.bst = iot;
1288 	arg.dmat = dmat;
1289 	arg.drm = drm;
1290 
1291 	arg.busid = dev->dv_xname;
1292 	arg.busid_len = strlen(dev->dv_xname) + 1;
1293 	config_found_sm(dev, &arg, drmprint, drmsubmatch);
1294 }
1295 
1296 struct drm_device *
1297 drm_attach_pci(struct drm_driver *driver, struct pci_attach_args *pa,
1298     int is_agp, int primary, struct device *dev, struct drm_device *drm)
1299 {
1300 	struct drm_attach_args arg;
1301 	struct drm_softc *sc;
1302 
1303 	arg.drm = drm;
1304 	arg.driver = driver;
1305 	arg.dmat = pa->pa_dmat;
1306 	arg.bst = pa->pa_memt;
1307 	arg.is_agp = is_agp;
1308 	arg.primary = primary;
1309 	arg.pa = pa;
1310 
1311 	arg.busid_len = 20;
1312 	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
1313 	if (arg.busid == NULL) {
1314 		printf("%s: no memory for drm\n", dev->dv_xname);
1315 		return (NULL);
1316 	}
1317 	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
1318 	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
1319 
1320 	sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
1321 	if (sc == NULL)
1322 		return NULL;
1323 
1324 	return sc->sc_drm;
1325 }
1326 
1327 int
1328 drmprint(void *aux, const char *pnp)
1329 {
1330 	if (pnp != NULL)
1331 		printf("drm at %s", pnp);
1332 	return (UNCONF);
1333 }
1334 
1335 int
1336 drmsubmatch(struct device *parent, void *match, void *aux)
1337 {
1338 	extern struct cfdriver drm_cd;
1339 	struct cfdata *cf = match;
1340 
1341 	/* only allow drm to attach */
1342 	if (cf->cf_driver == &drm_cd)
1343 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
1344 	return (0);
1345 }
1346 
1347 int
1348 drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist)
1349 {
1350 	const struct pci_device_id *id_entry;
1351 
1352 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
1353 	    PCI_PRODUCT(pa->pa_id), idlist);
1354 	if (id_entry != NULL)
1355 		return 1;
1356 
1357 	return 0;
1358 }
1359 
1360 int
1361 drm_probe(struct device *parent, void *match, void *aux)
1362 {
1363 	struct cfdata *cf = match;
1364 	struct drm_attach_args *da = aux;
1365 
1366 	if (cf->drmdevcf_primary != DRMDEVCF_PRIMARY_UNK) {
1367 		/*
1368 		 * If primary-ness of device specified, either match
1369 		 * exactly (at high priority), or fail.
1370 		 */
1371 		if (cf->drmdevcf_primary != 0 && da->primary != 0)
1372 			return (10);
1373 		else
1374 			return (0);
1375 	}
1376 
1377 	/* If primary-ness unspecified, it wins. */
1378 	return (1);
1379 }
1380 
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_attach_args *da = aux;
	struct drm_device *dev = da->drm;
	int ret;

	/* The first drm instance brings up the shared core state. */
	if (drm_refcnt == 0)
		drm_core_init();
	drm_refcnt++;

	drm_linux_init();

	/* Drivers may embed the drm_device; otherwise allocate one here. */
	if (dev == NULL) {
		dev = malloc(sizeof(struct drm_device), M_DRM,
		    M_WAITOK | M_ZERO);
		sc->sc_allocated = 1;
	}

	sc->sc_drm = dev;

	dev->dev = self;
	dev->dev_private = parent;
	dev->driver = da->driver;

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->unique = da->busid;

	if (da->pa) {
		/*
		 * PCI attachment: mirror the attach args into a
		 * Linux-style pci_dev so shared DRM code can consume it.
		 */
		struct pci_attach_args *pa = da->pa;
		pcireg_t subsys;

		subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);

		dev->pdev = &dev->_pdev;
		dev->pdev->vendor = PCI_VENDOR(pa->pa_id);
		dev->pdev->device = PCI_PRODUCT(pa->pa_id);
		dev->pdev->subsystem_vendor = PCI_VENDOR(subsys);
		dev->pdev->subsystem_device = PCI_PRODUCT(subsys);
		dev->pdev->revision = PCI_REVISION(pa->pa_class);

		dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
		dev->pdev->bus = &dev->pdev->_bus;
		dev->pdev->bus->pc = pa->pa_pc;
		dev->pdev->bus->number = pa->pa_bus;
		dev->pdev->bus->domain_nr = pa->pa_domain;
		dev->pdev->bus->bridgetag = pa->pa_bridgetag;

		/* Fake a pci_dev for the parent bridge, when there is one. */
		if (pa->pa_bridgetag != NULL) {
			dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
			    M_DRM, M_WAITOK | M_ZERO);
			dev->pdev->bus->self->pc = pa->pa_pc;
			dev->pdev->bus->self->tag = *pa->pa_bridgetag;
		}

		dev->pdev->pc = pa->pa_pc;
		dev->pdev->tag = pa->pa_tag;
		dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

#ifdef CONFIG_ACPI
		/* Register for ACPI notifications on this device's node. */
		dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
		aml_register_notify(dev->pdev->dev.node, NULL,
		    drm_linux_acpi_notify, NULL, ACPIDEV_NOPOLL);
#endif
	}

	/* Per-device locks; event_lock runs at tty interrupt level. */
	mtx_init(&dev->quiesce_mtx, IPL_NONE);
	mtx_init(&dev->event_lock, IPL_TTY);
	rw_init(&dev->struct_mutex, "drmdevlk");
	rw_init(&dev->filelist_mutex, "drmflist");
	rw_init(&dev->clientlist_mutex, "drmclist");
	rw_init(&dev->master_mutex, "drmmast");

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto error;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto error;

	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if IS_ENABLED(CONFIG_AGP)
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		/* Enable write-combining over the AGP aperture. */
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	/* Back GEM objects with a pool sized by the driver's object size. */
	if (dev->driver->gem_size > 0) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
		    "drmobjpl", NULL);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	/* Terminate the autoconf attach line. */
	printf("\n");
	return;

error:
	/* NOTE(review): dev is not freed here even when sc_allocated;
	 * detach handles that -- confirm no leak on attach failure. */
	drm_lastclose(dev);
	dev->dev_private = NULL;
}
1509 
int
drm_detach(struct device *self, int flags)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_device *dev = sc->sc_drm;

	/* Last detach tears down the shared DRM core state. */
	drm_refcnt--;
	if (drm_refcnt == 0)
		drm_core_exit();

	drm_lastclose(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		drm_gem_destroy(dev);

		if (dev->driver->gem_size > 0)
			pool_destroy(&dev->objpl);
	}

	drm_vblank_cleanup(dev);

	/* Undo the write-combining MTRR added over the AGP aperture. */
	if (dev->agp && dev->agp->mtrr) {
		int retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	free(dev->agp, M_DRM, 0);
	/* Free the fake bridge pci_dev allocated in drm_attach. */
	if (dev->pdev && dev->pdev->bus)
		free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));

	/* Only free the drm_device if drm_attach allocated it. */
	if (sc->sc_allocated)
		free(dev, M_DRM, sizeof(struct drm_device));

	return 0;
}
1548 
/*
 * Pause the device for suspend: raise the quiesce flag and sleep until
 * all operations that hold a quiesce_count reference have drained.
 */
void
drm_quiesce(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		/* Woken when quiesce_count drops (by the count holders). */
		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", INFSLP);
	}
	mtx_leave(&dev->quiesce_mtx);
}
1560 
/*
 * Resume after drm_quiesce(): clear the flag and wake any threads that
 * blocked on it while the device was quiesced.
 */
void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
1569 
1570 int
1571 drm_activate(struct device *self, int act)
1572 {
1573 	struct drm_softc *sc = (struct drm_softc *)self;
1574 	struct drm_device *dev = sc->sc_drm;
1575 
1576 	switch (act) {
1577 	case DVACT_QUIESCE:
1578 		drm_quiesce(dev);
1579 		break;
1580 	case DVACT_WAKEUP:
1581 		drm_wakeup(dev);
1582 		break;
1583 	}
1584 
1585 	return (0);
1586 }
1587 
/* Autoconf glue: probe/attach/detach/activate entry points for drm(4). */
struct cfattach drm_ca = {
	sizeof(struct drm_softc), drm_probe, drm_attach,
	drm_detach, drm_activate
};

/* Driver definition; "drm" is the device name, no device class. */
struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
1596 
1597 const struct pci_device_id *
1598 drm_find_description(int vendor, int device, const struct pci_device_id *idlist)
1599 {
1600 	int i = 0;
1601 
1602 	for (i = 0; idlist[i].vendor != 0; i++) {
1603 		if ((idlist[i].vendor == vendor) &&
1604 		    (idlist[i].device == device) &&
1605 		    (idlist[i].subvendor == PCI_ANY_ID) &&
1606 		    (idlist[i].subdevice == PCI_ANY_ID))
1607 			return &idlist[i];
1608 	}
1609 	return NULL;
1610 }
1611 
1612 int
1613 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
1614 {
1615 	return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor);
1616 }
1617 
/* Generate the splay-tree functions for the per-device open-file tree. */
SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
1619 
1620 struct drm_file *
1621 drm_find_file_by_minor(struct drm_device *dev, int minor)
1622 {
1623 	struct drm_file	key;
1624 
1625 	key.fminor = minor;
1626 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
1627 }
1628 
1629 struct drm_device *
1630 drm_get_device_from_kdev(dev_t kdev)
1631 {
1632 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
1633 	/* control */
1634 	if (unit >= 64 && unit < 128)
1635 		unit -= 64;
1636 	/* render */
1637 	if (unit >= 128)
1638 		unit -= 128;
1639 	struct drm_softc *sc;
1640 
1641 	if (unit < drm_cd.cd_ndevs) {
1642 		sc = (struct drm_softc *)drm_cd.cd_devs[unit];
1643 		if (sc)
1644 			return sc->sc_drm;
1645 	}
1646 
1647 	return NULL;
1648 }
1649 
1650 void
1651 filt_drmdetach(struct knote *kn)
1652 {
1653 	struct drm_device *dev = kn->kn_hook;
1654 	int s;
1655 
1656 	s = spltty();
1657 	klist_remove(&dev->note, kn);
1658 	splx(s);
1659 }
1660 
1661 int
1662 filt_drmkms(struct knote *kn, long hint)
1663 {
1664 	if (kn->kn_sfflags & hint)
1665 		kn->kn_fflags |= hint;
1666 	return (kn->kn_fflags != 0);
1667 }
1668 
1669 void
1670 filt_drmreaddetach(struct knote *kn)
1671 {
1672 	struct drm_file		*file_priv = kn->kn_hook;
1673 	int s;
1674 
1675 	s = spltty();
1676 	klist_remove(&file_priv->rsel.si_note, kn);
1677 	splx(s);
1678 }
1679 
int
filt_drmread(struct knote *kn, long hint)
{
	struct drm_file		*file_priv = kn->kn_hook;
	int			 val = 0;

	/*
	 * NOTE_SUBMIT indicates the caller already holds event_lock
	 * (filter run from the event-submission path), so only take the
	 * lock ourselves for a plain poll-style query.
	 */
	if ((hint & NOTE_SUBMIT) == 0)
		mtx_enter(&file_priv->minor->dev->event_lock);
	val = !list_empty(&file_priv->event_list);
	if ((hint & NOTE_SUBMIT) == 0)
		mtx_leave(&file_priv->minor->dev->event_lock);
	return (val);
}
1693 
/* EVFILT_DEVICE filter: device-wide notes, latched via filt_drmkms. */
const struct filterops drm_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmdetach,
	.f_event	= filt_drmkms,
};
1700 
/* EVFILT_READ filter: fires while the open file has pending events. */
const struct filterops drmread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmreaddetach,
	.f_event	= filt_drmread,
};
1707 
int
drmkqfilter(dev_t kdev, struct knote *kn)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv = NULL;
	int			 s;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/* Per-open-file filter: fires when events are queued. */
		mutex_lock(&dev->struct_mutex);
		file_priv = drm_find_file_by_minor(dev, minor(kdev));
		mutex_unlock(&dev->struct_mutex);
		if (file_priv == NULL)
			return (ENXIO);

		kn->kn_fop = &drmread_filtops;
		kn->kn_hook = file_priv;

		s = spltty();
		klist_insert(&file_priv->rsel.si_note, kn);
		splx(s);
		break;
	case EVFILT_DEVICE:
		/* Device-wide filter for driver-generated notifications. */
		kn->kn_fop = &drm_filtops;
		kn->kn_hook = dev;

		s = spltty();
		klist_insert(&dev->note, kn);
		splx(s);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
1748 
1749 int
1750 drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
1751 {
1752 	struct drm_device	*dev = NULL;
1753 	struct drm_file		*file_priv;
1754 	struct drm_minor	*dm;
1755 	int			 ret = 0;
1756 	int			 dminor, realminor, minor_type;
1757 	int need_setup = 0;
1758 
1759 	dev = drm_get_device_from_kdev(kdev);
1760 	if (dev == NULL || dev->dev_private == NULL)
1761 		return (ENXIO);
1762 
1763 	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
1764 
1765 	if (flags & O_EXCL)
1766 		return (EBUSY); /* No exclusive opens */
1767 
1768 	if (drm_dev_needs_global_mutex(dev))
1769 		mutex_lock(&drm_global_mutex);
1770 
1771 	if (!atomic_fetch_inc(&dev->open_count))
1772 		need_setup = 1;
1773 
1774 	dminor = minor(kdev);
1775 	realminor =  dminor & ((1 << CLONE_SHIFT) - 1);
1776 	if (realminor < 64)
1777 		minor_type = DRM_MINOR_PRIMARY;
1778 	else if (realminor >= 64 && realminor < 128)
1779 		minor_type = DRM_MINOR_CONTROL;
1780 	else
1781 		minor_type = DRM_MINOR_RENDER;
1782 
1783 	dm = *drm_minor_get_slot(dev, minor_type);
1784 	dm->index = minor(kdev);
1785 
1786 	file_priv = drm_file_alloc(dm);
1787 	if (IS_ERR(file_priv)) {
1788 		ret = ENOMEM;
1789 		goto err;
1790 	}
1791 
1792 	/* first opener automatically becomes master */
1793 	if (drm_is_primary_client(file_priv)) {
1794 		ret = drm_master_open(file_priv);
1795 		if (ret != 0)
1796 			goto out_file_free;
1797 	}
1798 
1799 	file_priv->filp = (void *)file_priv;
1800 	file_priv->fminor = minor(kdev);
1801 
1802 	mutex_lock(&dev->filelist_mutex);
1803 	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
1804 	mutex_unlock(&dev->filelist_mutex);
1805 
1806 	if (need_setup) {
1807 		ret = drm_legacy_setup(dev);
1808 		if (ret)
1809 			goto out_file_free;
1810 	}
1811 
1812 	if (drm_dev_needs_global_mutex(dev))
1813 		mutex_unlock(&drm_global_mutex);
1814 
1815 	return 0;
1816 
1817 out_file_free:
1818 	drm_file_free(file_priv);
1819 err:
1820 	atomic_dec(&dev->open_count);
1821 	if (drm_dev_needs_global_mutex(dev))
1822 		mutex_unlock(&drm_global_mutex);
1823 	return (ret);
1824 }
1825 
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	/* Find and detach the drm_file created for this minor at open. */
	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		mutex_unlock(&dev->filelist_mutex);
		goto done;
	}

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);
	drm_file_free(file_priv);
done:
	/* The last close tears down legacy device state. */
	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return (retcode);
}
1862 
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int		 		 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->filelist_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		/* Non-blocking read with nothing queued. */
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", INFSLP);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	/* drm_dequeue_event drops event_lock before returning. */
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		kfree(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
1913 
1914 /*
1915  * Deqeue an event from the file priv in question. returning 1 if an
1916  * event was found. We take the resid from the read as a parameter because
1917  * we will only dequeue and event if the read buffer has space to fit the
1918  * entire thing.
1919  *
1920  * We are called locked, but we will *unlock* the queue on return so that
1921  * we may sleep to copyout the event.
1922  */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event *e = NULL;
	int gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
			     struct drm_pending_event, link);
	/* Only hand out the event if the whole thing fits in the buffer. */
	if (e->event->length > resid)
		goto out;

	/* Return the event's bytes to the file's event-space budget. */
	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	gotone = 1;

out:
	/* Intentionally dropped: the caller copies out without the lock. */
	mtx_leave(&dev->event_lock);

	return (gotone);
}
1950 
int
drmpoll(dev_t kdev, int events, struct proc *p)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_file		*file_priv;
	int		 	 revents = 0;

	if (dev == NULL)
		return (POLLERR);

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->filelist_mutex);
	if (file_priv == NULL)
		return (POLLERR);

	/* Readable iff at least one pending event is queued. */
	mtx_enter(&dev->event_lock);
	if (events & (POLLIN | POLLRDNORM)) {
		if (!list_empty(&file_priv->event_list))
			revents |=  events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &file_priv->rsel);
	}
	mtx_leave(&dev->event_lock);

	return (revents);
}
1978 
/* mmap on the drm chardev itself is not supported; always fails. */
paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	return -1;
}
1984 
/*
 * Allocate, map, and load a DMA-safe memory region in one step,
 * returning NULL on any failure with all partial work undone.
 */
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	/*
	 * Create the map, allocate backing pages, map them into kernel
	 * VA, then load the map; unwind in reverse order on failure.
	 */
	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}
2031 
/* Release a region from drm_dmamem_alloc(); NULL is a no-op. */
void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	/* Reverse order of the alloc: unload, unmap, free, destroy. */
	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM, 0);
}
2044 
2045 struct drm_dma_handle *
2046 drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
2047 {
2048 	struct drm_dma_handle *dmah;
2049 
2050 	dmah = malloc(sizeof(*dmah), M_DRM, M_WAITOK);
2051 	dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
2052 	    BUS_DMA_NOCACHE, 0);
2053 	if (dmah->mem == NULL) {
2054 		free(dmah, M_DRM, sizeof(*dmah));
2055 		return NULL;
2056 	}
2057 	dmah->busaddr = dmah->mem->segs[0].ds_addr;
2058 	dmah->size = dmah->mem->size;
2059 	dmah->vaddr = dmah->mem->kva;
2060 	return (dmah);
2061 }
2062 
2063 void
2064 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
2065 {
2066 	if (dmah == NULL)
2067 		return;
2068 
2069 	drm_dmamem_free(dev->dmat, dmah->mem);
2070 	free(dmah, M_DRM, sizeof(*dmah));
2071 }
2072 
2073 /*
2074  * Compute order.  Can be made faster.
2075  */
/*
 * Compute the log2 "order" of an allocation size, rounding up so that
 * (1UL << order) >= size for any nonzero size.  Returns 0 for size 0.
 */
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	/* Index of the highest set bit. */
	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	/*
	 * Round up when size is not an exact power of two.  The shifted
	 * constant must be unsigned long: "1 << order" is an int shift
	 * and is undefined for order >= 31, which happens for sizes
	 * with high bits set on LP64.
	 */
	if (size & ~(1UL << order))
		++order;

	return order;
}
2090 
/*
 * DRM_IOCTL_GET_PCIINFO handler: report the PCI identity recorded in
 * dev->pdev at attach time. Returns -ENOTTY for non-PCI devices.
 */
int
drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_pciinfo *info = data;

	if (dev->pdev == NULL)
		return -ENOTTY;

	info->domain = dev->pdev->bus->domain_nr;
	info->bus = dev->pdev->bus->number;
	info->dev = PCI_SLOT(dev->pdev->devfn);
	info->func = PCI_FUNC(dev->pdev->devfn);
	info->vendor_id = dev->pdev->vendor;
	info->device_id = dev->pdev->device;
	info->subvendor_id = dev->pdev->subsystem_vendor;
	info->subdevice_id = dev->pdev->subsystem_device;
	/*
	 * NOTE(review): reported as 0 even though dev->pdev->revision is
	 * populated at attach -- confirm whether this is intentional.
	 */
	info->revision_id = 0;

	return 0;
}
2111