xref: /openbsd-src/sys/dev/pci/drm/drm_drv.c (revision 3374c67d44f9b75b98444cbf63020f777792342e)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/fcntl.h>
31 #include <sys/specdev.h>
32 #include <sys/vnode.h>
33 
34 #include <machine/bus.h>
35 
36 #ifdef __HAVE_ACPI
37 #include <dev/acpi/acpidev.h>
38 #include <dev/acpi/acpivar.h>
39 #include <dev/acpi/dsdt.h>
40 #endif
41 
42 #include <linux/debugfs.h>
43 #include <linux/fs.h>
44 #include <linux/module.h>
45 #include <linux/moduleparam.h>
46 #include <linux/mount.h>
47 #include <linux/pseudo_fs.h>
48 #include <linux/slab.h>
49 #include <linux/srcu.h>
50 
51 #include <drm/drm_cache.h>
52 #include <drm/drm_client.h>
53 #include <drm/drm_color_mgmt.h>
54 #include <drm/drm_drv.h>
55 #include <drm/drm_file.h>
56 #include <drm/drm_managed.h>
57 #include <drm/drm_mode_object.h>
58 #include <drm/drm_print.h>
59 #include <drm/drm_privacy_screen_machine.h>
60 
61 #include <drm/drm_gem.h>
62 
63 #include "drm_crtc_internal.h"
64 #include "drm_internal.h"
65 #include "drm_legacy.h"
66 
67 MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
68 MODULE_DESCRIPTION("DRM shared core routines");
69 MODULE_LICENSE("GPL and additional rights");
70 
/* Protects drm_minors_idr against concurrent lookup/insert/remove. */
static DEFINE_SPINLOCK(drm_minor_lock);
/* Maps minor indices to drm_minor objects; see drm_minor_acquire(). */
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete;

/* Root of the debugfs "dri" hierarchy (only populated on Linux builds). */
static struct dentry *drm_debugfs_root;

#ifdef notyet
DEFINE_STATIC_SRCU(drm_unplug_srcu);
#endif

/*
 * Some functions are only called once on init regardless of how many times
 * drm attaches.  In linux this is handled via module_init()/module_exit()
 */
int drm_refcnt;

/* Softc wrapping the autoconf device for one drm attachment. */
struct drm_softc {
	struct device		sc_dev;
	struct drm_device 	*sc_drm;	/* the attached drm device */
	int			sc_allocated;	/* NOTE(review): presumably set when
						 * sc_drm was allocated by attach —
						 * confirm against drm_attach code */
};

/* Arguments handed from the bus parent to the drm attachment. */
struct drm_attach_args {
	struct drm_device		*drm;
	const struct drm_driver		*driver;
	char				*busid;
	bus_dma_tag_t			 dmat;
	bus_space_tag_t			 bst;
	size_t				 busid_len;
	int				 is_agp;
	struct pci_attach_args		*pa;
	int				 primary;
};

void	drm_linux_init(void);
void	drm_linux_exit(void);
int	drm_linux_acpi_notify(struct aml_node *, int, void *);

int	drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
	    struct drm_pending_event **);

int	drmprint(void *, const char *);
int	drmsubmatch(struct device *, void *, void *);
const struct pci_device_id *
	drm_find_description(int, int, const struct pci_device_id *);

int	drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);

/* config(8) locator: was this drm device spec'd as the primary one? */
#define DRMDEVCF_PRIMARY	0
#define drmdevcf_primary	cf_loc[DRMDEVCF_PRIMARY]	/* spec'd as primary? */
#define DRMDEVCF_PRIMARY_UNK	-1
131 
132 /*
133  * DRM Minors
134  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
135  * of them is represented by a drm_minor object. Depending on the capabilities
136  * of the device-driver, different interfaces are registered.
137  *
138  * Minors can be accessed via dev->$minor_name. This pointer is either
139  * NULL or a valid drm_minor pointer and stays valid as long as the device is
140  * valid. This means, DRM minors have the same life-time as the underlying
141  * device. However, this doesn't mean that the minor is active. Minors are
142  * registered and unregistered dynamically according to device-state.
143  */
144 
145 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
146 					     unsigned int type)
147 {
148 	switch (type) {
149 	case DRM_MINOR_PRIMARY:
150 		return &dev->primary;
151 	case DRM_MINOR_RENDER:
152 		return &dev->render;
153 	default:
154 		BUG();
155 	}
156 }
157 
/*
 * drmm release action for drm_minor_alloc(): drop the Linux kdev
 * reference and remove the minor's slot from the global idr.
 */
static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	WARN_ON(dev != minor->dev);

#ifdef __linux__
	put_device(minor->kdev);
#endif

	/* drm_minor_lock serializes all access to drm_minors_idr. */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
}
173 
/*
 * Allocate and index a drm_minor of the given @type for @dev.  The minor
 * is drmm-managed: drm_minor_alloc_release() undoes the idr slot (and the
 * Linux kdev reference) automatically when the device is released.
 * Returns 0 on success or a negative errno.
 */
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/*
	 * Reserve an index in the per-type range [64*type, 64*(type+1)).
	 * The slot holds NULL until drm_minor_register() publishes the
	 * minor via idr_replace(), so lookups fail until then.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	/* From here on, cleanup happens via the managed release action. */
	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

#ifdef __linux__
	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);
#endif

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}
215 
/*
 * Publish a previously allocated minor: on Linux set up debugfs and add
 * the char device; on OpenBSD only the idr entry is flipped from NULL to
 * the minor so drm_minor_acquire() can find it.  A minor that was never
 * allocated for this device is not an error.
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
#ifdef __linux__
	int ret;
#endif

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

#ifdef __linux__
	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;
#else
	/* no debugfs hierarchy on this port */
	drm_debugfs_root = NULL;
#endif

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

#ifdef __linux__
err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
#endif
}
258 
/*
 * Hide a published minor again: flip its idr slot back to NULL so new
 * lookups fail, then tear down the Linux chardev/debugfs state.  Safe
 * to call for minors that were never allocated or registered.
 */
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
#ifdef __linux__
	if (!minor || !device_is_registered(minor->kdev))
#else
	if (!minor)
#endif
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

#ifdef __linux__
	device_del(minor->kdev);
#endif
	/*
	 * NOTE(review): this line runs on the !__linux__ build too, even
	 * though kdev is only allocated under __linux__ — confirm kdev is
	 * valid (or dev_set_drvdata tolerates it) in that configuration.
	 */
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
283 
284 /*
285  * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
287  * object with drm_minor_release().
288  *
289  * As long as you hold this minor, it is guaranteed that the object and the
290  * minor->dev pointer will stay valid! However, the device may get unplugged and
291  * unregistered while you hold the minor.
292  */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	/* Take the device reference under the lock so the minor cannot be
	 * unregistered between the lookup and drm_dev_get(). */
	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		/* Device was unplugged after lookup; drop the ref we took. */
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}
313 
/*
 * Release a minor obtained from drm_minor_acquire(), dropping the
 * device reference that was taken there.
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}
318 
319 /**
320  * DOC: driver instance overview
321  *
322  * A device instance for a drm driver is represented by &struct drm_device. This
323  * is allocated and initialized with devm_drm_dev_alloc(), usually from
324  * bus-specific ->probe() callbacks implemented by the driver. The driver then
325  * needs to initialize all the various subsystems for the drm device like memory
326  * management, vblank handling, modesetting support and initial output
327  * configuration plus obviously initialize all the corresponding hardware bits.
328  * Finally when everything is up and running and ready for userspace the device
329  * instance can be published using drm_dev_register().
330  *
331  * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
336  *
337  * When cleaning up a device instance everything needs to be done in reverse:
338  * First unpublish the device instance with drm_dev_unregister(). Then clean up
339  * any other resources allocated at device initialization and drop the driver's
340  * reference to &drm_device using drm_dev_put().
341  *
342  * Note that any allocation or resource which is visible to userspace must be
343  * released only when the final drm_dev_put() is called, and not when the
344  * driver is unbound from the underlying physical struct &device. Best to use
345  * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
346  * related functions.
347  *
348  * devres managed resources like devm_kmalloc() can only be used for resources
349  * directly related to the underlying hardware device, and only used in code
350  * paths fully protected by drm_dev_enter() and drm_dev_exit().
351  *
352  * Display driver example
353  * ~~~~~~~~~~~~~~~~~~~~~~
354  *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that
 * are almost always present and serve as a demonstration of devm_drm_dev_alloc().
358  *
359  * .. code-block:: c
360  *
361  *	struct driver_device {
362  *		struct drm_device drm;
363  *		void *userspace_facing;
364  *		struct clk *pclk;
365  *	};
366  *
367  *	static const struct drm_driver driver_drm_driver = {
368  *		[...]
369  *	};
370  *
371  *	static int driver_probe(struct platform_device *pdev)
372  *	{
373  *		struct driver_device *priv;
374  *		struct drm_device *drm;
375  *		int ret;
376  *
377  *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
378  *					  struct driver_device, drm);
379  *		if (IS_ERR(priv))
380  *			return PTR_ERR(priv);
381  *		drm = &priv->drm;
382  *
383  *		ret = drmm_mode_config_init(drm);
384  *		if (ret)
385  *			return ret;
386  *
387  *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
388  *		if (!priv->userspace_facing)
389  *			return -ENOMEM;
390  *
391  *		priv->pclk = devm_clk_get(dev, "PCLK");
392  *		if (IS_ERR(priv->pclk))
393  *			return PTR_ERR(priv->pclk);
394  *
395  *		// Further setup, display pipeline etc
396  *
397  *		platform_set_drvdata(pdev, drm);
398  *
399  *		drm_mode_config_reset(drm);
400  *
401  *		ret = drm_dev_register(drm);
402  *		if (ret)
403  *			return ret;
404  *
405  *		drm_fbdev_generic_setup(drm, 32);
406  *
407  *		return 0;
408  *	}
409  *
410  *	// This function is called before the devm_ resources are released
411  *	static int driver_remove(struct platform_device *pdev)
412  *	{
413  *		struct drm_device *drm = platform_get_drvdata(pdev);
414  *
415  *		drm_dev_unregister(drm);
416  *		drm_atomic_helper_shutdown(drm)
417  *
418  *		return 0;
419  *	}
420  *
421  *	// This function is called on kernel restart and shutdown
422  *	static void driver_shutdown(struct platform_device *pdev)
423  *	{
424  *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
425  *	}
426  *
427  *	static int __maybe_unused driver_pm_suspend(struct device *dev)
428  *	{
429  *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
430  *	}
431  *
432  *	static int __maybe_unused driver_pm_resume(struct device *dev)
433  *	{
434  *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
435  *
436  *		return 0;
437  *	}
438  *
439  *	static const struct dev_pm_ops driver_pm_ops = {
440  *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
441  *	};
442  *
443  *	static struct platform_driver driver_driver = {
444  *		.driver = {
445  *			[...]
446  *			.pm = &driver_pm_ops,
447  *		},
448  *		.probe = driver_probe,
449  *		.remove = driver_remove,
450  *		.shutdown = driver_shutdown,
451  *	};
452  *	module_platform_driver(driver_driver);
453  *
454  * Drivers that want to support device unplugging (USB, DT overlay unload) should
 * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
 * regions that are accessing device resources to prevent use after they're
457  * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
458  * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
459  * drm_atomic_helper_shutdown() is called. This means that if the disable code
460  * paths are protected, they will not run on regular driver module unload,
461  * possibly leaving the hardware enabled.
462  */
463 
464 /**
465  * drm_put_dev - Unregister and release a DRM device
466  * @dev: DRM device
467  *
468  * Called at module unload time or when a PCI device is unplugged.
469  *
470  * Cleans up all DRM device, calling drm_lastclose().
471  *
472  * Note: Use of this function is deprecated. It will eventually go away
473  * completely.  Please use drm_dev_unregister() and drm_dev_put() explicitly
474  * instead to make sure that the device isn't userspace accessible any more
475  * while teardown is in progress, ensuring that userspace can't access an
476  * inconsistent state.
477  */
478 void drm_put_dev(struct drm_device *dev)
479 {
480 	DRM_DEBUG("\n");
481 
482 	if (!dev) {
483 		DRM_ERROR("cleanup called no dev\n");
484 		return;
485 	}
486 
487 	drm_dev_unregister(dev);
488 	drm_dev_put(dev);
489 }
490 EXPORT_SYMBOL(drm_put_dev);
491 
492 /**
493  * drm_dev_enter - Enter device critical section
494  * @dev: DRM device
495  * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
496  *
497  * This function marks and protects the beginning of a section that should not
498  * be entered after the device has been unplugged. The section end is marked
499  * with drm_dev_exit(). Calls to this function can be nested.
500  *
501  * Returns:
502  * True if it is OK to enter the section, false otherwise.
503  */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
	/*
	 * SRCU-based unplug protection is not wired up in this port yet
	 * ("notyet"), so entry always succeeds and *idx is left untouched;
	 * callers must only hand it back to drm_dev_exit().
	 */
#ifdef notyet
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}
#endif

	return true;
}
EXPORT_SYMBOL(drm_dev_enter);
518 
519 /**
520  * drm_dev_exit - Exit device critical section
521  * @idx: index returned from drm_dev_enter()
522  *
523  * This function marks the end of a section that should not be entered after
524  * the device has been unplugged.
525  */
void drm_dev_exit(int idx)
{
	/* Counterpart of drm_dev_enter(); a no-op until SRCU is wired up. */
#ifdef notyet
	srcu_read_unlock(&drm_unplug_srcu, idx);
#endif
}
EXPORT_SYMBOL(drm_dev_exit);
533 
534 /**
535  * drm_dev_unplug - unplug a DRM device
536  * @dev: DRM device
537  *
538  * This unplugs a hotpluggable DRM device, which makes it inaccessible to
539  * userspace operations. Entry-points can use drm_dev_enter() and
540  * drm_dev_exit() to protect device resources in a race free manner. This
541  * essentially unregisters the device like drm_dev_unregister(), but can be
542  * called while there are still open users of @dev.
543  */
void drm_dev_unplug(struct drm_device *dev)
{
	/* Unplug support is not implemented in this port. */
	STUB();
#ifdef notyet
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);

	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
#endif
}
EXPORT_SYMBOL(drm_dev_unplug);
564 
565 #ifdef __linux__
566 /*
567  * DRM internal mount
568  * We want to be able to allocate our own "struct address_space" to control
569  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
570  * stand-alone address_space objects, so we need an underlying inode. As there
571  * is no way to allocate an independent inode easily, we need a fake internal
572  * VFS mount-point.
573  *
574  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
575  * frees it again. You are allowed to use iget() and iput() to get references to
576  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
577  * drm_fs_inode_free() call (which does not have to be the last iput()).
578  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
579  * between multiple inode-users. You could, technically, call
580  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
581  * iput(), but this way you'd end up with a new vfsmount for each inode.
582  */
583 
/* Shared mount point and pin count for the internal "drm" pseudo fs. */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

/* fs_context init hook: set up a pseudo filesystem with the drm magic. */
static int drm_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
598 
599 static struct inode *drm_fs_inode_new(void)
600 {
601 	struct inode *inode;
602 	int r;
603 
604 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
605 	if (r < 0) {
606 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
607 		return ERR_PTR(r);
608 	}
609 
610 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
611 	if (IS_ERR(inode))
612 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
613 
614 	return inode;
615 }
616 
617 static void drm_fs_inode_free(struct inode *inode)
618 {
619 	if (inode) {
620 		iput(inode);
621 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
622 	}
623 }
624 
625 #endif /* __linux__ */
626 
627 /**
628  * DOC: component helper usage recommendations
629  *
630  * DRM drivers that drive hardware where a logical device consists of a pile of
631  * independent hardware blocks are recommended to use the :ref:`component helper
632  * library<component>`. For consistency and better options for code reuse the
633  * following guidelines apply:
634  *
635  *  - The entire device initialization procedure should be run from the
636  *    &component_master_ops.master_bind callback, starting with
637  *    devm_drm_dev_alloc(), then binding all components with
638  *    component_bind_all() and finishing with drm_dev_register().
639  *
640  *  - The opaque pointer passed to all components through component_bind_all()
641  *    should point at &struct drm_device of the device instance, not some driver
642  *    specific private structure.
643  *
644  *  - The component helper fills the niche where further standardization of
645  *    interfaces is not practical. When there already is, or will be, a
646  *    standardized interface like &drm_bridge or &drm_panel, providing its own
647  *    functions to find such components at driver load time, like
648  *    drm_of_find_panel_or_bridge(), then the component helper should not be
649  *    used.
650  */
651 
/*
 * drmm release action installed by drm_dev_init(): tears down, roughly in
 * reverse order of initialization, the state drm_dev_init() set up that
 * has no dedicated release action of its own.
 */
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
#ifdef __linux__
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dev);
#endif
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
}
670 
671 #ifdef notyet
672 
/*
 * One-time initialization of a drm_device for @driver under @parent.
 * Sets up refcounting, the managed-resource list, locks, minors, the
 * legacy map hash and (feature-dependent) GEM.  On failure everything
 * initialized so far is unwound via drm_managed_release().
 * Returns 0 on success or a negative errno.
 */
static int drm_dev_init(struct drm_device *dev,
			const struct drm_driver *driver,
			struct device *parent)
{
	struct inode *inode;
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);

	/* From here on the above is torn down by the managed action. */
	ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	/* Anonymous inode backing the device's shared address space. */
	inode = drm_fs_inode_new();
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	dev->anon_inode = inode;

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err;

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	/* Default unique name; drivers may override via drm_dev_set_unique(). */
	ret = drm_dev_set_unique(dev, dev_name(parent));
	if (ret)
		goto err;

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}
758 
/* devres action: drop the final drm_device reference on parent teardown. */
static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}
763 
/*
 * Initialize @dev and tie its final drm_dev_put() to @parent's lifetime
 * via devres.  Returns 0 on success or a negative errno.
 */
static int devm_drm_dev_init(struct device *parent,
			     struct drm_device *dev,
			     const struct drm_driver *driver)
{
	int ret;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	/* on action-registration failure this also drops the new device */
	return devm_add_action_or_reset(parent,
					devm_drm_dev_init_release, dev);
}
777 
778 void *__devm_drm_dev_alloc(struct device *parent,
779 			   const struct drm_driver *driver,
780 			   size_t size, size_t offset)
781 {
782 	void *container;
783 	struct drm_device *drm;
784 	int ret;
785 
786 	container = kzalloc(size, GFP_KERNEL);
787 	if (!container)
788 		return ERR_PTR(-ENOMEM);
789 
790 	drm = container + offset;
791 	ret = devm_drm_dev_init(parent, drm, driver);
792 	if (ret) {
793 		kfree(container);
794 		return ERR_PTR(ret);
795 	}
796 	drmm_add_final_kfree(drm, container);
797 
798 	return container;
799 }
800 EXPORT_SYMBOL(__devm_drm_dev_alloc);
801 
802 /**
803  * drm_dev_alloc - Allocate new DRM device
804  * @driver: DRM driver to allocate device for
805  * @parent: Parent device object
806  *
807  * This is the deprecated version of devm_drm_dev_alloc(), which does not support
808  * subclassing through embedding the struct &drm_device in a driver private
809  * structure, and which does not support automatic cleanup through devres.
810  *
811  * RETURNS:
812  * Pointer to new DRM device, or ERR_PTR on failure.
813  */
814 struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
815 				 struct device *parent)
816 {
817 	struct drm_device *dev;
818 	int ret;
819 
820 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
821 	if (!dev)
822 		return ERR_PTR(-ENOMEM);
823 
824 	ret = drm_dev_init(dev, driver, parent);
825 	if (ret) {
826 		kfree(dev);
827 		return ERR_PTR(ret);
828 	}
829 
830 	drmm_add_final_kfree(dev, dev);
831 
832 	return dev;
833 }
834 EXPORT_SYMBOL(drm_dev_alloc);
835 
836 #endif
837 
/*
 * kref release callback for drm_dev_put(): run the driver's optional
 * ->release() hook, then all drmm-managed release actions, and finally
 * free the allocation registered via drmm_add_final_kfree().
 */
static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	kfree(dev->managed.final_kfree);
}
849 
850 /**
851  * drm_dev_get - Take reference of a DRM device
852  * @dev: device to take reference of or NULL
853  *
854  * This increases the ref-count of @dev by one. You *must* already own a
855  * reference when calling this. Use drm_dev_put() to drop this reference
856  * again.
857  *
858  * This function never fails. However, this function does not provide *any*
859  * guarantee whether the device is alive or running. It only provides a
860  * reference to the object and the memory associated with it.
861  */
862 void drm_dev_get(struct drm_device *dev)
863 {
864 	if (dev)
865 		kref_get(&dev->ref);
866 }
867 EXPORT_SYMBOL(drm_dev_get);
868 
869 /**
870  * drm_dev_put - Drop reference of a DRM device
871  * @dev: device to drop reference of or NULL
872  *
873  * This decreases the ref-count of @dev by one. The device is destroyed if the
874  * ref-count drops to zero.
875  */
876 void drm_dev_put(struct drm_device *dev)
877 {
878 	if (dev)
879 		kref_put(&dev->ref, drm_dev_release);
880 }
881 EXPORT_SYMBOL(drm_dev_put);
882 
/*
 * Create the legacy "controlD<n>" sysfs symlink for modeset drivers so
 * old userspace that scans for controlD* entries keeps working.
 * Returns 0 when no link is needed (non-modeset driver or no primary
 * minor), otherwise the sysfs_create_link() result.
 */
static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existing of the controlD*
	 * sysfs files to figure out whether it's a modeset driver. It only does
	 * readdir, hence a symlink is sufficient (and the least confusing
	 * option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardev have been allocated in the range
	 * 64-127.
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}
917 
918 static void remove_compat_control_link(struct drm_device *dev)
919 {
920 	struct drm_minor *minor;
921 	char *name;
922 
923 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
924 		return;
925 
926 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
927 	if (!minor)
928 		return;
929 
930 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
931 	if (!name)
932 		return;
933 
934 	sysfs_remove_link(minor->kdev->kobj.parent, name);
935 
936 	kfree(name);
937 }
938 
939 /**
940  * drm_dev_register - Register DRM device
941  * @dev: Device to register
942  * @flags: Flags passed to the driver's .load() function
943  *
944  * Register the DRM device @dev with the system, advertise device to user-space
945  * and start normal device operation. @dev must be initialized via drm_dev_init()
946  * previously.
947  *
948  * Never call this twice on any device!
949  *
950  * NOTE: To ensure backward compatibility with existing drivers method this
951  * function calls the &drm_driver.load method after registering the device
952  * nodes, creating race conditions. Usage of the &drm_driver.load methods is
953  * therefore deprecated, drivers must perform all initialization before calling
954  * drm_dev_register().
955  *
956  * RETURNS:
957  * 0 on success, negative error code on failure.
958  */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	const struct drm_driver *driver = dev->driver;
	int ret;

	/* Drivers without a legacy ->load hook are fully initialized by
	 * now, so the mode config can be validated before publishing. */
	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	/* From this point on userspace can reach the device. */
	dev->registered = true;

	/* Deprecated ->load runs after the device is already visible. */
	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary->index);

	goto out_unlock;

err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
1013 
1014 /**
1015  * drm_dev_unregister - Unregister DRM device
1016  * @dev: Device to unregister
1017  *
1018  * Unregister the DRM device from the system. This does the reverse of
1019  * drm_dev_register() but does not deallocate the device. The caller must call
1020  * drm_dev_put() to drop their final reference.
1021  *
1022  * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
1023  * which can be called while there are still open users of @dev.
1024  *
1025  * This should be called first in the device teardown code to make sure
1026  * userspace can't access the device instance any more.
1027  */
void drm_dev_unregister(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	/* Stop new userspace access before tearing anything down. */
	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	drm_legacy_pci_agp_destroy(dev);
	drm_legacy_rmmaps(dev);

	/* Reverse of the registration steps in drm_dev_register(). */
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
1050 EXPORT_SYMBOL(drm_dev_unregister);
1051 
1052 /**
1053  * drm_dev_set_unique - Set the unique name of a DRM device
1054  * @dev: device of which to set the unique name
1055  * @name: unique name
1056  *
1057  * Sets the unique name of a DRM device using the specified string. This is
1058  * already done by drm_dev_init(), drivers should only override the default
1059  * unique name for backwards compatibility reasons.
1060  *
1061  * Return: 0 on success or a negative error code on failure.
1062  */
1063 int drm_dev_set_unique(struct drm_device *dev, const char *name)
1064 {
1065 	drmm_kfree(dev, dev->unique);
1066 	dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
1067 
1068 	return dev->unique ? 0 : -ENOMEM;
1069 }
1070 EXPORT_SYMBOL(drm_dev_set_unique);
1071 
1072 /*
1073  * DRM Core
1074  * The DRM core module initializes all global DRM objects and makes them
1075  * available to drivers. Once setup, drivers can probe their respective
1076  * devices.
1077  * Currently, core management includes:
1078  *  - The "DRM-Global" key/value database
1079  *  - Global ID management for connectors
1080  *  - DRM major number allocation
1081  *  - DRM minor management
1082  *  - DRM sysfs class
1083  *  - DRM debugfs root
1084  *
1085  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
1086  * interface registered on a DRM device, you can request minor numbers from DRM
1087  * core. DRM core takes care of major-number management and char-dev
1088  * registration. A stub ->open() callback forwards any open() requests to the
1089  * registered minor.
1090  */
1091 
1092 #ifdef __linux__
/*
 * Initial open() handler for the shared DRM character-device major:
 * look up the minor being opened, swap in the owning driver's
 * file_operations and forward the open() to it.
 */
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	/* All further file operations go straight to the driver. */
	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}

/* Placeholder fops registered on DRM_MAJOR; open() re-dispatches above. */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
1128 #endif /* __linux__ */
1129 
/* Tear down global DRM core state, in reverse order of drm_core_init(). */
static void drm_core_exit(void)
{
	drm_privacy_screen_lookup_exit();
#ifdef __linux__
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
#endif
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}
1141 
/*
 * Initialize global DRM core state.  On Linux this also registers the
 * shared chardev major, the sysfs class and the debugfs root; on
 * OpenBSD only the connector ida, minors idr and memcpy backend are
 * set up here.
 */
static int __init drm_core_init(void)
{
#ifdef __linux__
	int ret;
#endif

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);
	drm_memcpy_init_early();

#ifdef __linux__
	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;
#endif

	drm_privacy_screen_lookup_init();

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;
#ifdef __linux__
error:
	/* NOTE(review): assumes drm_core_exit() tolerates partially
	 * initialized state — verify the individual exit calls. */
	drm_core_exit();
	return ret;
#endif
}
1178 
1179 #ifdef __linux__
1180 module_init(drm_core_init);
1181 module_exit(drm_core_exit);
1182 #endif
1183 
1184 void
1185 drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot,
1186     bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm)
1187 {
1188 	struct drm_attach_args arg;
1189 
1190 	memset(&arg, 0, sizeof(arg));
1191 	arg.driver = driver;
1192 	arg.bst = iot;
1193 	arg.dmat = dmat;
1194 	arg.drm = drm;
1195 
1196 	arg.busid = dev->dv_xname;
1197 	arg.busid_len = strlen(dev->dv_xname) + 1;
1198 	config_found_sm(dev, &arg, drmprint, drmsubmatch);
1199 }
1200 
1201 struct drm_device *
1202 drm_attach_pci(const struct drm_driver *driver, struct pci_attach_args *pa,
1203     int is_agp, int primary, struct device *dev, struct drm_device *drm)
1204 {
1205 	struct drm_attach_args arg;
1206 	struct drm_softc *sc;
1207 
1208 	arg.drm = drm;
1209 	arg.driver = driver;
1210 	arg.dmat = pa->pa_dmat;
1211 	arg.bst = pa->pa_memt;
1212 	arg.is_agp = is_agp;
1213 	arg.primary = primary;
1214 	arg.pa = pa;
1215 
1216 	arg.busid_len = 20;
1217 	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
1218 	if (arg.busid == NULL) {
1219 		printf("%s: no memory for drm\n", dev->dv_xname);
1220 		return (NULL);
1221 	}
1222 	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
1223 	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
1224 
1225 	sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
1226 	if (sc == NULL)
1227 		return NULL;
1228 
1229 	return sc->sc_drm;
1230 }
1231 
1232 int
1233 drmprint(void *aux, const char *pnp)
1234 {
1235 	if (pnp != NULL)
1236 		printf("drm at %s", pnp);
1237 	return (UNCONF);
1238 }
1239 
1240 int
1241 drmsubmatch(struct device *parent, void *match, void *aux)
1242 {
1243 	extern struct cfdriver drm_cd;
1244 	struct cfdata *cf = match;
1245 
1246 	/* only allow drm to attach */
1247 	if (cf->cf_driver == &drm_cd)
1248 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
1249 	return (0);
1250 }
1251 
1252 int
1253 drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist)
1254 {
1255 	const struct pci_device_id *id_entry;
1256 
1257 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
1258 	    PCI_PRODUCT(pa->pa_id), idlist);
1259 	if (id_entry != NULL)
1260 		return 1;
1261 
1262 	return 0;
1263 }
1264 
1265 int
1266 drm_probe(struct device *parent, void *match, void *aux)
1267 {
1268 	struct cfdata *cf = match;
1269 	struct drm_attach_args *da = aux;
1270 
1271 	if (cf->drmdevcf_primary != DRMDEVCF_PRIMARY_UNK) {
1272 		/*
1273 		 * If primary-ness of device specified, either match
1274 		 * exactly (at high priority), or fail.
1275 		 */
1276 		if (cf->drmdevcf_primary != 0 && da->primary != 0)
1277 			return (10);
1278 		else
1279 			return (0);
1280 	}
1281 
1282 	/* If primary-ness unspecified, it wins. */
1283 	return (1);
1284 }
1285 
1286 int drm_buddy_module_init(void);
1287 void drm_buddy_module_exit(void);
1288 
/*
 * Autoconf attach for drm(4).  Initializes (or adopts) the drm_device,
 * fills in the Linux-compat pci_dev shadow structure from the PCI
 * attach args, sets up locks and lists, and allocates the minors.
 */
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_attach_args *da = aux;
	struct drm_device *dev = da->drm;
	int ret;

	/* First drm instance brings up the globally shared state. */
	if (drm_refcnt == 0) {
		drm_linux_init();
		drm_core_init();
		drm_buddy_module_init();
	}
	drm_refcnt++;

	/* No device embedded by the driver: allocate one we must free
	 * in drm_detach() (tracked via sc_allocated). */
	if (dev == NULL) {
		dev = malloc(sizeof(struct drm_device), M_DRM,
		    M_WAITOK | M_ZERO);
		sc->sc_allocated = 1;
	}

	sc->sc_drm = dev;

	dev->dev = self;
	dev->dev_private = parent;
	dev->driver = da->driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	mtx_init(&dev->managed.lock, IPL_TTY);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->unique = da->busid;

	if (da->pa) {
		struct pci_attach_args *pa = da->pa;
		pcireg_t subsys;

		subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);

		/* Populate the Linux-style pci_dev shadow structure. */
		dev->pdev = &dev->_pdev;
		dev->pdev->vendor = PCI_VENDOR(pa->pa_id);
		dev->pdev->device = PCI_PRODUCT(pa->pa_id);
		dev->pdev->subsystem_vendor = PCI_VENDOR(subsys);
		dev->pdev->subsystem_device = PCI_PRODUCT(subsys);
		dev->pdev->revision = PCI_REVISION(pa->pa_class);
		/* Linux packs class/subclass/interface into one word. */
		dev->pdev->class = (PCI_CLASS(pa->pa_class) << 16) |
		    (PCI_SUBCLASS(pa->pa_class) << 8) |
		    PCI_INTERFACE(pa->pa_class);

		dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
		dev->pdev->bus = &dev->pdev->_bus;
		dev->pdev->bus->pc = pa->pa_pc;
		dev->pdev->bus->number = pa->pa_bus;
		dev->pdev->bus->domain_nr = pa->pa_domain;
		dev->pdev->bus->bridgetag = pa->pa_bridgetag;

		/* Fake pci_dev for the parent bridge; freed in drm_detach(). */
		if (pa->pa_bridgetag != NULL) {
			dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
			    M_DRM, M_WAITOK | M_ZERO);
			dev->pdev->bus->self->pc = pa->pa_pc;
			dev->pdev->bus->self->tag = *pa->pa_bridgetag;
		}

		dev->pdev->pc = pa->pa_pc;
		dev->pdev->tag = pa->pa_tag;
		dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

#ifdef CONFIG_ACPI
		dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
		aml_register_notify(dev->pdev->dev.node, NULL,
		    drm_linux_acpi_notify, NULL, ACPIDEV_NOPOLL);
#endif
	}

	mtx_init(&dev->quiesce_mtx, IPL_NONE);
	mtx_init(&dev->event_lock, IPL_TTY);
	rw_init(&dev->struct_mutex, "drmdevlk");
	rw_init(&dev->filelist_mutex, "drmflist");
	rw_init(&dev->clientlist_mutex, "drmclist");
	rw_init(&dev->master_mutex, "drmmast");

	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		goto error;

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto error;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto error;

#ifdef CONFIG_DRM_LEGACY
	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if IS_ENABLED(CONFIG_AGP)
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		/* Mark the AGP aperture write-combining when possible. */
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}
#endif

	/* Per-device pool for GEM objects of the driver's declared size. */
	if (dev->driver->gem_size > 0) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
		    "drmobjpl", NULL);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	drmm_add_final_kfree(dev, dev);

	printf("\n");
	return;

error:
	/* dev_private == NULL makes later opens fail with ENXIO. */
	drm_managed_release(dev);
	dev->dev_private = NULL;
}
1432 
/*
 * Autoconf detach: undo drm_attach().  The last drm instance also
 * tears down the globally shared state.
 */
int
drm_detach(struct device *self, int flags)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_device *dev = sc->sc_drm;

	drm_refcnt--;
	if (drm_refcnt == 0) {
		drm_buddy_module_exit();
		drm_core_exit();
		drm_linux_exit();
	}

	drm_lastclose(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		if (dev->driver->gem_size > 0)
			pool_destroy(&dev->objpl);
	}

#ifdef CONFIG_DRM_LEGACY
	if (dev->agp && dev->agp->mtrr) {
		int retcode;

		/* Remove the write-combining range added in drm_attach(). */
		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	free(dev->agp, M_DRM, 0);
#endif
	/* Free the fake bridge pci_dev allocated in drm_attach(). */
	if (dev->pdev && dev->pdev->bus)
		free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));

	/* Only free the drm_device if drm_attach() allocated it. */
	if (sc->sc_allocated)
		free(dev, M_DRM, sizeof(struct drm_device));

	return 0;
}
1472 
/*
 * Suspend path: raise the quiesce flag and sleep until all in-flight
 * operations tracked by quiesce_count have drained.
 */
void
drm_quiesce(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", INFSLP);
	}
	mtx_leave(&dev->quiesce_mtx);
}
1484 
/*
 * Resume path: clear the quiesce flag and wake threads sleeping on it.
 */
void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
1493 
1494 int
1495 drm_activate(struct device *self, int act)
1496 {
1497 	struct drm_softc *sc = (struct drm_softc *)self;
1498 	struct drm_device *dev = sc->sc_drm;
1499 
1500 	switch (act) {
1501 	case DVACT_QUIESCE:
1502 		drm_quiesce(dev);
1503 		break;
1504 	case DVACT_WAKEUP:
1505 		drm_wakeup(dev);
1506 		break;
1507 	}
1508 
1509 	return (0);
1510 }
1511 
/* Autoconf glue: probe/attach/detach/activate entry points for drm(4). */
const struct cfattach drm_ca = {
	sizeof(struct drm_softc), drm_probe, drm_attach,
	drm_detach, drm_activate
};

/* Device class record; unit numbers index drm_cd.cd_devs. */
struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
1520 
1521 const struct pci_device_id *
1522 drm_find_description(int vendor, int device, const struct pci_device_id *idlist)
1523 {
1524 	int i = 0;
1525 
1526 	for (i = 0; idlist[i].vendor != 0; i++) {
1527 		if ((idlist[i].vendor == vendor) &&
1528 		    (idlist[i].device == device ||
1529 		     idlist[i].device == PCI_ANY_ID) &&
1530 		    (idlist[i].subvendor == PCI_ANY_ID) &&
1531 		    (idlist[i].subdevice == PCI_ANY_ID))
1532 			return &idlist[i];
1533 	}
1534 	return NULL;
1535 }
1536 
1537 int
1538 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
1539 {
1540 	return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor);
1541 }
1542 
1543 SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
1544 
1545 struct drm_file *
1546 drm_find_file_by_minor(struct drm_device *dev, int minor)
1547 {
1548 	struct drm_file	key;
1549 
1550 	key.fminor = minor;
1551 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
1552 }
1553 
1554 struct drm_device *
1555 drm_get_device_from_kdev(dev_t kdev)
1556 {
1557 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
1558 	/* control */
1559 	if (unit >= 64 && unit < 128)
1560 		unit -= 64;
1561 	/* render */
1562 	if (unit >= 128)
1563 		unit -= 128;
1564 	struct drm_softc *sc;
1565 
1566 	if (unit < drm_cd.cd_ndevs) {
1567 		sc = (struct drm_softc *)drm_cd.cd_devs[unit];
1568 		if (sc)
1569 			return sc->sc_drm;
1570 	}
1571 
1572 	return NULL;
1573 }
1574 
/* Knote detach for EVFILT_DEVICE: unhook from the device klist. */
void
filt_drmdetach(struct knote *kn)
{
	struct drm_device *dev = kn->kn_hook;
	int s;

	s = spltty();
	klist_remove_locked(&dev->note, kn);
	splx(s);
}
1585 
1586 int
1587 filt_drmkms(struct knote *kn, long hint)
1588 {
1589 	if (kn->kn_sfflags & hint)
1590 		kn->kn_fflags |= hint;
1591 	return (kn->kn_fflags != 0);
1592 }
1593 
/* Knote detach for EVFILT_READ: unhook from the file's selinfo klist. */
void
filt_drmreaddetach(struct knote *kn)
{
	struct drm_file		*file_priv = kn->kn_hook;
	int s;

	s = spltty();
	klist_remove_locked(&file_priv->rsel.si_note, kn);
	splx(s);
}
1604 
/*
 * Knote event for EVFILT_READ: ready when the file has pending events.
 * NOTE_SUBMIT in the hint means the caller already holds event_lock.
 */
int
filt_drmread(struct knote *kn, long hint)
{
	struct drm_file		*file_priv = kn->kn_hook;
	int			 val = 0;

	if ((hint & NOTE_SUBMIT) == 0)
		mtx_enter(&file_priv->minor->dev->event_lock);
	val = !list_empty(&file_priv->event_list);
	if ((hint & NOTE_SUBMIT) == 0)
		mtx_leave(&file_priv->minor->dev->event_lock);
	return (val);
}
1618 
/* kqueue filter for EVFILT_DEVICE notes hooked to the drm device. */
const struct filterops drm_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmdetach,
	.f_event	= filt_drmkms,
};

/* kqueue filter for EVFILT_READ on a drm file's event queue. */
const struct filterops drmread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmreaddetach,
	.f_event	= filt_drmread,
};
1632 
/*
 * kqfilter entry point: attach a knote either to the per-open file's
 * event queue (EVFILT_READ) or to the device klist (EVFILT_DEVICE).
 */
int
drmkqfilter(dev_t kdev, struct knote *kn)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv = NULL;
	int			 s;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/* Resolve the drm_file for this minor under the list lock. */
		mutex_lock(&dev->struct_mutex);
		file_priv = drm_find_file_by_minor(dev, minor(kdev));
		mutex_unlock(&dev->struct_mutex);
		if (file_priv == NULL)
			return (ENXIO);

		kn->kn_fop = &drmread_filtops;
		kn->kn_hook = file_priv;

		s = spltty();
		klist_insert_locked(&file_priv->rsel.si_note, kn);
		splx(s);
		break;
	case EVFILT_DEVICE:
		kn->kn_fop = &drm_filtops;
		kn->kn_hook = dev;

		s = spltty();
		klist_insert_locked(&dev->note, kn);
		splx(s);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
1673 
/*
 * open(2) entry point for drm device nodes.  Allocates a drm_file for
 * this minor, makes a primary opener master, and performs one-time
 * legacy setup on the very first open of the device.
 */
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	struct drm_minor	*dm;
	int			 ret = 0;
	int			 dminor, realminor, minor_type;
	int need_setup = 0;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* First open of the device triggers drm_legacy_setup() below. */
	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* Minor ranges: [0,64) primary, [64,128) control, rest render. */
	dminor = minor(kdev);
	realminor =  dminor & ((1 << CLONE_SHIFT) - 1);
	if (realminor < 64)
		minor_type = DRM_MINOR_PRIMARY;
	else if (realminor >= 64 && realminor < 128)
		minor_type = DRM_MINOR_CONTROL;
	else
		minor_type = DRM_MINOR_RENDER;

	dm = *drm_minor_get_slot(dev, minor_type);
	dm->index = minor(kdev);

	file_priv = drm_file_alloc(dm);
	if (IS_ERR(file_priv)) {
		ret = ENOMEM;
		goto err;
	}

	/* first opener automatically becomes master */
	if (drm_is_primary_client(file_priv)) {
		ret = drm_master_open(file_priv);
		if (ret != 0)
			goto out_file_free;
	}

	file_priv->filp = (void *)file_priv;
	file_priv->fminor = minor(kdev);

	/* Index the new file by minor so later syscalls can find it. */
	mutex_lock(&dev->filelist_mutex);
	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);

	if (need_setup) {
		ret = drm_legacy_setup(dev);
		if (ret)
			goto out_file_free;
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

out_file_free:
	drm_file_free(file_priv);
err:
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return (ret);
}
1750 
/*
 * close(2) entry point: remove and free the drm_file for this minor;
 * the last close of the device runs drm_lastclose().
 */
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		mutex_unlock(&dev->filelist_mutex);
		goto done;
	}

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);
	drm_file_free(file_priv);
done:
	/* open_count is decremented even on the EINVAL path above. */
	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return (retcode);
}
1787 
/*
 * read(2) entry point: block until at least one event is queued
 * (unless IO_NDELAY), then copy out as many whole events as fit in
 * the caller's buffer.
 */
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int		 		 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->filelist_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", INFSLP);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		/* drm_dequeue_event() dropped event_lock for the copyout. */
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		kfree(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
1838 
1839 /*
1840  * Deqeue an event from the file priv in question. returning 1 if an
1841  * event was found. We take the resid from the read as a parameter because
1842  * we will only dequeue and event if the read buffer has space to fit the
1843  * entire thing.
1844  *
1845  * We are called locked, but we will *unlock* the queue on return so that
1846  * we may sleep to copyout the event.
1847  */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event *e = NULL;
	int gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
			     struct drm_pending_event, link);
	/* Only hand out the event if it fits in the caller's buffer. */
	if (e->event->length > resid)
		goto out;

	/* Return the event's space to the file's accounting budget. */
	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	gotone = 1;

out:
	/* Deliberately drop the lock: the caller copies out unlocked. */
	mtx_leave(&dev->event_lock);

	return (gotone);
}
1875 
/* Direct mmap(2) of drm device nodes is not supported; always fail. */
paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	return -1;
}
1881 
/*
 * Allocate, map and load a DMA buffer described by a struct drm_dmamem.
 * Returns NULL on failure with all intermediate state unwound.
 */
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

	/* Error unwind: each label undoes the step that preceded it. */
unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}
1928 
/* Release a buffer from drm_dmamem_alloc(); a NULL mem is a no-op. */
void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	/* Tear down in reverse order of drm_dmamem_alloc(). */
	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM, 0);
}
1941 
/*
 * Linux-compat drm_pci_alloc(): wrap a single-segment drm_dmamem
 * allocation in a drm_dma_handle exposing bus address, size and kva.
 */
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	struct drm_dma_handle *dmah;

	dmah = malloc(sizeof(*dmah), M_DRM, M_WAITOK);
	dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
	    BUS_DMA_NOCACHE, 0);
	if (dmah->mem == NULL) {
		free(dmah, M_DRM, sizeof(*dmah));
		return NULL;
	}
	/* Single segment (nsegments == 1), so segs[0] is the bus address. */
	dmah->busaddr = dmah->mem->segs[0].ds_addr;
	dmah->size = dmah->mem->size;
	dmah->vaddr = dmah->mem->kva;
	return (dmah);
}
1959 
1960 void
1961 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
1962 {
1963 	if (dmah == NULL)
1964 		return;
1965 
1966 	drm_dmamem_free(dev->dmat, dmah->mem);
1967 	free(dmah, M_DRM, sizeof(*dmah));
1968 }
1969 
1970 /*
1971  * Compute order.  Can be made faster.
1972  */
1973 int
1974 drm_order(unsigned long size)
1975 {
1976 	int order;
1977 	unsigned long tmp;
1978 
1979 	for (order = 0, tmp = size; tmp >>= 1; ++order)
1980 		;
1981 
1982 	if (size & ~(1 << order))
1983 		++order;
1984 
1985 	return order;
1986 }
1987 
/*
 * Ioctl helper: report the device's PCI location and ids from the
 * shadow pci_dev filled in by drm_attach(); -ENOTTY for non-PCI
 * devices.
 */
int
drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_pciinfo *info = data;

	if (dev->pdev == NULL)
		return -ENOTTY;

	info->domain = dev->pdev->bus->domain_nr;
	info->bus = dev->pdev->bus->number;
	info->dev = PCI_SLOT(dev->pdev->devfn);
	info->func = PCI_FUNC(dev->pdev->devfn);
	info->vendor_id = dev->pdev->vendor;
	info->device_id = dev->pdev->device;
	info->subvendor_id = dev->pdev->subsystem_vendor;
	info->subdevice_id = dev->pdev->subsystem_device;
	/*
	 * NOTE(review): revision is reported as 0 even though
	 * dev->pdev->revision is populated in drm_attach() — confirm
	 * whether hiding the revision here is intentional.
	 */
	info->revision_id = 0;

	return 0;
}
2008