xref: /openbsd-src/sys/dev/pci/drm/drm_drv.c (revision 4e1ee0786f11cc571bd0be17d38e46f635c719fc)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/fcntl.h>
31 #include <sys/poll.h>
32 #include <sys/specdev.h>
33 #include <sys/vnode.h>
34 
35 #include <machine/bus.h>
36 
37 #ifdef __HAVE_ACPI
38 #include <dev/acpi/acpidev.h>
39 #include <dev/acpi/acpivar.h>
40 #include <dev/acpi/dsdt.h>
41 #endif
42 
43 #include <linux/debugfs.h>
44 #include <linux/fs.h>
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/mount.h>
48 #include <linux/pseudo_fs.h>
49 #include <linux/slab.h>
50 #include <linux/srcu.h>
51 
52 #include <drm/drm_client.h>
53 #include <drm/drm_color_mgmt.h>
54 #include <drm/drm_drv.h>
55 #include <drm/drm_file.h>
56 #include <drm/drm_managed.h>
57 #include <drm/drm_mode_object.h>
58 #include <drm/drm_print.h>
59 
60 #include <drm/drm_gem.h>
61 #include <drm/drm_agpsupport.h>
62 #include <drm/drm_irq.h>
63 
64 #include "drm_crtc_internal.h"
65 #include "drm_internal.h"
66 #include "drm_legacy.h"
67 
68 MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
69 MODULE_DESCRIPTION("DRM shared core routines");
70 MODULE_LICENSE("GPL and additional rights");
71 
72 static DEFINE_SPINLOCK(drm_minor_lock);
73 static struct idr drm_minors_idr;
74 
75 /*
76  * If the drm core fails to init for whatever reason,
77  * we should prevent any drivers from registering with it.
78  * It's best to check this at drm_dev_init(), as some drivers
79  * prefer to embed struct drm_device into their own device
80  * structure and call drm_dev_init() themselves.
81  */
82 static bool drm_core_init_complete = false;
83 
84 static struct dentry *drm_debugfs_root;
85 
86 #ifdef notyet
87 DEFINE_STATIC_SRCU(drm_unplug_srcu);
88 #endif
89 
90 /*
91  * Some functions are only called once on init regardless of how many times
92  * drm attaches.  In linux this is handled via module_init()/module_exit()
93  */
94 int drm_refcnt;
95 
/*
 * Autoconf glue: pairs the generic attachment device with the drm_device
 * it drives.
 */
struct drm_softc {
	struct device		sc_dev;		/* generic autoconf device */
	struct drm_device 	*sc_drm;	/* attached DRM device */
	int			sc_allocated;	/* NOTE(review): presumably set when sc_drm was allocated by this layer — confirm against attach code */
};
101 
/*
 * Arguments handed from a bus front-end (e.g. PCI) to the drm
 * attachment code.
 */
struct drm_attach_args {
	struct drm_device		*drm;
	struct drm_driver		*driver;
	char				*busid;
	bus_dma_tag_t			 dmat;		/* bus DMA tag for the device */
	bus_space_tag_t			 bst;		/* bus space tag for register access */
	size_t				 busid_len;
	int				 is_agp;
	struct pci_attach_args		*pa;		/* PCI attach args; NOTE(review): presumably NULL for non-PCI — confirm */
	int				 primary;
};
113 
114 void	drm_linux_init(void);
115 void	drm_linux_exit(void);
116 int	drm_linux_acpi_notify(struct aml_node *, int, void *);
117 
118 int	drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
119 	    struct drm_pending_event **);
120 
121 int	drmprint(void *, const char *);
122 int	drmsubmatch(struct device *, void *, void *);
123 const struct pci_device_id *
124 	drm_find_description(int, int, const struct pci_device_id *);
125 
126 int	drm_file_cmp(struct drm_file *, struct drm_file *);
127 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
128 
129 #define DRMDEVCF_PRIMARY	0
130 #define drmdevcf_primary	cf_loc[DRMDEVCF_PRIMARY]	/* spec'd as primary? */
131 #define DRMDEVCF_PRIMARY_UNK	-1
132 
133 /*
134  * DRM Minors
135  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
136  * of them is represented by a drm_minor object. Depending on the capabilities
137  * of the device-driver, different interfaces are registered.
138  *
139  * Minors can be accessed via dev->$minor_name. This pointer is either
140  * NULL or a valid drm_minor pointer and stays valid as long as the device is
141  * valid. This means, DRM minors have the same life-time as the underlying
142  * device. However, this doesn't mean that the minor is active. Minors are
143  * registered and unregistered dynamically according to device-state.
144  */
145 
146 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
147 					     unsigned int type)
148 {
149 	switch (type) {
150 	case DRM_MINOR_PRIMARY:
151 		return &dev->primary;
152 	case DRM_MINOR_RENDER:
153 		return &dev->render;
154 	default:
155 		BUG();
156 	}
157 }
158 
/*
 * Managed (drmm) release action registered by drm_minor_alloc(): drops
 * the sysfs device reference (Linux only) and frees the minor's slot in
 * the global minors idr.  The minor struct itself is drmm-allocated and
 * freed by the managed-resource machinery.
 */
static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	WARN_ON(dev != minor->dev);

#ifdef __linux__
	put_device(minor->kdev);
#endif

	/* The index was reserved under drm_minor_lock; remove it there too. */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
}
174 
/*
 * Allocate a minor of @type for @dev and reserve an index for it in the
 * global minors idr.  The idr slot is reserved holding NULL; lookups
 * only succeed once drm_minor_register() replaces it with the minor.
 * Cleanup is handled via a drmm release action.
 */
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/* Each minor type owns a 64-wide index range: [64*type, 64*(type+1)). */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	/* From here on the idr slot (and kdev ref) is released via drmm. */
	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

#ifdef __linux__
	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);
#endif

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}
216 
/*
 * Publish an allocated minor: on Linux, set up its debugfs directory and
 * add the sysfs device; then replace the reserved NULL idr entry with
 * the minor so drm_minor_acquire() lookups succeed.  A minor that was
 * never allocated for this device (type unused) is silently skipped.
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
#ifdef __linux__
	int ret;
#endif

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

#ifdef __linux__
	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;
#else
	/* No debugfs on OpenBSD. */
	drm_debugfs_root = NULL;
#endif

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

#ifdef __linux__
err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
#endif
}
259 
260 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
261 {
262 	struct drm_minor *minor;
263 	unsigned long flags;
264 
265 	minor = *drm_minor_get_slot(dev, type);
266 #ifdef __linux__
267 	if (!minor || !device_is_registered(minor->kdev))
268 #else
269 	if (!minor)
270 #endif
271 		return;
272 
273 	/* replace @minor with NULL so lookups will fail from now on */
274 	spin_lock_irqsave(&drm_minor_lock, flags);
275 	idr_replace(&drm_minors_idr, NULL, minor->index);
276 	spin_unlock_irqrestore(&drm_minor_lock, flags);
277 
278 #ifdef __linux__
279 	device_del(minor->kdev);
280 #endif
281 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
282 	drm_debugfs_cleanup(minor);
283 }
284 
285 /*
286  * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
288  * object with drm_minor_release().
289  *
290  * As long as you hold this minor, it is guaranteed that the object and the
291  * minor->dev pointer will stay valid! However, the device may get unplugged and
292  * unregistered while you hold the minor.
293  */
294 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
295 {
296 	struct drm_minor *minor;
297 	unsigned long flags;
298 
299 	spin_lock_irqsave(&drm_minor_lock, flags);
300 	minor = idr_find(&drm_minors_idr, minor_id);
301 	if (minor)
302 		drm_dev_get(minor->dev);
303 	spin_unlock_irqrestore(&drm_minor_lock, flags);
304 
305 	if (!minor) {
306 		return ERR_PTR(-ENODEV);
307 	} else if (drm_dev_is_unplugged(minor->dev)) {
308 		drm_dev_put(minor->dev);
309 		return ERR_PTR(-ENODEV);
310 	}
311 
312 	return minor;
313 }
314 
/*
 * Release a minor obtained from drm_minor_acquire(): drops the device
 * reference that the acquire took.
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}
319 
320 /**
321  * DOC: driver instance overview
322  *
323  * A device instance for a drm driver is represented by &struct drm_device. This
324  * is allocated and initialized with devm_drm_dev_alloc(), usually from
325  * bus-specific ->probe() callbacks implemented by the driver. The driver then
326  * needs to initialize all the various subsystems for the drm device like memory
327  * management, vblank handling, modesetting support and initial output
328  * configuration plus obviously initialize all the corresponding hardware bits.
329  * Finally when everything is up and running and ready for userspace the device
330  * instance can be published using drm_dev_register().
331  *
 * There is also deprecated support for initializing device instances using
333  * bus-specific helpers and the &drm_driver.load callback. But due to
334  * backwards-compatibility needs the device instance have to be published too
335  * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
337  *
338  * When cleaning up a device instance everything needs to be done in reverse:
339  * First unpublish the device instance with drm_dev_unregister(). Then clean up
340  * any other resources allocated at device initialization and drop the driver's
341  * reference to &drm_device using drm_dev_put().
342  *
343  * Note that any allocation or resource which is visible to userspace must be
344  * released only when the final drm_dev_put() is called, and not when the
345  * driver is unbound from the underlying physical struct &device. Best to use
346  * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
347  * related functions.
348  *
349  * devres managed resources like devm_kmalloc() can only be used for resources
350  * directly related to the underlying hardware device, and only used in code
351  * paths fully protected by drm_dev_enter() and drm_dev_exit().
352  *
353  * Display driver example
354  * ~~~~~~~~~~~~~~~~~~~~~~
355  *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that are
 * almost always present and serve as a demonstration of devm_drm_dev_alloc().
359  *
360  * .. code-block:: c
361  *
362  *	struct driver_device {
363  *		struct drm_device drm;
364  *		void *userspace_facing;
365  *		struct clk *pclk;
366  *	};
367  *
368  *	static struct drm_driver driver_drm_driver = {
369  *		[...]
370  *	};
371  *
372  *	static int driver_probe(struct platform_device *pdev)
373  *	{
374  *		struct driver_device *priv;
375  *		struct drm_device *drm;
376  *		int ret;
377  *
378  *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
379  *					  struct driver_device, drm);
380  *		if (IS_ERR(priv))
381  *			return PTR_ERR(priv);
382  *		drm = &priv->drm;
383  *
384  *		ret = drmm_mode_config_init(drm);
385  *		if (ret)
386  *			return ret;
387  *
388  *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
389  *		if (!priv->userspace_facing)
390  *			return -ENOMEM;
391  *
392  *		priv->pclk = devm_clk_get(dev, "PCLK");
393  *		if (IS_ERR(priv->pclk))
394  *			return PTR_ERR(priv->pclk);
395  *
396  *		// Further setup, display pipeline etc
397  *
398  *		platform_set_drvdata(pdev, drm);
399  *
400  *		drm_mode_config_reset(drm);
401  *
402  *		ret = drm_dev_register(drm);
403  *		if (ret)
404  *			return ret;
405  *
406  *		drm_fbdev_generic_setup(drm, 32);
407  *
408  *		return 0;
409  *	}
410  *
411  *	// This function is called before the devm_ resources are released
412  *	static int driver_remove(struct platform_device *pdev)
413  *	{
414  *		struct drm_device *drm = platform_get_drvdata(pdev);
415  *
416  *		drm_dev_unregister(drm);
417  *		drm_atomic_helper_shutdown(drm)
418  *
419  *		return 0;
420  *	}
421  *
422  *	// This function is called on kernel restart and shutdown
423  *	static void driver_shutdown(struct platform_device *pdev)
424  *	{
425  *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
426  *	}
427  *
428  *	static int __maybe_unused driver_pm_suspend(struct device *dev)
429  *	{
430  *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
431  *	}
432  *
433  *	static int __maybe_unused driver_pm_resume(struct device *dev)
434  *	{
435  *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
436  *
437  *		return 0;
438  *	}
439  *
440  *	static const struct dev_pm_ops driver_pm_ops = {
441  *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
442  *	};
443  *
444  *	static struct platform_driver driver_driver = {
445  *		.driver = {
446  *			[...]
447  *			.pm = &driver_pm_ops,
448  *		},
449  *		.probe = driver_probe,
450  *		.remove = driver_remove,
451  *		.shutdown = driver_shutdown,
452  *	};
453  *	module_platform_driver(driver_driver);
454  *
455  * Drivers that want to support device unplugging (USB, DT overlay unload) should
456  * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
 * regions that are accessing device resources to prevent use after they're
458  * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
459  * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
460  * drm_atomic_helper_shutdown() is called. This means that if the disable code
461  * paths are protected, they will not run on regular driver module unload,
 * possibly leaving the hardware enabled.
463  */
464 
465 /**
466  * drm_put_dev - Unregister and release a DRM device
467  * @dev: DRM device
468  *
469  * Called at module unload time or when a PCI device is unplugged.
470  *
471  * Cleans up all DRM device, calling drm_lastclose().
472  *
473  * Note: Use of this function is deprecated. It will eventually go away
474  * completely.  Please use drm_dev_unregister() and drm_dev_put() explicitly
475  * instead to make sure that the device isn't userspace accessible any more
476  * while teardown is in progress, ensuring that userspace can't access an
477  * inconsistent state.
478  */
479 void drm_put_dev(struct drm_device *dev)
480 {
481 	DRM_DEBUG("\n");
482 
483 	if (!dev) {
484 		DRM_ERROR("cleanup called no dev\n");
485 		return;
486 	}
487 
488 	drm_dev_unregister(dev);
489 	drm_dev_put(dev);
490 }
491 EXPORT_SYMBOL(drm_put_dev);
492 
493 /**
494  * drm_dev_enter - Enter device critical section
495  * @dev: DRM device
496  * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
497  *
498  * This function marks and protects the beginning of a section that should not
499  * be entered after the device has been unplugged. The section end is marked
500  * with drm_dev_exit(). Calls to this function can be nested.
501  *
502  * Returns:
503  * True if it is OK to enter the section, false otherwise.
504  */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
#ifdef notyet
	/* SRCU read section: drm_dev_unplug() waits for all readers. */
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}
#endif

	/* OpenBSD: SRCU-based unplug tracking not ported; always enter. */
	return true;
}
EXPORT_SYMBOL(drm_dev_enter);
519 
520 /**
521  * drm_dev_exit - Exit device critical section
522  * @idx: index returned from drm_dev_enter()
523  *
524  * This function marks the end of a section that should not be entered after
525  * the device has been unplugged.
526  */
void drm_dev_exit(int idx)
{
#ifdef notyet
	/* Close the SRCU read section opened by drm_dev_enter(). */
	srcu_read_unlock(&drm_unplug_srcu, idx);
#endif
	/* OpenBSD: nothing to do, drm_dev_enter() takes no lock here. */
}
EXPORT_SYMBOL(drm_dev_exit);
534 
535 /**
536  * drm_dev_unplug - unplug a DRM device
537  * @dev: DRM device
538  *
539  * This unplugs a hotpluggable DRM device, which makes it inaccessible to
540  * userspace operations. Entry-points can use drm_dev_enter() and
541  * drm_dev_exit() to protect device resources in a race free manner. This
542  * essentially unregisters the device like drm_dev_unregister(), but can be
543  * called while there are still open users of @dev.
544  */
/* Not ported to OpenBSD: depends on the SRCU machinery used by
 * drm_dev_enter()/drm_dev_exit(); currently just logs via STUB(). */
void drm_dev_unplug(struct drm_device *dev)
{
	STUB();
#ifdef notyet
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);
#endif
}
EXPORT_SYMBOL(drm_dev_unplug);
562 
563 #ifdef __linux__
564 /*
565  * DRM internal mount
566  * We want to be able to allocate our own "struct address_space" to control
567  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
568  * stand-alone address_space objects, so we need an underlying inode. As there
569  * is no way to allocate an independent inode easily, we need a fake internal
570  * VFS mount-point.
571  *
572  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
573  * frees it again. You are allowed to use iget() and iput() to get references to
574  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
575  * drm_fs_inode_free() call (which does not have to be the last iput()).
576  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
577  * between multiple inode-users. You could, technically, call
578  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
579  * iput(), but this way you'd end up with a new vfsmount for each inode.
580  */
581 
582 static int drm_fs_cnt;
583 static struct vfsmount *drm_fs_mnt;
584 
585 static int drm_fs_init_fs_context(struct fs_context *fc)
586 {
587 	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
588 }
589 
/* Pseudo filesystem providing the anonymous inodes DRM uses for
 * address_space objects (see the comment block above). */
static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
596 
597 static struct inode *drm_fs_inode_new(void)
598 {
599 	struct inode *inode;
600 	int r;
601 
602 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
603 	if (r < 0) {
604 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
605 		return ERR_PTR(r);
606 	}
607 
608 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
609 	if (IS_ERR(inode))
610 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
611 
612 	return inode;
613 }
614 
615 static void drm_fs_inode_free(struct inode *inode)
616 {
617 	if (inode) {
618 		iput(inode);
619 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
620 	}
621 }
622 
623 #endif /* __linux__ */
624 
625 /**
626  * DOC: component helper usage recommendations
627  *
628  * DRM drivers that drive hardware where a logical device consists of a pile of
629  * independent hardware blocks are recommended to use the :ref:`component helper
630  * library<component>`. For consistency and better options for code reuse the
631  * following guidelines apply:
632  *
633  *  - The entire device initialization procedure should be run from the
634  *    &component_master_ops.master_bind callback, starting with
635  *    devm_drm_dev_alloc(), then binding all components with
636  *    component_bind_all() and finishing with drm_dev_register().
637  *
638  *  - The opaque pointer passed to all components through component_bind_all()
639  *    should point at &struct drm_device of the device instance, not some driver
640  *    specific private structure.
641  *
642  *  - The component helper fills the niche where further standardization of
643  *    interfaces is not practical. When there already is, or will be, a
644  *    standardized interface like &drm_bridge or &drm_panel, providing its own
645  *    functions to find such components at driver load time, like
646  *    drm_of_find_panel_or_bridge(), then the component helper should not be
647  *    used.
648  */
649 
/*
 * Managed release action registered by drm_dev_init(): tears down, in
 * reverse order, the state drm_dev_init() set up.  Runs from
 * drm_managed_release(), either on init failure or on final device
 * release.
 */
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
#ifdef __linux__
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dev);
#endif
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
}
668 
/*
 * Initialize an already-allocated drm_device: reference count, locks and
 * lists, legacy state, minors and (when the driver advertises it) GEM.
 * On any failure, everything initialized so far is torn down through
 * drm_managed_release().
 */
static int drm_dev_init(struct drm_device *dev,
			struct drm_driver *driver,
			struct device *parent)
{
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
#ifdef __linux__
	dev->dev = get_device(parent);
#endif
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	mtx_init(&dev->managed.lock, IPL_TTY);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	drm_legacy_init_members(dev);
#ifdef notyet
	INIT_LIST_HEAD(&dev->filelist);
#else
	/* OpenBSD tracks open files in a splay tree rather than a list. */
	SPLAY_INIT(&dev->files);
#endif
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	mtx_init(&dev->event_lock, IPL_TTY);
	rw_init(&dev->struct_mutex, "drmdevlk");
	rw_init(&dev->filelist_mutex, "drmflist");
	rw_init(&dev->clientlist_mutex, "drmclist");
	rw_init(&dev->master_mutex, "drmmast");

	/* Everything above is undone by drm_dev_init_release(). */
	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

#ifdef __linux__
	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}
#endif

	/* A render minor exists only for DRIVER_RENDER-capable drivers. */
	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err;

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	/* Default unique name; drivers may override via drm_dev_set_unique(). */
	ret = drm_dev_set_unique(dev, dev_name(parent));
	if (ret)
		goto err;

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}
759 
#ifdef notyet
/* devres action: drops the final drm_device ref when the parent device
 * is unbound (Linux devres path, not yet ported). */
static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}
#endif
766 
/*
 * Linux devres-managed variant of drm_dev_init().  Not yet ported to
 * OpenBSD: always fails with -ENOSYS, so __devm_drm_dev_alloc() cannot
 * currently succeed on this platform.
 */
static int devm_drm_dev_init(struct device *parent,
			     struct drm_device *dev,
			     struct drm_driver *driver)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	int ret;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	/* If registering the devres action fails, release immediately. */
	ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
	if (ret)
		devm_drm_dev_init_release(dev);

	return ret;
#endif
}
787 
788 void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
789 			   size_t size, size_t offset)
790 {
791 	void *container;
792 	struct drm_device *drm;
793 	int ret;
794 
795 	container = kzalloc(size, GFP_KERNEL);
796 	if (!container)
797 		return ERR_PTR(-ENOMEM);
798 
799 	drm = container + offset;
800 	ret = devm_drm_dev_init(parent, drm, driver);
801 	if (ret) {
802 		kfree(container);
803 		return ERR_PTR(ret);
804 	}
805 	drmm_add_final_kfree(drm, container);
806 
807 	return container;
808 }
809 EXPORT_SYMBOL(__devm_drm_dev_alloc);
810 
811 /**
812  * drm_dev_alloc - Allocate new DRM device
813  * @driver: DRM driver to allocate device for
814  * @parent: Parent device object
815  *
816  * This is the deprecated version of devm_drm_dev_alloc(), which does not support
817  * subclassing through embedding the struct &drm_device in a driver private
818  * structure, and which does not support automatic cleanup through devres.
819  *
820  * RETURNS:
821  * Pointer to new DRM device, or ERR_PTR on failure.
822  */
823 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
824 				 struct device *parent)
825 {
826 	struct drm_device *dev;
827 	int ret;
828 
829 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
830 	if (!dev)
831 		return ERR_PTR(-ENOMEM);
832 
833 	ret = drm_dev_init(dev, driver, parent);
834 	if (ret) {
835 		kfree(dev);
836 		return ERR_PTR(ret);
837 	}
838 
839 	drmm_add_final_kfree(dev, dev);
840 
841 	return dev;
842 }
843 EXPORT_SYMBOL(drm_dev_alloc);
844 
845 static void drm_dev_release(struct kref *ref)
846 {
847 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
848 
849 	if (dev->driver->release)
850 		dev->driver->release(dev);
851 
852 	drm_managed_release(dev);
853 
854 	kfree(dev->managed.final_kfree);
855 }
856 
857 /**
858  * drm_dev_get - Take reference of a DRM device
859  * @dev: device to take reference of or NULL
860  *
861  * This increases the ref-count of @dev by one. You *must* already own a
862  * reference when calling this. Use drm_dev_put() to drop this reference
863  * again.
864  *
865  * This function never fails. However, this function does not provide *any*
866  * guarantee whether the device is alive or running. It only provides a
867  * reference to the object and the memory associated with it.
868  */
void drm_dev_get(struct drm_device *dev)
{
	/* NULL is tolerated so callers need not check first. */
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);
875 
876 /**
877  * drm_dev_put - Drop reference of a DRM device
878  * @dev: device to drop reference of or NULL
879  *
880  * This decreases the ref-count of @dev by one. The device is destroyed if the
881  * ref-count drops to zero.
882  */
void drm_dev_put(struct drm_device *dev)
{
	/* NULL is tolerated; drm_dev_release() runs on the final put. */
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);
889 
890 static int create_compat_control_link(struct drm_device *dev)
891 {
892 	struct drm_minor *minor;
893 	char *name;
894 	int ret;
895 
896 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
897 		return 0;
898 
899 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
900 	if (!minor)
901 		return 0;
902 
903 	/*
904 	 * Some existing userspace out there uses the existing of the controlD*
905 	 * sysfs files to figure out whether it's a modeset driver. It only does
906 	 * readdir, hence a symlink is sufficient (and the least confusing
907 	 * option). Otherwise controlD* is entirely unused.
908 	 *
909 	 * Old controlD chardev have been allocated in the range
910 	 * 64-127.
911 	 */
912 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
913 	if (!name)
914 		return -ENOMEM;
915 
916 	ret = sysfs_create_link(minor->kdev->kobj.parent,
917 				&minor->kdev->kobj,
918 				name);
919 
920 	kfree(name);
921 
922 	return ret;
923 }
924 
925 static void remove_compat_control_link(struct drm_device *dev)
926 {
927 	struct drm_minor *minor;
928 	char *name;
929 
930 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
931 		return;
932 
933 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
934 	if (!minor)
935 		return;
936 
937 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
938 	if (!name)
939 		return;
940 
941 	sysfs_remove_link(minor->kdev->kobj.parent, name);
942 
943 	kfree(name);
944 }
945 
946 /**
947  * drm_dev_register - Register DRM device
948  * @dev: Device to register
949  * @flags: Flags passed to the driver's .load() function
950  *
951  * Register the DRM device @dev with the system, advertise device to user-space
952  * and start normal device operation. @dev must be initialized via drm_dev_init()
953  * previously.
954  *
955  * Never call this twice on any device!
956  *
957  * NOTE: To ensure backward compatibility with existing drivers method this
958  * function calls the &drm_driver.load method after registering the device
959  * nodes, creating race conditions. Usage of the &drm_driver.load methods is
960  * therefore deprecated, drivers must perform all initialization before calling
961  * drm_dev_register().
962  *
963  * RETURNS:
964  * 0 on success, negative error code on failure.
965  */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	struct drm_driver *driver = dev->driver;
	int ret;

	/* New-style drivers (no .load) get their mode config validated. */
	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* Register minors in render-then-primary order; both may be absent. */
	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	/* Deprecated path: device nodes are already visible here, so .load
	 * races against userspace (see the kernel-doc above). */
	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	ret = 0;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary->index);

	goto out_unlock;

err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
1022 
1023 /**
1024  * drm_dev_unregister - Unregister DRM device
1025  * @dev: Device to unregister
1026  *
1027  * Unregister the DRM device from the system. This does the reverse of
1028  * drm_dev_register() but does not deallocate the device. The caller must call
1029  * drm_dev_put() to drop their final reference.
1030  *
1031  * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
1032  * which can be called while there are still open users of @dev.
1033  *
1034  * This should be called first in the device teardown code to make sure
1035  * userspace can't access the device instance any more.
1036  */
void drm_dev_unregister(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	/* Stop new userspace access before tearing anything down. */
	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	/* Deprecated counterpart to the ->load hook in drm_dev_register(). */
	if (dev->driver->unload)
		dev->driver->unload(dev);

#if IS_ENABLED(CONFIG_AGP)
	if (dev->agp)
		drm_agp_takedown(dev);
#endif

	drm_legacy_rmmaps(dev);

	/* Reverse of the minor registration done in drm_dev_register(). */
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);
1064 
1065 /**
1066  * drm_dev_set_unique - Set the unique name of a DRM device
1067  * @dev: device of which to set the unique name
1068  * @name: unique name
1069  *
1070  * Sets the unique name of a DRM device using the specified string. This is
1071  * already done by drm_dev_init(), drivers should only override the default
1072  * unique name for backwards compatibility reasons.
1073  *
1074  * Return: 0 on success or a negative error code on failure.
1075  */
1076 int drm_dev_set_unique(struct drm_device *dev, const char *name)
1077 {
1078 	drmm_kfree(dev, dev->unique);
1079 	dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
1080 
1081 	return dev->unique ? 0 : -ENOMEM;
1082 }
1083 EXPORT_SYMBOL(drm_dev_set_unique);
1084 
1085 /*
1086  * DRM Core
1087  * The DRM core module initializes all global DRM objects and makes them
1088  * available to drivers. Once setup, drivers can probe their respective
1089  * devices.
1090  * Currently, core management includes:
1091  *  - The "DRM-Global" key/value database
1092  *  - Global ID management for connectors
1093  *  - DRM major number allocation
1094  *  - DRM minor management
1095  *  - DRM sysfs class
1096  *  - DRM debugfs root
1097  *
1098  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
1099  * interface registered on a DRM device, you can request minor numbers from DRM
1100  * core. DRM core takes care of major-number management and char-dev
1101  * registration. A stub ->open() callback forwards any open() requests to the
1102  * registered minor.
1103  */
1104 
1105 #ifdef __linux__
/*
 * Linux-only: stub open() for the shared DRM character device major.
 * Looks up the minor being opened, swaps in the owning driver's fops
 * and forwards the open() to it.
 */
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	/* Takes a reference on the minor; released below. */
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	/* All further file ops go straight to the driver. */
	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}
1135 
/* Linux-only: fops registered for DRM_MAJOR; open() redirects per-minor. */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
1141 #endif /* __linux__ */
1142 
/*
 * Tear down global DRM core state created by drm_core_init().
 * On OpenBSD this runs when the last drm device detaches.
 */
static void drm_core_exit(void)
{
#ifdef __linux__
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
#endif
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}
1153 
/*
 * Initialize global DRM core state: connector ID allocator, minor IDR
 * and, on Linux only, the sysfs class, debugfs root and char device.
 * On OpenBSD this is called from drm_attach() for the first device.
 */
static int __init drm_core_init(void)
{
#ifdef __linux__
	int ret;
#endif

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

#ifdef __linux__
	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;
#endif

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;
#ifdef __linux__
error:
	/* drm_core_exit() copes with partially-initialized state. */
	drm_core_exit();
	return ret;
#endif
}
1187 
1188 #ifdef __linux__
1189 module_init(drm_core_init);
1190 module_exit(drm_core_exit);
1191 #endif
1192 
1193 void
1194 drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot,
1195     bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm)
1196 {
1197 	struct drm_attach_args arg;
1198 
1199 	memset(&arg, 0, sizeof(arg));
1200 	arg.driver = driver;
1201 	arg.bst = iot;
1202 	arg.dmat = dmat;
1203 	arg.drm = drm;
1204 
1205 	arg.busid = dev->dv_xname;
1206 	arg.busid_len = strlen(dev->dv_xname) + 1;
1207 	config_found_sm(dev, &arg, drmprint, drmsubmatch);
1208 }
1209 
1210 struct drm_device *
1211 drm_attach_pci(struct drm_driver *driver, struct pci_attach_args *pa,
1212     int is_agp, int primary, struct device *dev, struct drm_device *drm)
1213 {
1214 	struct drm_attach_args arg;
1215 	struct drm_softc *sc;
1216 
1217 	arg.drm = drm;
1218 	arg.driver = driver;
1219 	arg.dmat = pa->pa_dmat;
1220 	arg.bst = pa->pa_memt;
1221 	arg.is_agp = is_agp;
1222 	arg.primary = primary;
1223 	arg.pa = pa;
1224 
1225 	arg.busid_len = 20;
1226 	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
1227 	if (arg.busid == NULL) {
1228 		printf("%s: no memory for drm\n", dev->dv_xname);
1229 		return (NULL);
1230 	}
1231 	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
1232 	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
1233 
1234 	sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
1235 	if (sc == NULL)
1236 		return NULL;
1237 
1238 	return sc->sc_drm;
1239 }
1240 
1241 int
1242 drmprint(void *aux, const char *pnp)
1243 {
1244 	if (pnp != NULL)
1245 		printf("drm at %s", pnp);
1246 	return (UNCONF);
1247 }
1248 
1249 int
1250 drmsubmatch(struct device *parent, void *match, void *aux)
1251 {
1252 	extern struct cfdriver drm_cd;
1253 	struct cfdata *cf = match;
1254 
1255 	/* only allow drm to attach */
1256 	if (cf->cf_driver == &drm_cd)
1257 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
1258 	return (0);
1259 }
1260 
1261 int
1262 drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist)
1263 {
1264 	const struct pci_device_id *id_entry;
1265 
1266 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
1267 	    PCI_PRODUCT(pa->pa_id), idlist);
1268 	if (id_entry != NULL)
1269 		return 1;
1270 
1271 	return 0;
1272 }
1273 
1274 int
1275 drm_probe(struct device *parent, void *match, void *aux)
1276 {
1277 	struct cfdata *cf = match;
1278 	struct drm_attach_args *da = aux;
1279 
1280 	if (cf->drmdevcf_primary != DRMDEVCF_PRIMARY_UNK) {
1281 		/*
1282 		 * If primary-ness of device specified, either match
1283 		 * exactly (at high priority), or fail.
1284 		 */
1285 		if (cf->drmdevcf_primary != 0 && da->primary != 0)
1286 			return (10);
1287 		else
1288 			return (0);
1289 	}
1290 
1291 	/* If primary-ness unspecified, it wins. */
1292 	return (1);
1293 }
1294 
/*
 * Autoconf attach function for drm(4).  Initializes the drm_device
 * (allocating one if the parent didn't embed it), mirrors the parent's
 * PCI identity into a Linux-style pci_dev, sets up locks, minors, AGP
 * and GEM.  On error the partially-initialized managed resources are
 * released and dev_private is cleared so the device is treated as dead.
 */
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_attach_args *da = aux;
	struct drm_device *dev = da->drm;
	int ret;

	/* First drm device brings up the shared Linux-emulation/core state. */
	if (drm_refcnt == 0) {
		drm_linux_init();
		drm_core_init();
	}
	drm_refcnt++;

	/* Parent may embed a drm_device; otherwise allocate and own one. */
	if (dev == NULL) {
		dev = malloc(sizeof(struct drm_device), M_DRM,
		    M_WAITOK | M_ZERO);
		sc->sc_allocated = 1;
	}

	sc->sc_drm = dev;

	dev->dev = self;
	dev->dev_private = parent;
	dev->driver = da->driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	mtx_init(&dev->managed.lock, IPL_TTY);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->unique = da->busid;

	/* PCI attach: fill in the Linux-style pci_dev shadow structure. */
	if (da->pa) {
		struct pci_attach_args *pa = da->pa;
		pcireg_t subsys;

		subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);

		dev->pdev = &dev->_pdev;
		dev->pdev->vendor = PCI_VENDOR(pa->pa_id);
		dev->pdev->device = PCI_PRODUCT(pa->pa_id);
		dev->pdev->subsystem_vendor = PCI_VENDOR(subsys);
		dev->pdev->subsystem_device = PCI_PRODUCT(subsys);
		dev->pdev->revision = PCI_REVISION(pa->pa_class);

		dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
		dev->pdev->bus = &dev->pdev->_bus;
		dev->pdev->bus->pc = pa->pa_pc;
		dev->pdev->bus->number = pa->pa_bus;
		dev->pdev->bus->domain_nr = pa->pa_domain;
		dev->pdev->bus->bridgetag = pa->pa_bridgetag;

		/* Fake an upstream bridge pci_dev; freed in drm_detach(). */
		if (pa->pa_bridgetag != NULL) {
			dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
			    M_DRM, M_WAITOK | M_ZERO);
			dev->pdev->bus->self->pc = pa->pa_pc;
			dev->pdev->bus->self->tag = *pa->pa_bridgetag;
		}

		dev->pdev->pc = pa->pa_pc;
		dev->pdev->tag = pa->pa_tag;
		dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

#ifdef CONFIG_ACPI
		/* Hook ACPI notifications for this PCI device, if present. */
		dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
		aml_register_notify(dev->pdev->dev.node, NULL,
		    drm_linux_acpi_notify, NULL, ACPIDEV_NOPOLL);
#endif
	}

	mtx_init(&dev->quiesce_mtx, IPL_NONE);
	mtx_init(&dev->event_lock, IPL_TTY);
	rw_init(&dev->struct_mutex, "drmdevlk");
	rw_init(&dev->filelist_mutex, "drmflist");
	rw_init(&dev->clientlist_mutex, "drmclist");
	rw_init(&dev->master_mutex, "drmmast");

	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		goto error;

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto error;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto error;

	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if IS_ENABLED(CONFIG_AGP)
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		/* Mark the AGP aperture write-combining if MTRRs allow it. */
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	/* Backing pool for GEM objects sized by the driver. */
	if (dev->driver->gem_size > 0) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
		    "drmobjpl", NULL);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	drmm_add_final_kfree(dev, dev);

	printf("\n");
	return;

error:
	/* Run managed-resource cleanup and mark the device unusable. */
	drm_managed_release(dev);
	dev->dev_private = NULL;
}
1432 
/*
 * Autoconf detach function for drm(4).  Reverses drm_attach(): the
 * last detaching device also tears down the shared core state.
 */
int
drm_detach(struct device *self, int flags)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_device *dev = sc->sc_drm;

	drm_refcnt--;
	if (drm_refcnt == 0) {
		drm_core_exit();
		drm_linux_exit();
	}

	drm_lastclose(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		if (dev->driver->gem_size > 0)
			pool_destroy(&dev->objpl);
	}

	/* Undo the write-combining MTRR set up in drm_attach(). */
	if (dev->agp && dev->agp->mtrr) {
		int retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	free(dev->agp, M_DRM, 0);
	/* Fake bridge pci_dev allocated in drm_attach() when bridgetag set. */
	if (dev->pdev && dev->pdev->bus)
		free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));

	/* Only free the drm_device if drm_attach() allocated it. */
	if (sc->sc_allocated)
		free(dev, M_DRM, sizeof(struct drm_device));

	return 0;
}
1469 
/*
 * Suspend path (DVACT_QUIESCE): raise the quiesce flag to hold off new
 * entries into the driver, then sleep until the in-flight count
 * (dev->quiesce_count) drains to zero.
 */
void
drm_quiesce(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", INFSLP);
	}
	mtx_leave(&dev->quiesce_mtx);
}
1481 
/*
 * Resume path (DVACT_WAKEUP): clear the quiesce flag and wake sleepers
 * on &dev->quiesce.  NOTE(review): the sleepers on this channel are
 * threads blocked elsewhere waiting for quiesce to clear, not
 * drm_quiesce() itself (which sleeps on &dev->quiesce_count) — confirm
 * against the quiesce_count users in drm_linux.c.
 */
void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
1490 
1491 int
1492 drm_activate(struct device *self, int act)
1493 {
1494 	struct drm_softc *sc = (struct drm_softc *)self;
1495 	struct drm_device *dev = sc->sc_drm;
1496 
1497 	switch (act) {
1498 	case DVACT_QUIESCE:
1499 		drm_quiesce(dev);
1500 		break;
1501 	case DVACT_WAKEUP:
1502 		drm_wakeup(dev);
1503 		break;
1504 	}
1505 
1506 	return (0);
1507 }
1508 
/* Autoconf glue: probe/attach/detach/activate entry points for drm(4). */
struct cfattach drm_ca = {
	sizeof(struct drm_softc), drm_probe, drm_attach,
	drm_detach, drm_activate
};

/* Autoconf driver record; cd_devs[] is indexed by unit in kdev lookups. */
struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
1517 
1518 const struct pci_device_id *
1519 drm_find_description(int vendor, int device, const struct pci_device_id *idlist)
1520 {
1521 	int i = 0;
1522 
1523 	for (i = 0; idlist[i].vendor != 0; i++) {
1524 		if ((idlist[i].vendor == vendor) &&
1525 		    (idlist[i].device == device) &&
1526 		    (idlist[i].subvendor == PCI_ANY_ID) &&
1527 		    (idlist[i].subdevice == PCI_ANY_ID))
1528 			return &idlist[i];
1529 	}
1530 	return NULL;
1531 }
1532 
1533 int
1534 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
1535 {
1536 	return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor);
1537 }
1538 
1539 SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
1540 
1541 struct drm_file *
1542 drm_find_file_by_minor(struct drm_device *dev, int minor)
1543 {
1544 	struct drm_file	key;
1545 
1546 	key.fminor = minor;
1547 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
1548 }
1549 
1550 struct drm_device *
1551 drm_get_device_from_kdev(dev_t kdev)
1552 {
1553 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
1554 	/* control */
1555 	if (unit >= 64 && unit < 128)
1556 		unit -= 64;
1557 	/* render */
1558 	if (unit >= 128)
1559 		unit -= 128;
1560 	struct drm_softc *sc;
1561 
1562 	if (unit < drm_cd.cd_ndevs) {
1563 		sc = (struct drm_softc *)drm_cd.cd_devs[unit];
1564 		if (sc)
1565 			return sc->sc_drm;
1566 	}
1567 
1568 	return NULL;
1569 }
1570 
1571 void
1572 filt_drmdetach(struct knote *kn)
1573 {
1574 	struct drm_device *dev = kn->kn_hook;
1575 	int s;
1576 
1577 	s = spltty();
1578 	klist_remove_locked(&dev->note, kn);
1579 	splx(s);
1580 }
1581 
1582 int
1583 filt_drmkms(struct knote *kn, long hint)
1584 {
1585 	if (kn->kn_sfflags & hint)
1586 		kn->kn_fflags |= hint;
1587 	return (kn->kn_fflags != 0);
1588 }
1589 
1590 void
1591 filt_drmreaddetach(struct knote *kn)
1592 {
1593 	struct drm_file		*file_priv = kn->kn_hook;
1594 	int s;
1595 
1596 	s = spltty();
1597 	klist_remove_locked(&file_priv->rsel.si_note, kn);
1598 	splx(s);
1599 }
1600 
/*
 * kqueue event filter for EVFILT_READ: readable while the file has
 * pending events queued.  When invoked from the event-posting path the
 * hint carries NOTE_SUBMIT and event_lock is already held, so only
 * take it ourselves for external polls.
 */
int
filt_drmread(struct knote *kn, long hint)
{
	struct drm_file		*file_priv = kn->kn_hook;
	int			 val = 0;

	if ((hint & NOTE_SUBMIT) == 0)
		mtx_enter(&file_priv->minor->dev->event_lock);
	val = !list_empty(&file_priv->event_list);
	if ((hint & NOTE_SUBMIT) == 0)
		mtx_leave(&file_priv->minor->dev->event_lock);
	return (val);
}
1614 
/* Filter ops for EVFILT_DEVICE notes (device-level notifications). */
const struct filterops drm_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmdetach,
	.f_event	= filt_drmkms,
};

/* Filter ops for EVFILT_READ notes (per-open-file event queue). */
const struct filterops drmread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmreaddetach,
	.f_event	= filt_drmread,
};
1628 
/*
 * kqfilter entry point for the drm device node.  EVFILT_READ notes are
 * hooked to the opening file's event queue, EVFILT_DEVICE notes to the
 * device itself.  Returns an errno, 0 on success.
 */
int
drmkqfilter(dev_t kdev, struct knote *kn)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv = NULL;
	int			 s;

	dev = drm_get_device_from_kdev(kdev);
	/* dev_private is cleared when attach failed; treat as no device. */
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mutex_lock(&dev->struct_mutex);
		file_priv = drm_find_file_by_minor(dev, minor(kdev));
		mutex_unlock(&dev->struct_mutex);
		if (file_priv == NULL)
			return (ENXIO);

		kn->kn_fop = &drmread_filtops;
		kn->kn_hook = file_priv;

		/* Note lists are protected at tty interrupt level. */
		s = spltty();
		klist_insert_locked(&file_priv->rsel.si_note, kn);
		splx(s);
		break;
	case EVFILT_DEVICE:
		kn->kn_fop = &drm_filtops;
		kn->kn_hook = dev;

		s = spltty();
		klist_insert_locked(&dev->note, kn);
		splx(s);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
1669 
/*
 * open(2) entry point for the drm device node.  Allocates a drm_file
 * for the opening minor, makes the first primary opener DRM master,
 * and performs legacy one-time setup on the very first open.
 * Returns an errno, 0 on success.
 */
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	struct drm_minor	*dm;
	int			 ret = 0;
	int			 dminor, realminor, minor_type;
	int need_setup = 0;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* First opener triggers the legacy setup below. */
	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* Decode the partitioned minor space: 0-63/64-127/128+. */
	dminor = minor(kdev);
	realminor =  dminor & ((1 << CLONE_SHIFT) - 1);
	if (realminor < 64)
		minor_type = DRM_MINOR_PRIMARY;
	else if (realminor >= 64 && realminor < 128)
		minor_type = DRM_MINOR_CONTROL;
	else
		minor_type = DRM_MINOR_RENDER;

	dm = *drm_minor_get_slot(dev, minor_type);
	dm->index = minor(kdev);

	file_priv = drm_file_alloc(dm);
	if (IS_ERR(file_priv)) {
		ret = ENOMEM;
		goto err;
	}

	/* first opener automatically becomes master */
	if (drm_is_primary_client(file_priv)) {
		ret = drm_master_open(file_priv);
		if (ret != 0)
			goto out_file_free;
	}

	file_priv->filp = (void *)file_priv;
	file_priv->fminor = minor(kdev);

	/* Register the open file so later ops can find it by minor. */
	mutex_lock(&dev->filelist_mutex);
	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);

	if (need_setup) {
		ret = drm_legacy_setup(dev);
		if (ret)
			goto out_file_free;
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

out_file_free:
	drm_file_free(file_priv);
err:
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return (ret);
}
1746 
/*
 * close(2) entry point for the drm device node.  Tears down the
 * drm_file registered in drmopen(); the last closer also runs
 * drm_lastclose().  Returns an errno, 0 on success.
 */
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		mutex_unlock(&dev->filelist_mutex);
		goto done;
	}

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);
	drm_file_free(file_priv);
done:
	/* open_count is decremented even on lookup failure (see goto). */
	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return (retcode);
}
1783 
/*
 * read(2) entry point: deliver queued drm events to userspace.
 * Blocks until at least one event is pending (unless IO_NDELAY), then
 * copies out as many whole events as fit in the buffer.
 */
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int		 		 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->filelist_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", INFSLP);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	/* drm_dequeue_event() drops event_lock before returning. */
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		kfree(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
1834 
1835 /*
1836  * Deqeue an event from the file priv in question. returning 1 if an
1837  * event was found. We take the resid from the read as a parameter because
1838  * we will only dequeue and event if the read buffer has space to fit the
1839  * entire thing.
1840  *
1841  * We are called locked, but we will *unlock* the queue on return so that
1842  * we may sleep to copyout the event.
1843  */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event *e = NULL;
	int gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
			     struct drm_pending_event, link);
	/* Events are atomic: skip if it wouldn't fit in the read buffer. */
	if (e->event->length > resid)
		goto out;

	/* Return the reserved queue space now that the event leaves. */
	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	gotone = 1;

out:
	/* Always unlock on return; see the contract in the comment above. */
	mtx_leave(&dev->event_lock);

	return (gotone);
}
1871 
/*
 * poll(2) entry point: the device is readable when the opening file has
 * pending events; otherwise record the selector for later wakeup.
 */
int
drmpoll(dev_t kdev, int events, struct proc *p)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_file		*file_priv;
	int		 	 revents = 0;

	if (dev == NULL)
		return (POLLERR);

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->filelist_mutex);
	if (file_priv == NULL)
		return (POLLERR);

	mtx_enter(&dev->event_lock);
	if (events & (POLLIN | POLLRDNORM)) {
		if (!list_empty(&file_priv->event_list))
			revents |=  events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &file_priv->rsel);
	}
	mtx_leave(&dev->event_lock);

	return (revents);
}
1899 
/* mmap(2) on the drm device node is not supported; always fail. */
paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	return -1;
}
1905 
/*
 * Allocate, map and load a DMA-safe memory region in one step.
 * Returns a drm_dmamem handle (freed with drm_dmamem_free()) or NULL
 * on any failure; partial setup is unwound via the goto ladder below.
 */
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

	/* Unwind in reverse order of setup. */
unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}
1952 
/*
 * Release a region obtained from drm_dmamem_alloc(): unload, unmap,
 * free and destroy in reverse order of setup.  NULL is a no-op.
 */
void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM, 0);
}
1965 
1966 struct drm_dma_handle *
1967 drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
1968 {
1969 	struct drm_dma_handle *dmah;
1970 
1971 	dmah = malloc(sizeof(*dmah), M_DRM, M_WAITOK);
1972 	dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
1973 	    BUS_DMA_NOCACHE, 0);
1974 	if (dmah->mem == NULL) {
1975 		free(dmah, M_DRM, sizeof(*dmah));
1976 		return NULL;
1977 	}
1978 	dmah->busaddr = dmah->mem->segs[0].ds_addr;
1979 	dmah->size = dmah->mem->size;
1980 	dmah->vaddr = dmah->mem->kva;
1981 	return (dmah);
1982 }
1983 
1984 void
1985 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
1986 {
1987 	if (dmah == NULL)
1988 		return;
1989 
1990 	drm_dmamem_free(dev->dmat, dmah->mem);
1991 	free(dmah, M_DRM, sizeof(*dmah));
1992 }
1993 
1994 /*
1995  * Compute order.  Can be made faster.
1996  */
1997 int
1998 drm_order(unsigned long size)
1999 {
2000 	int order;
2001 	unsigned long tmp;
2002 
2003 	for (order = 0, tmp = size; tmp >>= 1; ++order)
2004 		;
2005 
2006 	if (size & ~(1 << order))
2007 		++order;
2008 
2009 	return order;
2010 }
2011 
/*
 * DRM ioctl: report the PCI identity of the device to userspace.
 * Fails with -ENOTTY for non-PCI (platform) devices.
 */
int
drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_pciinfo *info = data;

	if (dev->pdev == NULL)
		return -ENOTTY;

	info->domain = dev->pdev->bus->domain_nr;
	info->bus = dev->pdev->bus->number;
	info->dev = PCI_SLOT(dev->pdev->devfn);
	info->func = PCI_FUNC(dev->pdev->devfn);
	info->vendor_id = dev->pdev->vendor;
	info->device_id = dev->pdev->device;
	info->subvendor_id = dev->pdev->subsystem_vendor;
	info->subdevice_id = dev->pdev->subsystem_device;
	/*
	 * NOTE(review): reported as 0 even though dev->pdev->revision is
	 * populated in drm_attach() — confirm whether this is deliberate.
	 */
	info->revision_id = 0;

	return 0;
}
2032