xref: /openbsd-src/sys/dev/pci/drm/drm_drv.c (revision 25c4e8bd056e974b28f4a0ffd39d76c190a56013)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/fcntl.h>
31 #include <sys/specdev.h>
32 #include <sys/vnode.h>
33 
34 #include <machine/bus.h>
35 
36 #ifdef __HAVE_ACPI
37 #include <dev/acpi/acpidev.h>
38 #include <dev/acpi/acpivar.h>
39 #include <dev/acpi/dsdt.h>
40 #endif
41 
42 #include <linux/debugfs.h>
43 #include <linux/fs.h>
44 #include <linux/module.h>
45 #include <linux/moduleparam.h>
46 #include <linux/mount.h>
47 #include <linux/pseudo_fs.h>
48 #include <linux/slab.h>
49 #include <linux/srcu.h>
50 
51 #include <drm/drm_cache.h>
52 #include <drm/drm_client.h>
53 #include <drm/drm_color_mgmt.h>
54 #include <drm/drm_drv.h>
55 #include <drm/drm_file.h>
56 #include <drm/drm_managed.h>
57 #include <drm/drm_mode_object.h>
58 #include <drm/drm_print.h>
59 
60 #include <drm/drm_gem.h>
61 
62 #include "drm_crtc_internal.h"
63 #include "drm_internal.h"
64 #include "drm_legacy.h"
65 
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

/* Protects drm_minors_idr; taken around every access to the idr below. */
static DEFINE_SPINLOCK(drm_minor_lock);
/* minor index -> struct drm_minor; slot holds NULL until the minor registers. */
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete;

/* debugfs root; only populated on Linux, forced to NULL in drm_minor_register() here. */
static struct dentry *drm_debugfs_root;

#ifdef notyet
DEFINE_STATIC_SRCU(drm_unplug_srcu);
#endif

/*
 * Some functions are only called once on init regardless of how many times
 * drm attaches.  In linux this is handled via module_init()/module_exit()
 */
int drm_refcnt;
93 
/* Autoconf softc wrapping a drm_device for attachment to the device tree. */
struct drm_softc {
	struct device		sc_dev;		/* base autoconf device */
	struct drm_device 	*sc_drm;	/* the DRM device this softc fronts */
	int			sc_allocated;	/* presumably: sc_drm allocated at attach — confirm against drm_attach */
};
99 
/* Arguments handed from a bus parent (e.g. PCI) to the drm attachment. */
struct drm_attach_args {
	struct drm_device		*drm;		/* pre-allocated device, if any */
	const struct drm_driver		*driver;	/* driver entry points */
	char				*busid;		/* bus identifier string */
	bus_dma_tag_t			 dmat;		/* DMA tag of the parent bus */
	bus_space_tag_t			 bst;		/* bus-space tag of the parent bus */
	size_t				 busid_len;	/* length of busid */
	int				 is_agp;	/* non-zero for AGP-attached devices */
	struct pci_attach_args		*pa;		/* PCI attach args, NULL if not PCI */
	int				 primary;	/* matches DRMDEVCF_PRIMARY config locator */
};
111 
/* One-shot init/teardown of the Linux-compat layer (see drm_refcnt above). */
void	drm_linux_init(void);
void	drm_linux_exit(void);
int	drm_linux_acpi_notify(struct aml_node *, int, void *);

/* Pull the next pending event for a file if it fits in the read buffer. */
int	drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
	    struct drm_pending_event **);

/* Autoconf glue: print/submatch helpers for child attachment. */
int	drmprint(void *, const char *);
int	drmsubmatch(struct device *, void *, void *);
const struct pci_device_id *
	drm_find_description(int, int, const struct pci_device_id *);

/* Ordering for the per-device tree of open drm_file instances. */
int	drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);

/* Config-file locator: was this drm device spec'd as the primary one? */
#define DRMDEVCF_PRIMARY	0
#define drmdevcf_primary	cf_loc[DRMDEVCF_PRIMARY]	/* spec'd as primary? */
#define DRMDEVCF_PRIMARY_UNK	-1
130 
131 /*
132  * DRM Minors
133  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
134  * of them is represented by a drm_minor object. Depending on the capabilities
135  * of the device-driver, different interfaces are registered.
136  *
137  * Minors can be accessed via dev->$minor_name. This pointer is either
138  * NULL or a valid drm_minor pointer and stays valid as long as the device is
139  * valid. This means, DRM minors have the same life-time as the underlying
140  * device. However, this doesn't mean that the minor is active. Minors are
141  * registered and unregistered dynamically according to device-state.
142  */
143 
144 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
145 					     unsigned int type)
146 {
147 	switch (type) {
148 	case DRM_MINOR_PRIMARY:
149 		return &dev->primary;
150 	case DRM_MINOR_RENDER:
151 		return &dev->render;
152 	default:
153 		BUG();
154 	}
155 }
156 
/*
 * drmm release action paired with drm_minor_alloc(): drop the sysfs device
 * (Linux only) and free the minor's slot in the global minor idr.
 */
static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	/* The action was registered against the minor's own device. */
	WARN_ON(dev != minor->dev);

#ifdef __linux__
	put_device(minor->kdev);
#endif

	/* drm_minor_lock guards all accesses to drm_minors_idr. */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
}
172 
/*
 * Allocate a drm_minor of the given type for @dev and reserve its index in
 * the global minor idr.  The idr entry is inserted as NULL so lookups fail
 * until drm_minor_register() replaces it with the minor.  Cleanup is tied
 * to the device via a drmm action.  Returns 0 or a negative errno.
 */
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/* Each minor type owns a 64-wide index range: [64*type, 64*(type+1)). */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	/* On failure this runs drm_minor_alloc_release() immediately. */
	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

#ifdef __linux__
	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);
#endif

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}
214 
/*
 * Publish a previously allocated minor: set up debugfs/sysfs (Linux only)
 * and swap the minor into its reserved idr slot so drm_minor_acquire()
 * can find it.  A missing minor of this type is not an error.
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
#ifdef __linux__
	int ret;
#endif

	DRM_DEBUG("\n");

	/* Nothing to do if drm_minor_alloc() never created this type. */
	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

#ifdef __linux__
	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;
#else
	/* No debugfs on this platform. */
	drm_debugfs_root = NULL;
#endif

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

#ifdef __linux__
err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
#endif
}
257 
/*
 * Reverse of drm_minor_register(): hide the minor from lookups by putting
 * NULL back into its idr slot, then tear down the sysfs/debugfs state.
 * The minor itself stays allocated until drm_minor_alloc_release() runs.
 */
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
#ifdef __linux__
	if (!minor || !device_is_registered(minor->kdev))
#else
	if (!minor)
#endif
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

#ifdef __linux__
	device_del(minor->kdev);
#endif
	/* NOTE(review): runs on both platforms here — assumes minor->kdev is
	 * valid/stubbed on non-Linux too; confirm against the compat layer. */
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
282 
283 /*
284  * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
286  * object with drm_minor_release().
287  *
288  * As long as you hold this minor, it is guaranteed that the object and the
289  * minor->dev pointer will stay valid! However, the device may get unplugged and
290  * unregistered while you hold the minor.
291  */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	/* Take the device reference under the lock so the minor cannot be
	 * torn down between lookup and drm_dev_get(). */
	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		/* Device vanished after lookup: drop the reference we took. */
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}
312 
/* Drop the device reference taken by drm_minor_acquire(). */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}
317 
318 /**
319  * DOC: driver instance overview
320  *
321  * A device instance for a drm driver is represented by &struct drm_device. This
322  * is allocated and initialized with devm_drm_dev_alloc(), usually from
323  * bus-specific ->probe() callbacks implemented by the driver. The driver then
324  * needs to initialize all the various subsystems for the drm device like memory
325  * management, vblank handling, modesetting support and initial output
326  * configuration plus obviously initialize all the corresponding hardware bits.
327  * Finally when everything is up and running and ready for userspace the device
328  * instance can be published using drm_dev_register().
329  *
330  * There is also deprecated support for initializing device instances using
331  * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
335  *
336  * When cleaning up a device instance everything needs to be done in reverse:
337  * First unpublish the device instance with drm_dev_unregister(). Then clean up
338  * any other resources allocated at device initialization and drop the driver's
339  * reference to &drm_device using drm_dev_put().
340  *
341  * Note that any allocation or resource which is visible to userspace must be
342  * released only when the final drm_dev_put() is called, and not when the
343  * driver is unbound from the underlying physical struct &device. Best to use
344  * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
345  * related functions.
346  *
347  * devres managed resources like devm_kmalloc() can only be used for resources
348  * directly related to the underlying hardware device, and only used in code
349  * paths fully protected by drm_dev_enter() and drm_dev_exit().
350  *
351  * Display driver example
352  * ~~~~~~~~~~~~~~~~~~~~~~
353  *
354  * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that are
 * almost always present and serve as a demonstration of devm_drm_dev_alloc().
357  *
358  * .. code-block:: c
359  *
360  *	struct driver_device {
361  *		struct drm_device drm;
362  *		void *userspace_facing;
363  *		struct clk *pclk;
364  *	};
365  *
366  *	static const struct drm_driver driver_drm_driver = {
367  *		[...]
368  *	};
369  *
370  *	static int driver_probe(struct platform_device *pdev)
371  *	{
372  *		struct driver_device *priv;
373  *		struct drm_device *drm;
374  *		int ret;
375  *
376  *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
377  *					  struct driver_device, drm);
378  *		if (IS_ERR(priv))
379  *			return PTR_ERR(priv);
380  *		drm = &priv->drm;
381  *
382  *		ret = drmm_mode_config_init(drm);
383  *		if (ret)
384  *			return ret;
385  *
386  *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
387  *		if (!priv->userspace_facing)
388  *			return -ENOMEM;
389  *
390  *		priv->pclk = devm_clk_get(dev, "PCLK");
391  *		if (IS_ERR(priv->pclk))
392  *			return PTR_ERR(priv->pclk);
393  *
394  *		// Further setup, display pipeline etc
395  *
396  *		platform_set_drvdata(pdev, drm);
397  *
398  *		drm_mode_config_reset(drm);
399  *
400  *		ret = drm_dev_register(drm);
401  *		if (ret)
402  *			return ret;
403  *
404  *		drm_fbdev_generic_setup(drm, 32);
405  *
406  *		return 0;
407  *	}
408  *
409  *	// This function is called before the devm_ resources are released
410  *	static int driver_remove(struct platform_device *pdev)
411  *	{
412  *		struct drm_device *drm = platform_get_drvdata(pdev);
413  *
414  *		drm_dev_unregister(drm);
415  *		drm_atomic_helper_shutdown(drm)
416  *
417  *		return 0;
418  *	}
419  *
420  *	// This function is called on kernel restart and shutdown
421  *	static void driver_shutdown(struct platform_device *pdev)
422  *	{
423  *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
424  *	}
425  *
426  *	static int __maybe_unused driver_pm_suspend(struct device *dev)
427  *	{
428  *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
429  *	}
430  *
431  *	static int __maybe_unused driver_pm_resume(struct device *dev)
432  *	{
433  *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
434  *
435  *		return 0;
436  *	}
437  *
438  *	static const struct dev_pm_ops driver_pm_ops = {
439  *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
440  *	};
441  *
442  *	static struct platform_driver driver_driver = {
443  *		.driver = {
444  *			[...]
445  *			.pm = &driver_pm_ops,
446  *		},
447  *		.probe = driver_probe,
448  *		.remove = driver_remove,
449  *		.shutdown = driver_shutdown,
450  *	};
451  *	module_platform_driver(driver_driver);
452  *
453  * Drivers that want to support device unplugging (USB, DT overlay unload) should
454  * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
 * regions that are accessing device resources to prevent use after they're
456  * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
457  * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
458  * drm_atomic_helper_shutdown() is called. This means that if the disable code
459  * paths are protected, they will not run on regular driver module unload,
460  * possibly leaving the hardware enabled.
461  */
462 
463 /**
464  * drm_put_dev - Unregister and release a DRM device
465  * @dev: DRM device
466  *
467  * Called at module unload time or when a PCI device is unplugged.
468  *
469  * Cleans up all DRM device, calling drm_lastclose().
470  *
471  * Note: Use of this function is deprecated. It will eventually go away
472  * completely.  Please use drm_dev_unregister() and drm_dev_put() explicitly
473  * instead to make sure that the device isn't userspace accessible any more
474  * while teardown is in progress, ensuring that userspace can't access an
475  * inconsistent state.
476  */
477 void drm_put_dev(struct drm_device *dev)
478 {
479 	DRM_DEBUG("\n");
480 
481 	if (!dev) {
482 		DRM_ERROR("cleanup called no dev\n");
483 		return;
484 	}
485 
486 	drm_dev_unregister(dev);
487 	drm_dev_put(dev);
488 }
489 EXPORT_SYMBOL(drm_put_dev);
490 
491 /**
492  * drm_dev_enter - Enter device critical section
493  * @dev: DRM device
494  * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
495  *
496  * This function marks and protects the beginning of a section that should not
497  * be entered after the device has been unplugged. The section end is marked
498  * with drm_dev_exit(). Calls to this function can be nested.
499  *
500  * Returns:
501  * True if it is OK to enter the section, false otherwise.
502  */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
	/* SRCU-based unplug protection is not wired up in this port yet
	 * ("notyet"); entering the critical section always succeeds. */
#ifdef notyet
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}
#endif

	return true;
}
EXPORT_SYMBOL(drm_dev_enter);
517 
518 /**
519  * drm_dev_exit - Exit device critical section
520  * @idx: index returned from drm_dev_enter()
521  *
522  * This function marks the end of a section that should not be entered after
523  * the device has been unplugged.
524  */
void drm_dev_exit(int idx)
{
	/* Counterpart of drm_dev_enter(); a no-op while SRCU is "notyet". */
#ifdef notyet
	srcu_read_unlock(&drm_unplug_srcu, idx);
#endif
}
EXPORT_SYMBOL(drm_dev_exit);
532 
533 /**
534  * drm_dev_unplug - unplug a DRM device
535  * @dev: DRM device
536  *
537  * This unplugs a hotpluggable DRM device, which makes it inaccessible to
538  * userspace operations. Entry-points can use drm_dev_enter() and
539  * drm_dev_exit() to protect device resources in a race free manner. This
540  * essentially unregisters the device like drm_dev_unregister(), but can be
541  * called while there are still open users of @dev.
542  */
void drm_dev_unplug(struct drm_device *dev)
{
	/* Unimplemented in this port: logs via STUB(); the Linux logic below
	 * is compiled out until the SRCU machinery exists ("notyet"). */
	STUB();
#ifdef notyet
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);

	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
#endif
}
EXPORT_SYMBOL(drm_dev_unplug);
563 
564 #ifdef __linux__
565 /*
566  * DRM internal mount
567  * We want to be able to allocate our own "struct address_space" to control
568  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
569  * stand-alone address_space objects, so we need an underlying inode. As there
570  * is no way to allocate an independent inode easily, we need a fake internal
571  * VFS mount-point.
572  *
573  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
574  * frees it again. You are allowed to use iget() and iput() to get references to
575  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
576  * drm_fs_inode_free() call (which does not have to be the last iput()).
577  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
578  * between multiple inode-users. You could, technically, call
579  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
580  * iput(), but this way you'd end up with a new vfsmount for each inode.
581  */
582 
/* Pin count and shared mount of the internal pseudo filesystem (Linux only). */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
	/* 0x010203ff is the pseudo-fs magic number used for this mount. */
	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
597 
/*
 * Allocate an anonymous inode on the internal drm mount, pinning the mount
 * for the lifetime of the inode.  Pair with drm_fs_inode_free().
 * Returns the inode or an ERR_PTR.
 */
static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		/* Undo the pin taken above on allocation failure. */
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}
615 
616 static void drm_fs_inode_free(struct inode *inode)
617 {
618 	if (inode) {
619 		iput(inode);
620 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
621 	}
622 }
623 
624 #endif /* __linux__ */
625 
626 /**
627  * DOC: component helper usage recommendations
628  *
629  * DRM drivers that drive hardware where a logical device consists of a pile of
630  * independent hardware blocks are recommended to use the :ref:`component helper
631  * library<component>`. For consistency and better options for code reuse the
632  * following guidelines apply:
633  *
634  *  - The entire device initialization procedure should be run from the
635  *    &component_master_ops.master_bind callback, starting with
636  *    devm_drm_dev_alloc(), then binding all components with
637  *    component_bind_all() and finishing with drm_dev_register().
638  *
639  *  - The opaque pointer passed to all components through component_bind_all()
640  *    should point at &struct drm_device of the device instance, not some driver
641  *    specific private structure.
642  *
643  *  - The component helper fills the niche where further standardization of
644  *    interfaces is not practical. When there already is, or will be, a
645  *    standardized interface like &drm_bridge or &drm_panel, providing its own
646  *    functions to find such components at driver load time, like
647  *    drm_of_find_panel_or_bridge(), then the component helper should not be
648  *    used.
649  */
650 
/*
 * drmm release action registered by drm_dev_init(): undoes the legacy,
 * inode and mutex setup in reverse order of initialization.
 */
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
#ifdef __linux__
	drm_fs_inode_free(dev->anon_inode);

	/* Drops the parent reference taken by get_device() in drm_dev_init(). */
	put_device(dev->dev);
#endif
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
}
669 
670 #ifdef notyet
671 
/*
 * Initialize an already-allocated drm_device: reference counting, managed
 * resources, lists/locks, anonymous inode, minors, legacy state, GEM and
 * the unique name.  On any failure all managed resources registered so far
 * are released.  Returns 0 or a negative errno.
 * (Compiled out in this port: enclosing "#ifdef notyet".)
 */
static int drm_dev_init(struct drm_device *dev,
			const struct drm_driver *driver,
			struct device *parent)
{
	struct inode *inode;
	int ret;

	/* Refuse to init devices if the core itself never came up. */
	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	/* Hold the parent; released in drm_dev_init_release(). */
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);

	/* From here on, failures are cleaned up via the managed release path. */
	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	inode = drm_fs_inode_new();
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	dev->anon_inode = inode;

	/* Render minor only for drivers that advertise DRIVER_RENDER. */
	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err;

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	/* Default unique name is the parent device name; drivers may override. */
	ret = drm_dev_set_unique(dev, dev_name(parent));
	if (ret)
		goto err;

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}
757 
/* devres action: drop the devm-held drm_device reference on parent unbind. */
static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}
762 
/*
 * Initialize @dev and tie its final drm_dev_put() to the lifetime of the
 * @parent device via devres.  Returns 0 or a negative errno.
 */
static int devm_drm_dev_init(struct device *parent,
			     struct drm_device *dev,
			     const struct drm_driver *driver)
{
	int ret;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	/* On registration failure this runs the release (and the put) now. */
	return devm_add_action_or_reset(parent,
					devm_drm_dev_init_release, dev);
}
776 
/*
 * Backend of devm_drm_dev_alloc(): allocate a driver container of @size
 * bytes whose embedded drm_device sits at @offset, initialize it, and
 * arrange for the container to be freed with the device's final unref.
 * Returns the container or an ERR_PTR.
 */
void *__devm_drm_dev_alloc(struct device *parent,
			   const struct drm_driver *driver,
			   size_t size, size_t offset)
{
	void *container;
	struct drm_device *drm;
	int ret;

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}
	/* The whole container is kfree'd when the last reference drops. */
	drmm_add_final_kfree(drm, container);

	return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);
800 
801 /**
802  * drm_dev_alloc - Allocate new DRM device
803  * @driver: DRM driver to allocate device for
804  * @parent: Parent device object
805  *
806  * This is the deprecated version of devm_drm_dev_alloc(), which does not support
807  * subclassing through embedding the struct &drm_device in a driver private
808  * structure, and which does not support automatic cleanup through devres.
809  *
810  * RETURNS:
811  * Pointer to new DRM device, or ERR_PTR on failure.
812  */
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(dev, driver, parent);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}

	/* Unlike __devm_drm_dev_alloc(), the device itself is the allocation. */
	drmm_add_final_kfree(dev, dev);

	return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);
834 
835 #endif
836 
/*
 * kref release callback for the last drm_dev_put(): run the driver's
 * optional release hook, tear down all managed resources, then free the
 * final allocation registered via drmm_add_final_kfree().
 */
static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	kfree(dev->managed.final_kfree);
}
848 
849 /**
850  * drm_dev_get - Take reference of a DRM device
851  * @dev: device to take reference of or NULL
852  *
853  * This increases the ref-count of @dev by one. You *must* already own a
854  * reference when calling this. Use drm_dev_put() to drop this reference
855  * again.
856  *
857  * This function never fails. However, this function does not provide *any*
858  * guarantee whether the device is alive or running. It only provides a
859  * reference to the object and the memory associated with it.
860  */
861 void drm_dev_get(struct drm_device *dev)
862 {
863 	if (dev)
864 		kref_get(&dev->ref);
865 }
866 EXPORT_SYMBOL(drm_dev_get);
867 
868 /**
869  * drm_dev_put - Drop reference of a DRM device
870  * @dev: device to drop reference of or NULL
871  *
872  * This decreases the ref-count of @dev by one. The device is destroyed if the
873  * ref-count drops to zero.
874  */
875 void drm_dev_put(struct drm_device *dev)
876 {
877 	if (dev)
878 		kref_put(&dev->ref, drm_dev_release);
879 }
880 EXPORT_SYMBOL(drm_dev_put);
881 
/*
 * For modeset drivers, create the legacy "controlD<N>" sysfs symlink that
 * old userspace probes for.  Harmless no-op for non-modeset drivers or
 * when no primary minor exists.  Returns 0 or a negative errno.
 */
static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existence of the controlD*
	 * sysfs files to figure out whether it's a modeset driver. It only does
	 * readdir, hence a symlink is sufficient (and the least confusing
	 * option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardev have been allocated in the range
	 * 64-127.
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}
916 
/* Undo create_compat_control_link(): remove the controlD<N> symlink. */
static void remove_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return;

	/* Must match the name format used in create_compat_control_link(). */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return;

	sysfs_remove_link(minor->kdev->kobj.parent, name);

	kfree(name);
}
937 
938 /**
939  * drm_dev_register - Register DRM device
940  * @dev: Device to register
941  * @flags: Flags passed to the driver's .load() function
942  *
943  * Register the DRM device @dev with the system, advertise device to user-space
944  * and start normal device operation. @dev must be initialized via drm_dev_init()
945  * previously.
946  *
947  * Never call this twice on any device!
948  *
949  * NOTE: To ensure backward compatibility with existing drivers method this
950  * function calls the &drm_driver.load method after registering the device
951  * nodes, creating race conditions. Usage of the &drm_driver.load methods is
952  * therefore deprecated, drivers must perform all initialization before calling
953  * drm_dev_register().
954  *
955  * RETURNS:
956  * 0 on success, negative error code on failure.
957  */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	const struct drm_driver *driver = dev->driver;
	int ret;

	/* Drivers without a legacy .load hook get their mode config sanity
	 * checked before publishing. */
	if (!driver->load)
		drm_mode_config_validate(dev);

	/* Catches devices that skipped drmm_add_final_kfree(). */
	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	/* Deprecated: .load runs after the device is already visible. */
	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary->index);

	/* ret is 0 here; success and failure share the unlock path. */
	goto out_unlock;

err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
1012 
1013 /**
1014  * drm_dev_unregister - Unregister DRM device
1015  * @dev: Device to unregister
1016  *
1017  * Unregister the DRM device from the system. This does the reverse of
1018  * drm_dev_register() but does not deallocate the device. The caller must call
1019  * drm_dev_put() to drop their final reference.
1020  *
1021  * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
1022  * which can be called while there are still open users of @dev.
1023  *
1024  * This should be called first in the device teardown code to make sure
1025  * userspace can't access the device instance any more.
1026  */
void drm_dev_unregister(struct drm_device *dev)
{
	/* Legacy drivers expect a final lastclose before teardown. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	/* Cut off new userspace access before tearing anything down. */
	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	drm_legacy_pci_agp_destroy(dev);
	drm_legacy_rmmaps(dev);

	/* Reverse of the registration order in drm_dev_register(). */
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
1049 EXPORT_SYMBOL(drm_dev_unregister);
1050 
1051 /**
1052  * drm_dev_set_unique - Set the unique name of a DRM device
1053  * @dev: device of which to set the unique name
1054  * @name: unique name
1055  *
1056  * Sets the unique name of a DRM device using the specified string. This is
1057  * already done by drm_dev_init(), drivers should only override the default
1058  * unique name for backwards compatibility reasons.
1059  *
1060  * Return: 0 on success or a negative error code on failure.
1061  */
1062 int drm_dev_set_unique(struct drm_device *dev, const char *name)
1063 {
1064 	drmm_kfree(dev, dev->unique);
1065 	dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
1066 
1067 	return dev->unique ? 0 : -ENOMEM;
1068 }
1069 EXPORT_SYMBOL(drm_dev_set_unique);
1070 
1071 /*
1072  * DRM Core
1073  * The DRM core module initializes all global DRM objects and makes them
1074  * available to drivers. Once setup, drivers can probe their respective
1075  * devices.
1076  * Currently, core management includes:
1077  *  - The "DRM-Global" key/value database
1078  *  - Global ID management for connectors
1079  *  - DRM major number allocation
1080  *  - DRM minor management
1081  *  - DRM sysfs class
1082  *  - DRM debugfs root
1083  *
1084  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
1085  * interface registered on a DRM device, you can request minor numbers from DRM
1086  * core. DRM core takes care of major-number management and char-dev
1087  * registration. A stub ->open() callback forwards any open() requests to the
1088  * registered minor.
1089  */
1090 
1091 #ifdef __linux__
/*
 * Linux-only stub open: looks up the DRM minor for the chardev being
 * opened, swaps the file's fops for the owning driver's fops, and
 * forwards the open() to the driver.
 */
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	/* Takes a reference on the minor; released below. */
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	/* Pin the driver module's fops for the lifetime of the file. */
	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	/* From here on the file is serviced by the driver's fops. */
	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}
1121 
/* Fops installed on the DRM major; drm_stub_open() redirects to the driver. */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
1127 #endif /* __linux__ */
1128 
/* Tear down global DRM core state; reverse of drm_core_init(). */
static void drm_core_exit(void)
{
#ifdef __linux__
	/* Linux-only chardev/debugfs/sysfs teardown. */
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
#endif
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}
1139 
/*
 * Initialize global DRM core state (connector IDA, minor IDR, memcpy
 * backends; plus sysfs/debugfs/chardev on Linux).  On OpenBSD this is
 * called from drm_attach() for the first device rather than module load.
 */
static int __init drm_core_init(void)
{
#ifdef __linux__
	int ret;
#endif

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);
	drm_memcpy_init_early();

#ifdef __linux__
	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;
#endif

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;
	/* error label only reachable from the Linux-only failure paths. */
#ifdef __linux__
error:
	drm_core_exit();
	return ret;
#endif
}
1174 
1175 #ifdef __linux__
1176 module_init(drm_core_init);
1177 module_exit(drm_core_exit);
1178 #endif
1179 
1180 void
1181 drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot,
1182     bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm)
1183 {
1184 	struct drm_attach_args arg;
1185 
1186 	memset(&arg, 0, sizeof(arg));
1187 	arg.driver = driver;
1188 	arg.bst = iot;
1189 	arg.dmat = dmat;
1190 	arg.drm = drm;
1191 
1192 	arg.busid = dev->dv_xname;
1193 	arg.busid_len = strlen(dev->dv_xname) + 1;
1194 	config_found_sm(dev, &arg, drmprint, drmsubmatch);
1195 }
1196 
/*
 * Attach a PCI drm device below @dev.  Builds a "pci:dddd:bb:dd.f" busid
 * (ownership passes to the attached device via drm_attach()) and returns
 * the resulting drm_device, or NULL on failure.
 */
struct drm_device *
drm_attach_pci(const struct drm_driver *driver, struct pci_attach_args *pa,
    int is_agp, int primary, struct device *dev, struct drm_device *drm)
{
	struct drm_attach_args arg;
	struct drm_softc *sc;

	/*
	 * NOTE(review): unlike drm_attach_platform(), arg is not zeroed
	 * here; only the fields below are initialized — confirm no other
	 * drm_attach_args fields are consumed on this path.
	 */
	arg.drm = drm;
	arg.driver = driver;
	arg.dmat = pa->pa_dmat;
	arg.bst = pa->pa_memt;
	arg.is_agp = is_agp;
	arg.primary = primary;
	arg.pa = pa;

	/* 20 bytes is enough for "pci:%04x:%02x:%02x.%1x" plus NUL. */
	arg.busid_len = 20;
	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
	if (arg.busid == NULL) {
		printf("%s: no memory for drm\n", dev->dv_xname);
		return (NULL);
	}
	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);

	sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
	if (sc == NULL)
		return NULL;

	return sc->sc_drm;
}
1227 
1228 int
1229 drmprint(void *aux, const char *pnp)
1230 {
1231 	if (pnp != NULL)
1232 		printf("drm at %s", pnp);
1233 	return (UNCONF);
1234 }
1235 
1236 int
1237 drmsubmatch(struct device *parent, void *match, void *aux)
1238 {
1239 	extern struct cfdriver drm_cd;
1240 	struct cfdata *cf = match;
1241 
1242 	/* only allow drm to attach */
1243 	if (cf->cf_driver == &drm_cd)
1244 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
1245 	return (0);
1246 }
1247 
1248 int
1249 drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist)
1250 {
1251 	const struct pci_device_id *id_entry;
1252 
1253 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
1254 	    PCI_PRODUCT(pa->pa_id), idlist);
1255 	if (id_entry != NULL)
1256 		return 1;
1257 
1258 	return 0;
1259 }
1260 
1261 int
1262 drm_probe(struct device *parent, void *match, void *aux)
1263 {
1264 	struct cfdata *cf = match;
1265 	struct drm_attach_args *da = aux;
1266 
1267 	if (cf->drmdevcf_primary != DRMDEVCF_PRIMARY_UNK) {
1268 		/*
1269 		 * If primary-ness of device specified, either match
1270 		 * exactly (at high priority), or fail.
1271 		 */
1272 		if (cf->drmdevcf_primary != 0 && da->primary != 0)
1273 			return (10);
1274 		else
1275 			return (0);
1276 	}
1277 
1278 	/* If primary-ness unspecified, it wins. */
1279 	return (1);
1280 }
1281 
/*
 * autoconf attach function: wire up a struct drm_device for this softc.
 * Initializes global core state on the first attach, fills in the OpenBSD
 * pci_dev emulation from the attach args, and allocates minors/GEM state.
 */
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_attach_args *da = aux;
	struct drm_device *dev = da->drm;
	int ret;

	/* First drm device brings up the shared core (see drm_detach()). */
	if (drm_refcnt == 0) {
		drm_linux_init();
		drm_core_init();
	}
	drm_refcnt++;

	/*
	 * Callers may embed the drm_device in their softc; otherwise
	 * allocate one here and remember to free it in drm_detach().
	 */
	if (dev == NULL) {
		dev = malloc(sizeof(struct drm_device), M_DRM,
		    M_WAITOK | M_ZERO);
		sc->sc_allocated = 1;
	}

	sc->sc_drm = dev;

	dev->dev = self;
	dev->dev_private = parent;
	dev->driver = da->driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	mtx_init(&dev->managed.lock, IPL_TTY);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->unique = da->busid;	/* takes ownership of the busid string */

	if (da->pa) {
		struct pci_attach_args *pa = da->pa;
		pcireg_t subsys;

		subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);

		/* Populate the Linux-style pci_dev shim from pci(4) data. */
		dev->pdev = &dev->_pdev;
		dev->pdev->vendor = PCI_VENDOR(pa->pa_id);
		dev->pdev->device = PCI_PRODUCT(pa->pa_id);
		dev->pdev->subsystem_vendor = PCI_VENDOR(subsys);
		dev->pdev->subsystem_device = PCI_PRODUCT(subsys);
		dev->pdev->revision = PCI_REVISION(pa->pa_class);
		dev->pdev->class = (PCI_CLASS(pa->pa_class) << 16) |
		    (PCI_SUBCLASS(pa->pa_class) << 8) |
		    PCI_INTERFACE(pa->pa_class);

		dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
		dev->pdev->bus = &dev->pdev->_bus;
		dev->pdev->bus->pc = pa->pa_pc;
		dev->pdev->bus->number = pa->pa_bus;
		dev->pdev->bus->domain_nr = pa->pa_domain;
		dev->pdev->bus->bridgetag = pa->pa_bridgetag;

		/* Fake the upstream bridge as a pci_dev (freed in detach). */
		if (pa->pa_bridgetag != NULL) {
			dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
			    M_DRM, M_WAITOK | M_ZERO);
			dev->pdev->bus->self->pc = pa->pa_pc;
			dev->pdev->bus->self->tag = *pa->pa_bridgetag;
		}

		dev->pdev->pc = pa->pa_pc;
		dev->pdev->tag = pa->pa_tag;
		dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

#ifdef CONFIG_ACPI
		/* Hook ACPI notifications for this PCI device, if any. */
		dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
		aml_register_notify(dev->pdev->dev.node, NULL,
		    drm_linux_acpi_notify, NULL, ACPIDEV_NOPOLL);
#endif
	}

	mtx_init(&dev->quiesce_mtx, IPL_NONE);
	mtx_init(&dev->event_lock, IPL_TTY);
	rw_init(&dev->struct_mutex, "drmdevlk");
	rw_init(&dev->filelist_mutex, "drmflist");
	rw_init(&dev->clientlist_mutex, "drmclist");
	rw_init(&dev->master_mutex, "drmmast");

	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		goto error;

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto error;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto error;

#ifdef CONFIG_DRM_LEGACY
	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if IS_ENABLED(CONFIG_AGP)
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		/* Mark the AGP aperture write-combining if MTRRs allow. */
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}
#endif

	/* Per-device pool for GEM objects of the driver's declared size. */
	if (dev->driver->gem_size > 0) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
		    "drmobjpl", NULL);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	drmm_add_final_kfree(dev, dev);

	printf("\n");
	return;

error:
	/* Roll back managed resources; flag failure via dev_private. */
	drm_managed_release(dev);
	dev->dev_private = NULL;
}
1424 
/*
 * autoconf detach function: undo drm_attach().  Tears down the shared
 * core when the last drm device goes away and releases per-device
 * AGP/GEM/pci_dev resources.
 */
int
drm_detach(struct device *self, int flags)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_device *dev = sc->sc_drm;

	/*
	 * NOTE(review): the shared core is torn down before this device's
	 * own cleanup below — confirm nothing after this point depends on
	 * core state when the last device detaches.
	 */
	drm_refcnt--;
	if (drm_refcnt == 0) {
		drm_core_exit();
		drm_linux_exit();
	}

	drm_lastclose(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		if (dev->driver->gem_size > 0)
			pool_destroy(&dev->objpl);
	}

#ifdef CONFIG_DRM_LEGACY
	/* Remove the write-combining MTRR added in drm_attach(). */
	if (dev->agp && dev->agp->mtrr) {
		int retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	free(dev->agp, M_DRM, 0);
#endif
	/* Free the fake upstream-bridge pci_dev allocated in drm_attach(). */
	if (dev->pdev && dev->pdev->bus)
		free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));

	/* Only free dev if drm_attach() allocated it (not embedded). */
	if (sc->sc_allocated)
		free(dev, M_DRM, sizeof(struct drm_device));

	return 0;
}
1463 
/*
 * Suspend path: raise dev->quiesce so new activity stalls, then sleep
 * until all in-flight users have drained (quiesce_count reaches zero;
 * the users decrementing it and waking this channel are outside this
 * file).
 */
void
drm_quiesce(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", INFSLP);
	}
	mtx_leave(&dev->quiesce_mtx);
}
1475 
/*
 * Resume path: clear the quiesce flag and wake sleepers blocked on it.
 * NOTE(review): the wakeup channel is &dev->quiesce, not &dev->quiesce_count
 * — the corresponding sleepers are presumably in the entry points that
 * stall while dev->quiesce is set; confirm against those callers.
 */
void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
1484 
1485 int
1486 drm_activate(struct device *self, int act)
1487 {
1488 	struct drm_softc *sc = (struct drm_softc *)self;
1489 	struct drm_device *dev = sc->sc_drm;
1490 
1491 	switch (act) {
1492 	case DVACT_QUIESCE:
1493 		drm_quiesce(dev);
1494 		break;
1495 	case DVACT_WAKEUP:
1496 		drm_wakeup(dev);
1497 		break;
1498 	}
1499 
1500 	return (0);
1501 }
1502 
/* autoconf attachment glue for drm(4). */
const struct cfattach drm_ca = {
	sizeof(struct drm_softc), drm_probe, drm_attach,
	drm_detach, drm_activate
};
1507 
/* autoconf driver description for drm(4). */
struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
1511 
1512 const struct pci_device_id *
1513 drm_find_description(int vendor, int device, const struct pci_device_id *idlist)
1514 {
1515 	int i = 0;
1516 
1517 	for (i = 0; idlist[i].vendor != 0; i++) {
1518 		if ((idlist[i].vendor == vendor) &&
1519 		    (idlist[i].device == device) &&
1520 		    (idlist[i].subvendor == PCI_ANY_ID) &&
1521 		    (idlist[i].subdevice == PCI_ANY_ID))
1522 			return &idlist[i];
1523 	}
1524 	return NULL;
1525 }
1526 
1527 int
1528 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
1529 {
1530 	return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor);
1531 }
1532 
1533 SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
1534 
1535 struct drm_file *
1536 drm_find_file_by_minor(struct drm_device *dev, int minor)
1537 {
1538 	struct drm_file	key;
1539 
1540 	key.fminor = minor;
1541 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
1542 }
1543 
1544 struct drm_device *
1545 drm_get_device_from_kdev(dev_t kdev)
1546 {
1547 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
1548 	/* control */
1549 	if (unit >= 64 && unit < 128)
1550 		unit -= 64;
1551 	/* render */
1552 	if (unit >= 128)
1553 		unit -= 128;
1554 	struct drm_softc *sc;
1555 
1556 	if (unit < drm_cd.cd_ndevs) {
1557 		sc = (struct drm_softc *)drm_cd.cd_devs[unit];
1558 		if (sc)
1559 			return sc->sc_drm;
1560 	}
1561 
1562 	return NULL;
1563 }
1564 
/* Detach a device-event knote; the klist is protected at spltty. */
void
filt_drmdetach(struct knote *kn)
{
	struct drm_device *dev = kn->kn_hook;
	int s;

	s = spltty();
	klist_remove_locked(&dev->note, kn);
	splx(s);
}
1575 
1576 int
1577 filt_drmkms(struct knote *kn, long hint)
1578 {
1579 	if (kn->kn_sfflags & hint)
1580 		kn->kn_fflags |= hint;
1581 	return (kn->kn_fflags != 0);
1582 }
1583 
/* Detach a read knote from a drm_file; the klist is protected at spltty. */
void
filt_drmreaddetach(struct knote *kn)
{
	struct drm_file		*file_priv = kn->kn_hook;
	int s;

	s = spltty();
	klist_remove_locked(&file_priv->rsel.si_note, kn);
	splx(s);
}
1594 
/*
 * Read filter: a drm fd is readable when its event list is non-empty.
 * NOTE_SUBMIT means the event_lock is already held by the caller, so
 * only take it ourselves otherwise.
 */
int
filt_drmread(struct knote *kn, long hint)
{
	struct drm_file		*file_priv = kn->kn_hook;
	int			 val = 0;

	if ((hint & NOTE_SUBMIT) == 0)
		mtx_enter(&file_priv->minor->dev->event_lock);
	val = !list_empty(&file_priv->event_list);
	if ((hint & NOTE_SUBMIT) == 0)
		mtx_leave(&file_priv->minor->dev->event_lock);
	return (val);
}
1608 
/* EVFILT_DEVICE filterops (hotplug-style device notifications). */
const struct filterops drm_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmdetach,
	.f_event	= filt_drmkms,
};
1615 
/* EVFILT_READ filterops (pending drm events on a file). */
const struct filterops drmread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmreaddetach,
	.f_event	= filt_drmread,
};
1622 
/*
 * kqueue entry point for drm devices.  EVFILT_READ attaches to the
 * per-open drm_file's event queue; EVFILT_DEVICE attaches to the
 * device-wide notification list.
 */
int
drmkqfilter(dev_t kdev, struct knote *kn)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv = NULL;
	int			 s;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/* Resolve this minor's open file under struct_mutex. */
		mutex_lock(&dev->struct_mutex);
		file_priv = drm_find_file_by_minor(dev, minor(kdev));
		mutex_unlock(&dev->struct_mutex);
		if (file_priv == NULL)
			return (ENXIO);

		kn->kn_fop = &drmread_filtops;
		kn->kn_hook = file_priv;

		/* klist protected at spltty (see filt_drmreaddetach). */
		s = spltty();
		klist_insert_locked(&file_priv->rsel.si_note, kn);
		splx(s);
		break;
	case EVFILT_DEVICE:
		kn->kn_fop = &drm_filtops;
		kn->kn_hook = dev;

		s = spltty();
		klist_insert_locked(&dev->note, kn);
		splx(s);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
1663 
/*
 * open(2) entry point for drm devices.  Allocates a drm_file for this
 * minor, makes the first primary-node opener the master, and runs legacy
 * setup on the very first open of the device.
 */
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	struct drm_minor	*dm;
	int			 ret = 0;
	int			 dminor, realminor, minor_type;
	int need_setup = 0;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* Transition 0 -> 1 means this is the device's first opener. */
	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* Decode the minor space: 0-63 primary, 64-127 control, 128+ render. */
	dminor = minor(kdev);
	realminor =  dminor & ((1 << CLONE_SHIFT) - 1);
	if (realminor < 64)
		minor_type = DRM_MINOR_PRIMARY;
	else if (realminor >= 64 && realminor < 128)
		minor_type = DRM_MINOR_CONTROL;
	else
		minor_type = DRM_MINOR_RENDER;

	dm = *drm_minor_get_slot(dev, minor_type);
	dm->index = minor(kdev);

	file_priv = drm_file_alloc(dm);
	if (IS_ERR(file_priv)) {
		ret = ENOMEM;
		goto err;
	}

	/* first opener automatically becomes master */
	if (drm_is_primary_client(file_priv)) {
		ret = drm_master_open(file_priv);
		if (ret != 0)
			goto out_file_free;
	}

	/* No struct file here; the drm_file doubles as its own filp key. */
	file_priv->filp = (void *)file_priv;
	file_priv->fminor = minor(kdev);

	mutex_lock(&dev->filelist_mutex);
	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);

	if (need_setup) {
		ret = drm_legacy_setup(dev);
		if (ret)
			goto out_file_free;
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

out_file_free:
	drm_file_free(file_priv);
err:
	/* Undo the open_count increment taken above. */
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return (ret);
}
1740 
/*
 * close(2) entry point for drm devices.  Frees the drm_file for this
 * minor and runs lastclose when the final reference goes away.
 */
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		mutex_unlock(&dev->filelist_mutex);
		goto done;
	}

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);
	drm_file_free(file_priv);
done:
	/* open_count is decremented even on the EINVAL path above. */
	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return (retcode);
}
1777 
/*
 * read(2) entry point: deliver pending drm events to userspace.  Blocks
 * until at least one event is queued (unless IO_NDELAY), then copies out
 * as many whole events as fit in the buffer.
 */
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int		 		 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->filelist_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", INFSLP);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	/* drm_dequeue_event() drops event_lock before returning. */
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		kfree(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
1828 
1829 /*
1830  * Deqeue an event from the file priv in question. returning 1 if an
1831  * event was found. We take the resid from the read as a parameter because
1832  * we will only dequeue and event if the read buffer has space to fit the
1833  * entire thing.
1834  *
1835  * We are called locked, but we will *unlock* the queue on return so that
1836  * we may sleep to copyout the event.
1837  */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event *e = NULL;
	int gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
			     struct drm_pending_event, link);
	/* Events are atomic: skip if the whole event doesn't fit. */
	if (e->event->length > resid)
		goto out;

	/* Give the queue space back before handing the event to the caller. */
	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	gotone = 1;

out:
	/* Always drops event_lock, even when nothing was dequeued. */
	mtx_leave(&dev->event_lock);

	return (gotone);
}
1865 
/* mmap(2) via the chardev is not supported; always fail. */
paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	return -1;
}
1871 
/*
 * Allocate, map and load a DMA-safe memory chunk: create a dmamap,
 * allocate and kernel-map the segments, and load the map.  Returns the
 * wrapper struct or NULL; on failure everything is unwound via the
 * goto chain below.
 */
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

	/* Error unwind: reverse order of the setup steps above. */
unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}
1918 
/* Release memory from drm_dmamem_alloc(); safe to call with NULL. */
void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	/* Reverse order of drm_dmamem_alloc(): unload, unmap, free, destroy. */
	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM, 0);
}
1931 
/*
 * Linux-compat PCI DMA allocator: a single contiguous, uncached segment
 * wrapped in a drm_dma_handle.  Returns NULL on failure.
 */
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	struct drm_dma_handle *dmah;

	dmah = malloc(sizeof(*dmah), M_DRM, M_WAITOK);
	/* One segment, maxsegsz == size, mapped BUS_DMA_NOCACHE. */
	dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
	    BUS_DMA_NOCACHE, 0);
	if (dmah->mem == NULL) {
		free(dmah, M_DRM, sizeof(*dmah));
		return NULL;
	}
	dmah->busaddr = dmah->mem->segs[0].ds_addr;
	dmah->size = dmah->mem->size;
	dmah->vaddr = dmah->mem->kva;
	return (dmah);
}
1949 
1950 void
1951 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
1952 {
1953 	if (dmah == NULL)
1954 		return;
1955 
1956 	drm_dmamem_free(dev->dmat, dmah->mem);
1957 	free(dmah, M_DRM, sizeof(*dmah));
1958 }
1959 
1960 /*
1961  * Compute order.  Can be made faster.
1962  */
1963 int
1964 drm_order(unsigned long size)
1965 {
1966 	int order;
1967 	unsigned long tmp;
1968 
1969 	for (order = 0, tmp = size; tmp >>= 1; ++order)
1970 		;
1971 
1972 	if (size & ~(1 << order))
1973 		++order;
1974 
1975 	return order;
1976 }
1977 
/*
 * DRM_IOCTL_GET_PCIINFO backend: report the device's PCI identity to
 * userspace from the pci_dev shim filled in by drm_attach().
 */
int
drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_pciinfo *info = data;

	/* Non-PCI (platform) devices have no pdev. */
	if (dev->pdev == NULL)
		return -ENOTTY;

	info->domain = dev->pdev->bus->domain_nr;
	info->bus = dev->pdev->bus->number;
	info->dev = PCI_SLOT(dev->pdev->devfn);
	info->func = PCI_FUNC(dev->pdev->devfn);
	info->vendor_id = dev->pdev->vendor;
	info->device_id = dev->pdev->device;
	info->subvendor_id = dev->pdev->subsystem_vendor;
	info->subdevice_id = dev->pdev->subsystem_device;
	/*
	 * NOTE(review): revision is hard-coded to 0 even though
	 * dev->pdev->revision is populated in drm_attach() — presumably
	 * deliberate; confirm before "fixing".
	 */
	info->revision_id = 0;

	return 0;
}
1998