xref: /openbsd-src/sys/dev/pci/drm/drm_drv.c (revision de8cc8edbc71bd3e3bc7fbffa27ba0e564c37d8b)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/fcntl.h>
31 #include <sys/poll.h>
32 #include <sys/specdev.h>
33 #include <sys/vnode.h>
34 
35 #include <machine/bus.h>
36 
37 #ifdef __HAVE_ACPI
38 #include <dev/acpi/acpidev.h>
39 #include <dev/acpi/acpivar.h>
40 #include <dev/acpi/dsdt.h>
41 #endif
42 
43 #include <linux/debugfs.h>
44 #include <linux/fs.h>
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/mount.h>
48 #include <linux/pseudo_fs.h>
49 #include <linux/slab.h>
50 #include <linux/srcu.h>
51 
52 #include <drm/drm_client.h>
53 #include <drm/drm_color_mgmt.h>
54 #include <drm/drm_drv.h>
55 #include <drm/drm_file.h>
56 #include <drm/drm_mode_object.h>
57 #include <drm/drm_print.h>
58 
59 #include <drm/drm_gem.h>
60 #include <drm/drm_agpsupport.h>
61 #include <drm/drm_irq.h>
62 
63 #include "drm_crtc_internal.h"
64 #include "drm_internal.h"
65 #include "drm_legacy.h"
66 
67 MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
68 MODULE_DESCRIPTION("DRM shared core routines");
69 MODULE_LICENSE("GPL and additional rights");
70 
static DEFINE_SPINLOCK(drm_minor_lock);	/* protects drm_minors_idr */
static struct idr drm_minors_idr;	/* minor index -> struct drm_minor map */

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete = false;

/* debugfs root; set to NULL on non-Linux in drm_minor_register() */
static struct dentry *drm_debugfs_root;

#ifdef notyet
DEFINE_STATIC_SRCU(drm_unplug_srcu);
#endif

/*
 * Some functions are only called once on init regardless of how many times
 * drm attaches.  In linux this is handled via module_init()/module_exit()
 */
int drm_refcnt;
94 
/*
 * Autoconf softc wrapping a drm_device so the kernel device framework
 * can attach/detach it like any other device.
 */
struct drm_softc {
	struct device		sc_dev;		/* generic autoconf device */
	struct drm_device 	*sc_drm;	/* wrapped DRM device */
	int			sc_allocated;	/* nonzero: sc_drm allocated by attach, free on detach */
};

/*
 * Arguments passed from the bus front-end (e.g. PCI) down to the drm
 * attachment.  busid/busid_len identify the device to userland.
 */
struct drm_attach_args {
	struct drm_device		*drm;
	struct drm_driver		*driver;
	char				*busid;
	bus_dma_tag_t			 dmat;
	bus_space_tag_t			 bst;
	size_t				 busid_len;
	int				 is_agp;
	struct pci_attach_args		*pa;
	int				 primary;
};
112 
113 void	drm_linux_init(void);
114 void	drm_linux_exit(void);
115 int	drm_linux_acpi_notify(struct aml_node *, int, void *);
116 
117 int	drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
118 	    struct drm_pending_event **);
119 
120 int	drmprint(void *, const char *);
121 int	drmsubmatch(struct device *, void *, void *);
122 const struct pci_device_id *
123 	drm_find_description(int, int, const struct pci_device_id *);
124 
125 int	drm_file_cmp(struct drm_file *, struct drm_file *);
126 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
127 
128 #define DRMDEVCF_PRIMARY	0
129 #define drmdevcf_primary	cf_loc[DRMDEVCF_PRIMARY]	/* spec'd as primary? */
130 #define DRMDEVCF_PRIMARY_UNK	-1
131 
132 /*
133  * DRM Minors
134  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
135  * of them is represented by a drm_minor object. Depending on the capabilities
136  * of the device-driver, different interfaces are registered.
137  *
138  * Minors can be accessed via dev->$minor_name. This pointer is either
139  * NULL or a valid drm_minor pointer and stays valid as long as the device is
140  * valid. This means, DRM minors have the same life-time as the underlying
141  * device. However, this doesn't mean that the minor is active. Minors are
142  * registered and unregistered dynamically according to device-state.
143  */
144 
145 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
146 					     unsigned int type)
147 {
148 	switch (type) {
149 	case DRM_MINOR_PRIMARY:
150 		return &dev->primary;
151 	case DRM_MINOR_RENDER:
152 		return &dev->render;
153 	default:
154 		BUG();
155 	}
156 }
157 
/*
 * Allocate a drm_minor of @type for @dev and reserve an index for it in
 * the global minors IDR.  The IDR slot is filled with NULL so lookups
 * fail until drm_minor_register() publishes the minor.
 */
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/* each type owns a 64-wide index window: [64*type, 64*(type+1)) */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

#ifdef __linux__
	/* sysfs chardev node only exists on Linux */
	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}
#endif

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

#ifdef __linux__
err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
#endif
err_free:
	kfree(minor);
	return r;
}
207 
208 static void drm_minor_free(struct drm_device *dev, unsigned int type)
209 {
210 	struct drm_minor **slot, *minor;
211 	unsigned long flags;
212 
213 	slot = drm_minor_get_slot(dev, type);
214 	minor = *slot;
215 	if (!minor)
216 		return;
217 
218 #ifdef __linux__
219 	put_device(minor->kdev);
220 #endif
221 
222 	spin_lock_irqsave(&drm_minor_lock, flags);
223 	idr_remove(&drm_minors_idr, minor->index);
224 	spin_unlock_irqrestore(&drm_minor_lock, flags);
225 
226 	kfree(minor);
227 	*slot = NULL;
228 }
229 
/*
 * Publish a previously allocated minor: set up its debugfs/sysfs nodes
 * (Linux only) and swap it into the global IDR so drm_minor_acquire()
 * can find it.  A missing minor of this type is not an error.
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
#ifdef __linux__
	int ret;
#endif

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

#ifdef __linux__
	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;
#else
	/* no debugfs here; keep the root pointer in a known state */
	drm_debugfs_root = NULL;
#endif

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

#ifdef __linux__
err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
#endif
}
272 
/*
 * Unpublish a minor: hide it from lookups by swapping NULL back into
 * the IDR, then tear down its device node and debugfs entries.  The
 * minor itself stays allocated until drm_minor_free().
 */
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
#ifdef __linux__
	if (!minor || !device_is_registered(minor->kdev))
#else
	if (!minor)
#endif
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

#ifdef __linux__
	device_del(minor->kdev);
#endif
	/*
	 * NOTE(review): the two calls below are outside the __linux__ guard
	 * even though minor->kdev is only allocated under __linux__ in
	 * drm_minor_alloc() — presumably the compat layer tolerates this;
	 * confirm against the OpenBSD drm_linux shims.
	 */
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
297 
298 /*
299  * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
301  * object with drm_minor_release().
302  *
303  * As long as you hold this minor, it is guaranteed that the object and the
304  * minor->dev pointer will stay valid! However, the device may get unplugged and
305  * unregistered while you hold the minor.
306  */
307 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
308 {
309 	struct drm_minor *minor;
310 	unsigned long flags;
311 
312 	spin_lock_irqsave(&drm_minor_lock, flags);
313 	minor = idr_find(&drm_minors_idr, minor_id);
314 	if (minor)
315 		drm_dev_get(minor->dev);
316 	spin_unlock_irqrestore(&drm_minor_lock, flags);
317 
318 	if (!minor) {
319 		return ERR_PTR(-ENODEV);
320 	} else if (drm_dev_is_unplugged(minor->dev)) {
321 		drm_dev_put(minor->dev);
322 		return ERR_PTR(-ENODEV);
323 	}
324 
325 	return minor;
326 }
327 
/* Drop the device reference taken by drm_minor_acquire(). */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}
332 
333 /**
334  * DOC: driver instance overview
335  *
336  * A device instance for a drm driver is represented by &struct drm_device. This
337  * is initialized with drm_dev_init(), usually from bus-specific ->probe()
338  * callbacks implemented by the driver. The driver then needs to initialize all
339  * the various subsystems for the drm device like memory management, vblank
 * handling, modesetting support and initial output configuration plus obviously
341  * initialize all the corresponding hardware bits. Finally when everything is up
342  * and running and ready for userspace the device instance can be published
343  * using drm_dev_register().
344  *
 * There is also deprecated support for initializing device instances using
346  * bus-specific helpers and the &drm_driver.load callback. But due to
347  * backwards-compatibility needs the device instance have to be published too
348  * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
350  *
351  * When cleaning up a device instance everything needs to be done in reverse:
352  * First unpublish the device instance with drm_dev_unregister(). Then clean up
353  * any other resources allocated at device initialization and drop the driver's
354  * reference to &drm_device using drm_dev_put().
355  *
356  * Note that the lifetime rules for &drm_device instance has still a lot of
357  * historical baggage. Hence use the reference counting provided by
358  * drm_dev_get() and drm_dev_put() only carefully.
359  *
360  * Display driver example
361  * ~~~~~~~~~~~~~~~~~~~~~~
362  *
363  * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that are
 * almost always present and serve as a demonstration of devm_drm_dev_init()
366  * usage with its accompanying drm_driver->release callback.
367  *
368  * .. code-block:: c
369  *
370  *	struct driver_device {
371  *		struct drm_device drm;
372  *		void *userspace_facing;
373  *		struct clk *pclk;
374  *	};
375  *
376  *	static void driver_drm_release(struct drm_device *drm)
377  *	{
378  *		struct driver_device *priv = container_of(...);
379  *
380  *		drm_mode_config_cleanup(drm);
381  *		drm_dev_fini(drm);
382  *		kfree(priv->userspace_facing);
383  *		kfree(priv);
384  *	}
385  *
386  *	static struct drm_driver driver_drm_driver = {
387  *		[...]
388  *		.release = driver_drm_release,
389  *	};
390  *
391  *	static int driver_probe(struct platform_device *pdev)
392  *	{
393  *		struct driver_device *priv;
394  *		struct drm_device *drm;
395  *		int ret;
396  *
 *		// devm_kzalloc() can't be used here because the drm_device
398  *		// lifetime can exceed the device lifetime if driver unbind
399  *		// happens when userspace still has open file descriptors.
400  *		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
401  *		if (!priv)
402  *			return -ENOMEM;
403  *
404  *		drm = &priv->drm;
405  *
406  *		ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
407  *		if (ret) {
408  *			kfree(drm);
409  *			return ret;
410  *		}
411  *
412  *		drm_mode_config_init(drm);
413  *
414  *		priv->userspace_facing = kzalloc(..., GFP_KERNEL);
415  *		if (!priv->userspace_facing)
416  *			return -ENOMEM;
417  *
418  *		priv->pclk = devm_clk_get(dev, "PCLK");
419  *		if (IS_ERR(priv->pclk))
420  *			return PTR_ERR(priv->pclk);
421  *
422  *		// Further setup, display pipeline etc
423  *
424  *		platform_set_drvdata(pdev, drm);
425  *
426  *		drm_mode_config_reset(drm);
427  *
428  *		ret = drm_dev_register(drm);
429  *		if (ret)
430  *			return ret;
431  *
432  *		drm_fbdev_generic_setup(drm, 32);
433  *
434  *		return 0;
435  *	}
436  *
437  *	// This function is called before the devm_ resources are released
438  *	static int driver_remove(struct platform_device *pdev)
439  *	{
440  *		struct drm_device *drm = platform_get_drvdata(pdev);
441  *
442  *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
444  *
445  *		return 0;
446  *	}
447  *
448  *	// This function is called on kernel restart and shutdown
449  *	static void driver_shutdown(struct platform_device *pdev)
450  *	{
451  *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
452  *	}
453  *
454  *	static int __maybe_unused driver_pm_suspend(struct device *dev)
455  *	{
456  *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
457  *	}
458  *
459  *	static int __maybe_unused driver_pm_resume(struct device *dev)
460  *	{
461  *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
462  *
463  *		return 0;
464  *	}
465  *
466  *	static const struct dev_pm_ops driver_pm_ops = {
467  *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
468  *	};
469  *
470  *	static struct platform_driver driver_driver = {
471  *		.driver = {
472  *			[...]
473  *			.pm = &driver_pm_ops,
474  *		},
475  *		.probe = driver_probe,
476  *		.remove = driver_remove,
477  *		.shutdown = driver_shutdown,
478  *	};
479  *	module_platform_driver(driver_driver);
480  *
481  * Drivers that want to support device unplugging (USB, DT overlay unload) should
482  * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
483  * regions that is accessing device resources to prevent use after they're
484  * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
485  * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
486  * drm_atomic_helper_shutdown() is called. This means that if the disable code
487  * paths are protected, they will not run on regular driver module unload,
 * possibly leaving the hardware enabled.
489  */
490 
491 /**
492  * drm_put_dev - Unregister and release a DRM device
493  * @dev: DRM device
494  *
495  * Called at module unload time or when a PCI device is unplugged.
496  *
497  * Cleans up all DRM device, calling drm_lastclose().
498  *
499  * Note: Use of this function is deprecated. It will eventually go away
500  * completely.  Please use drm_dev_unregister() and drm_dev_put() explicitly
501  * instead to make sure that the device isn't userspace accessible any more
502  * while teardown is in progress, ensuring that userspace can't access an
503  * inconsistent state.
504  */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	/* tolerate a NULL device rather than crash during teardown */
	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	/* unregister first so userspace loses access, then drop our ref */
	drm_dev_unregister(dev);
	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);
518 
519 /**
520  * drm_dev_enter - Enter device critical section
521  * @dev: DRM device
522  * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
523  *
524  * This function marks and protects the beginning of a section that should not
525  * be entered after the device has been unplugged. The section end is marked
526  * with drm_dev_exit(). Calls to this function can be nested.
527  *
528  * Returns:
529  * True if it is OK to enter the section, false otherwise.
530  */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
#ifdef notyet
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}
#endif

	/* SRCU unplug protection is compiled out here; always enterable */
	return true;
}
EXPORT_SYMBOL(drm_dev_enter);
545 
546 /**
547  * drm_dev_exit - Exit device critical section
548  * @idx: index returned from drm_dev_enter()
549  *
550  * This function marks the end of a section that should not be entered after
551  * the device has been unplugged.
552  */
void drm_dev_exit(int idx)
{
#ifdef notyet
	/* matches the srcu_read_lock() taken in drm_dev_enter() */
	srcu_read_unlock(&drm_unplug_srcu, idx);
#endif
}
EXPORT_SYMBOL(drm_dev_exit);
560 
561 /**
562  * drm_dev_unplug - unplug a DRM device
563  * @dev: DRM device
564  *
565  * This unplugs a hotpluggable DRM device, which makes it inaccessible to
566  * userspace operations. Entry-points can use drm_dev_enter() and
567  * drm_dev_exit() to protect device resources in a race free manner. This
568  * essentially unregisters the device like drm_dev_unregister(), but can be
569  * called while there are still open users of @dev.
570  */
void drm_dev_unplug(struct drm_device *dev)
{
	/* not implemented on this platform yet; logs and returns */
	STUB();
#ifdef notyet
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);
#endif
}
EXPORT_SYMBOL(drm_dev_unplug);
588 
589 #ifdef __linux__
590 /*
591  * DRM internal mount
592  * We want to be able to allocate our own "struct address_space" to control
593  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
594  * stand-alone address_space objects, so we need an underlying inode. As there
595  * is no way to allocate an independent inode easily, we need a fake internal
596  * VFS mount-point.
597  *
598  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
599  * frees it again. You are allowed to use iget() and iput() to get references to
600  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
601  * drm_fs_inode_free() call (which does not have to be the last iput()).
602  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
603  * between multiple inode-users. You could, technically, call
604  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
605  * iput(), but this way you'd end up with a new vfsmount for each inode.
606  */
607 
static int drm_fs_cnt;			/* pin count for the internal mount */
static struct vfsmount *drm_fs_mnt;	/* lazily pinned "drm" pseudo fs */
610 
611 static int drm_fs_init_fs_context(struct fs_context *fc)
612 {
613 	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
614 }
615 
/* Filesystem type backing the DRM internal (anonymous-inode) mount. */
static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
622 
623 static struct inode *drm_fs_inode_new(void)
624 {
625 	struct inode *inode;
626 	int r;
627 
628 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
629 	if (r < 0) {
630 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
631 		return ERR_PTR(r);
632 	}
633 
634 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
635 	if (IS_ERR(inode))
636 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
637 
638 	return inode;
639 }
640 
641 static void drm_fs_inode_free(struct inode *inode)
642 {
643 	if (inode) {
644 		iput(inode);
645 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
646 	}
647 }
648 
649 #endif /* __linux__ */
650 
651 /**
652  * DOC: component helper usage recommendations
653  *
654  * DRM drivers that drive hardware where a logical device consists of a pile of
655  * independent hardware blocks are recommended to use the :ref:`component helper
656  * library<component>`. For consistency and better options for code reuse the
657  * following guidelines apply:
658  *
659  *  - The entire device initialization procedure should be run from the
660  *    &component_master_ops.master_bind callback, starting with drm_dev_init(),
661  *    then binding all components with component_bind_all() and finishing with
662  *    drm_dev_register().
663  *
664  *  - The opaque pointer passed to all components through component_bind_all()
665  *    should point at &struct drm_device of the device instance, not some driver
666  *    specific private structure.
667  *
668  *  - The component helper fills the niche where further standardization of
669  *    interfaces is not practical. When there already is, or will be, a
670  *    standardized interface like &drm_bridge or &drm_panel, providing its own
671  *    functions to find such components at driver load time, like
672  *    drm_of_find_panel_or_bridge(), then the component helper should not be
673  *    used.
674  */
675 
676 /**
677  * drm_dev_init - Initialise new DRM device
678  * @dev: DRM device
679  * @driver: DRM driver
680  * @parent: Parent device object
681  *
682  * Initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
684  * with other core subsystems. This should be done last in the device
685  * initialization sequence to make sure userspace can't access an inconsistent
686  * state.
687  *
688  * The initial ref-count of the object is 1. Use drm_dev_get() and
689  * drm_dev_put() to take and drop further ref-counts.
690  *
691  * It is recommended that drivers embed &struct drm_device into their own device
692  * structure.
693  *
694  * Drivers that do not want to allocate their own device struct
695  * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
696  * that do embed &struct drm_device it must be placed first in the overall
697  * structure, and the overall structure must be allocated using kmalloc(): The
698  * drm core's release function unconditionally calls kfree() on the @dev pointer
699  * when the final reference is released. To override this behaviour, and so
700  * allow embedding of the drm_device inside the driver's device struct at an
701  * arbitrary offset, you must supply a &drm_driver.release callback and control
702  * the finalization explicitly.
703  *
704  * RETURNS:
705  * 0 on success, or error code on failure.
706  */
707 int drm_dev_init(struct drm_device *dev,
708 		 struct drm_driver *driver,
709 		 struct device *parent)
710 {
711 	int ret;
712 
713 	if (!drm_core_init_complete) {
714 		DRM_ERROR("DRM core is not initialized\n");
715 		return -ENODEV;
716 	}
717 
718 	if (WARN_ON(!parent))
719 		return -EINVAL;
720 
721 	kref_init(&dev->ref);
722 #ifdef __linux__
723 	dev->dev = get_device(parent);
724 #endif
725 	dev->driver = driver;
726 
727 	/* no per-device feature limits by default */
728 	dev->driver_features = ~0u;
729 
730 	drm_legacy_init_members(dev);
731 #ifdef notyet
732 	INIT_LIST_HEAD(&dev->filelist);
733 #else
734 	SPLAY_INIT(&dev->files);
735 #endif
736 	INIT_LIST_HEAD(&dev->filelist_internal);
737 	INIT_LIST_HEAD(&dev->clientlist);
738 	INIT_LIST_HEAD(&dev->vblank_event_list);
739 
740 	mtx_init(&dev->event_lock, IPL_TTY);
741 	mtx_init(&dev->event_lock, IPL_TTY);
742 	rw_init(&dev->struct_mutex, "drmdevlk");
743 	rw_init(&dev->filelist_mutex, "drmflist");
744 	rw_init(&dev->clientlist_mutex, "drmclist");
745 	rw_init(&dev->master_mutex, "drmmast");
746 
747 #ifdef __linux__
748 	dev->anon_inode = drm_fs_inode_new();
749 	if (IS_ERR(dev->anon_inode)) {
750 		ret = PTR_ERR(dev->anon_inode);
751 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
752 		goto err_free;
753 	}
754 #endif
755 
756 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
757 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
758 		if (ret)
759 			goto err_minors;
760 	}
761 
762 	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
763 	if (ret)
764 		goto err_minors;
765 
766 	ret = drm_legacy_create_map_hash(dev);
767 	if (ret)
768 		goto err_minors;
769 
770 	drm_legacy_ctxbitmap_init(dev);
771 
772 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
773 		ret = drm_gem_init(dev);
774 		if (ret) {
775 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
776 			goto err_ctxbitmap;
777 		}
778 	}
779 
780 	ret = drm_dev_set_unique(dev, dev_name(parent));
781 	if (ret)
782 		goto err_setunique;
783 
784 	return 0;
785 
786 err_setunique:
787 	if (drm_core_check_feature(dev, DRIVER_GEM))
788 		drm_gem_destroy(dev);
789 err_ctxbitmap:
790 	drm_legacy_ctxbitmap_cleanup(dev);
791 	drm_legacy_remove_map_hash(dev);
792 err_minors:
793 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
794 	drm_minor_free(dev, DRM_MINOR_RENDER);
795 #ifdef __linux__
796 	drm_fs_inode_free(dev->anon_inode);
797 err_free:
798 	put_device(dev->dev);
799 #endif
800 	mutex_destroy(&dev->master_mutex);
801 	mutex_destroy(&dev->clientlist_mutex);
802 	mutex_destroy(&dev->filelist_mutex);
803 	mutex_destroy(&dev->struct_mutex);
804 	drm_legacy_destroy_members(dev);
805 	return ret;
806 }
807 EXPORT_SYMBOL(drm_dev_init);
808 
#ifdef notyet
/* devm action callback: drop the managed drm_device reference. */
static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}
#endif
815 
816 /**
817  * devm_drm_dev_init - Resource managed drm_dev_init()
818  * @parent: Parent device object
819  * @dev: DRM device
820  * @driver: DRM driver
821  *
822  * Managed drm_dev_init(). The DRM device initialized with this function is
823  * automatically put on driver detach using drm_dev_put(). You must supply a
824  * &drm_driver.release callback to control the finalization explicitly.
825  *
826  * RETURNS:
827  * 0 on success, or error code on failure.
828  */
int devm_drm_dev_init(struct device *parent,
		      struct drm_device *dev,
		      struct drm_driver *driver)
{
	/* devm_* resource management is not available on this platform */
	STUB();
	return -ENOSYS;
#ifdef notyet
	int ret;

	/* a release callback is mandatory for managed devices */
	if (WARN_ON(!driver->release))
		return -EINVAL;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
	if (ret)
		devm_drm_dev_init_release(dev);

	return ret;
#endif
}
EXPORT_SYMBOL(devm_drm_dev_init);
853 
854 /**
855  * drm_dev_fini - Finalize a dead DRM device
856  * @dev: DRM device
857  *
858  * Finalize a dead DRM device. This is the converse to drm_dev_init() and
859  * frees up all data allocated by it. All driver private data should be
860  * finalized first. Note that this function does not free the @dev, that is
861  * left to the caller.
862  *
863  * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
864  * from a &drm_driver.release callback.
865  */
void drm_dev_fini(struct drm_device *dev)
{
	/* teardown mirrors drm_dev_init() in reverse order */
	drm_vblank_cleanup(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
#ifdef __linux__
	drm_fs_inode_free(dev->anon_inode);
#endif

	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);

#ifdef __linux__
	put_device(dev->dev);
#endif

	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
	/* does NOT free @dev itself; that is the caller's job */
	kfree(dev->unique);
}
EXPORT_SYMBOL(drm_dev_fini);
894 
895 /**
896  * drm_dev_alloc - Allocate new DRM device
897  * @driver: DRM driver to allocate device for
898  * @parent: Parent device object
899  *
900  * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
902  * with other core subsystems. This should be done last in the device
903  * initialization sequence to make sure userspace can't access an inconsistent
904  * state.
905  *
906  * The initial ref-count of the object is 1. Use drm_dev_get() and
907  * drm_dev_put() to take and drop further ref-counts.
908  *
909  * Note that for purely virtual devices @parent can be NULL.
910  *
911  * Drivers that wish to subclass or embed &struct drm_device into their
912  * own struct should look at using drm_dev_init() instead.
913  *
914  * RETURNS:
915  * Pointer to new DRM device, or ERR_PTR on failure.
916  */
917 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
918 				 struct device *parent)
919 {
920 	struct drm_device *dev;
921 	int ret;
922 
923 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
924 	if (!dev)
925 		return ERR_PTR(-ENOMEM);
926 
927 	ret = drm_dev_init(dev, driver, parent);
928 	if (ret) {
929 		kfree(dev);
930 		return ERR_PTR(ret);
931 	}
932 
933 	return dev;
934 }
935 EXPORT_SYMBOL(drm_dev_alloc);
936 
937 static void drm_dev_release(struct kref *ref)
938 {
939 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
940 
941 	if (dev->driver->release) {
942 		dev->driver->release(dev);
943 	} else {
944 		drm_dev_fini(dev);
945 		kfree(dev);
946 	}
947 }
948 
949 /**
950  * drm_dev_get - Take reference of a DRM device
951  * @dev: device to take reference of or NULL
952  *
953  * This increases the ref-count of @dev by one. You *must* already own a
954  * reference when calling this. Use drm_dev_put() to drop this reference
955  * again.
956  *
957  * This function never fails. However, this function does not provide *any*
958  * guarantee whether the device is alive or running. It only provides a
959  * reference to the object and the memory associated with it.
960  */
961 void drm_dev_get(struct drm_device *dev)
962 {
963 	if (dev)
964 		kref_get(&dev->ref);
965 }
966 EXPORT_SYMBOL(drm_dev_get);
967 
968 /**
969  * drm_dev_put - Drop reference of a DRM device
970  * @dev: device to drop reference of or NULL
971  *
972  * This decreases the ref-count of @dev by one. The device is destroyed if the
973  * ref-count drops to zero.
974  */
975 void drm_dev_put(struct drm_device *dev)
976 {
977 	if (dev)
978 		kref_put(&dev->ref, drm_dev_release);
979 }
980 EXPORT_SYMBOL(drm_dev_put);
981 
982 static int create_compat_control_link(struct drm_device *dev)
983 {
984 	struct drm_minor *minor;
985 	char *name;
986 	int ret;
987 
988 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
989 		return 0;
990 
991 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
992 	if (!minor)
993 		return 0;
994 
995 	/*
996 	 * Some existing userspace out there uses the existing of the controlD*
997 	 * sysfs files to figure out whether it's a modeset driver. It only does
998 	 * readdir, hence a symlink is sufficient (and the least confusing
999 	 * option). Otherwise controlD* is entirely unused.
1000 	 *
1001 	 * Old controlD chardev have been allocated in the range
1002 	 * 64-127.
1003 	 */
1004 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
1005 	if (!name)
1006 		return -ENOMEM;
1007 
1008 	ret = sysfs_create_link(minor->kdev->kobj.parent,
1009 				&minor->kdev->kobj,
1010 				name);
1011 
1012 	kfree(name);
1013 
1014 	return ret;
1015 }
1016 
1017 static void remove_compat_control_link(struct drm_device *dev)
1018 {
1019 	struct drm_minor *minor;
1020 	char *name;
1021 
1022 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
1023 		return;
1024 
1025 	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
1026 	if (!minor)
1027 		return;
1028 
1029 	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
1030 	if (!name)
1031 		return;
1032 
1033 	sysfs_remove_link(minor->kdev->kobj.parent, name);
1034 
1035 	kfree(name);
1036 }
1037 
1038 /**
1039  * drm_dev_register - Register DRM device
1040  * @dev: Device to register
1041  * @flags: Flags passed to the driver's .load() function
1042  *
1043  * Register the DRM device @dev with the system, advertise device to user-space
1044  * and start normal device operation. @dev must be initialized via drm_dev_init()
1045  * previously.
1046  *
1047  * Never call this twice on any device!
1048  *
1049  * NOTE: To ensure backward compatibility with existing drivers method this
1050  * function calls the &drm_driver.load method after registering the device
1051  * nodes, creating race conditions. Usage of the &drm_driver.load methods is
1052  * therefore deprecated, drivers must perform all initialization before calling
1053  * drm_dev_register().
1054  *
1055  * RETURNS:
1056  * 0 on success, negative error code on failure.
1057  */
1058 int drm_dev_register(struct drm_device *dev, unsigned long flags)
1059 {
1060 	struct drm_driver *driver = dev->driver;
1061 	int ret;
1062 
1063 	if (drm_dev_needs_global_mutex(dev))
1064 		mutex_lock(&drm_global_mutex);
1065 
1066 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
1067 	if (ret)
1068 		goto err_minors;
1069 
1070 	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
1071 	if (ret)
1072 		goto err_minors;
1073 
1074 	ret = create_compat_control_link(dev);
1075 	if (ret)
1076 		goto err_minors;
1077 
1078 	dev->registered = true;
1079 
1080 	if (dev->driver->load) {
1081 		ret = dev->driver->load(dev, flags);
1082 		if (ret)
1083 			goto err_minors;
1084 	}
1085 
1086 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1087 		drm_modeset_register_all(dev);
1088 
1089 	ret = 0;
1090 
1091 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
1092 		 driver->name, driver->major, driver->minor,
1093 		 driver->patchlevel, driver->date,
1094 		 dev->dev ? dev_name(dev->dev) : "virtual device",
1095 		 dev->primary->index);
1096 
1097 	goto out_unlock;
1098 
1099 err_minors:
1100 	remove_compat_control_link(dev);
1101 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
1102 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
1103 out_unlock:
1104 	if (drm_dev_needs_global_mutex(dev))
1105 		mutex_unlock(&drm_global_mutex);
1106 	return ret;
1107 }
1108 EXPORT_SYMBOL(drm_dev_register);
1109 
1110 /**
1111  * drm_dev_unregister - Unregister DRM device
1112  * @dev: Device to unregister
1113  *
1114  * Unregister the DRM device from the system. This does the reverse of
1115  * drm_dev_register() but does not deallocate the device. The caller must call
1116  * drm_dev_put() to drop their final reference.
1117  *
1118  * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
1119  * which can be called while there are still open users of @dev.
1120  *
1121  * This should be called first in the device teardown code to make sure
1122  * userspace can't access the device instance any more.
1123  */
1124 void drm_dev_unregister(struct drm_device *dev)
1125 {
1126 	if (drm_core_check_feature(dev, DRIVER_LEGACY))
1127 		drm_lastclose(dev);
1128 
1129 	dev->registered = false;
1130 
1131 	drm_client_dev_unregister(dev);
1132 
1133 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1134 		drm_modeset_unregister_all(dev);
1135 
1136 	if (dev->driver->unload)
1137 		dev->driver->unload(dev);
1138 
1139 #if IS_ENABLED(CONFIG_AGP)
1140 	if (dev->agp)
1141 		drm_agp_takedown(dev);
1142 #endif
1143 
1144 	drm_legacy_rmmaps(dev);
1145 
1146 	remove_compat_control_link(dev);
1147 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
1148 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
1149 }
1150 EXPORT_SYMBOL(drm_dev_unregister);
1151 
1152 /**
1153  * drm_dev_set_unique - Set the unique name of a DRM device
1154  * @dev: device of which to set the unique name
1155  * @name: unique name
1156  *
1157  * Sets the unique name of a DRM device using the specified string. This is
1158  * already done by drm_dev_init(), drivers should only override the default
1159  * unique name for backwards compatibility reasons.
1160  *
1161  * Return: 0 on success or a negative error code on failure.
1162  */
1163 int drm_dev_set_unique(struct drm_device *dev, const char *name)
1164 {
1165 	kfree(dev->unique);
1166 	dev->unique = kstrdup(name, GFP_KERNEL);
1167 
1168 	return dev->unique ? 0 : -ENOMEM;
1169 }
1170 EXPORT_SYMBOL(drm_dev_set_unique);
1171 
1172 /*
1173  * DRM Core
1174  * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once set up, drivers can probe their respective
1176  * devices.
1177  * Currently, core management includes:
1178  *  - The "DRM-Global" key/value database
1179  *  - Global ID management for connectors
1180  *  - DRM major number allocation
1181  *  - DRM minor management
1182  *  - DRM sysfs class
1183  *  - DRM debugfs root
1184  *
1185  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
1186  * interface registered on a DRM device, you can request minor numbers from DRM
1187  * core. DRM core takes care of major-number management and char-dev
1188  * registration. A stub ->open() callback forwards any open() requests to the
1189  * registered minor.
1190  */
1191 
1192 #ifdef __linux__
/*
 * Stub ->open() for the shared DRM character-device major: look up the
 * minor being opened, swap the file's fops for that minor's driver fops
 * and re-dispatch the open.  Compiled on Linux only (#ifdef __linux__);
 * OpenBSD uses drmopen() further down instead.
 */
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	/* From here on the file is serviced by the driver's own fops. */
	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}
1222 
/* Minimal fops for the shared DRM major; .open redirects to the minor. */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
1228 #endif /* __linux__ */
1229 
/*
 * Tear down global DRM core state.  The chrdev/debugfs/sysfs pieces
 * exist only on Linux; the minor idr and connector ida are common.
 */
static void drm_core_exit(void)
{
#ifdef __linux__
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
#endif
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}
1240 
/*
 * Set up global DRM core state: connector ida, minor idr and, on Linux
 * only, the sysfs class, debugfs root and the shared chardev major.
 */
static int __init drm_core_init(void)
{
#ifdef __linux__
	int ret;
#endif

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

#ifdef __linux__
	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;
#endif

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;
#ifdef __linux__
error:
	/* Unwind the pieces initialized above. */
	drm_core_exit();
	return ret;
#endif
}
1274 
1275 #ifdef __linux__
1276 module_init(drm_core_init);
1277 module_exit(drm_core_exit);
1278 #endif
1279 
1280 void
1281 drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot,
1282     bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm)
1283 {
1284 	struct drm_attach_args arg;
1285 
1286 	memset(&arg, 0, sizeof(arg));
1287 	arg.driver = driver;
1288 	arg.bst = iot;
1289 	arg.dmat = dmat;
1290 	arg.drm = drm;
1291 
1292 	arg.busid = dev->dv_xname;
1293 	arg.busid_len = strlen(dev->dv_xname) + 1;
1294 	config_found_sm(dev, &arg, drmprint, drmsubmatch);
1295 }
1296 
1297 struct drm_device *
1298 drm_attach_pci(struct drm_driver *driver, struct pci_attach_args *pa,
1299     int is_agp, int primary, struct device *dev, struct drm_device *drm)
1300 {
1301 	struct drm_attach_args arg;
1302 	struct drm_softc *sc;
1303 
1304 	arg.drm = drm;
1305 	arg.driver = driver;
1306 	arg.dmat = pa->pa_dmat;
1307 	arg.bst = pa->pa_memt;
1308 	arg.is_agp = is_agp;
1309 	arg.primary = primary;
1310 	arg.pa = pa;
1311 
1312 	arg.busid_len = 20;
1313 	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
1314 	if (arg.busid == NULL) {
1315 		printf("%s: no memory for drm\n", dev->dv_xname);
1316 		return (NULL);
1317 	}
1318 	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
1319 	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
1320 
1321 	sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
1322 	if (sc == NULL)
1323 		return NULL;
1324 
1325 	return sc->sc_drm;
1326 }
1327 
1328 int
1329 drmprint(void *aux, const char *pnp)
1330 {
1331 	if (pnp != NULL)
1332 		printf("drm at %s", pnp);
1333 	return (UNCONF);
1334 }
1335 
1336 int
1337 drmsubmatch(struct device *parent, void *match, void *aux)
1338 {
1339 	extern struct cfdriver drm_cd;
1340 	struct cfdata *cf = match;
1341 
1342 	/* only allow drm to attach */
1343 	if (cf->cf_driver == &drm_cd)
1344 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
1345 	return (0);
1346 }
1347 
1348 int
1349 drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist)
1350 {
1351 	const struct pci_device_id *id_entry;
1352 
1353 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
1354 	    PCI_PRODUCT(pa->pa_id), idlist);
1355 	if (id_entry != NULL)
1356 		return 1;
1357 
1358 	return 0;
1359 }
1360 
1361 int
1362 drm_probe(struct device *parent, void *match, void *aux)
1363 {
1364 	struct cfdata *cf = match;
1365 	struct drm_attach_args *da = aux;
1366 
1367 	if (cf->drmdevcf_primary != DRMDEVCF_PRIMARY_UNK) {
1368 		/*
1369 		 * If primary-ness of device specified, either match
1370 		 * exactly (at high priority), or fail.
1371 		 */
1372 		if (cf->drmdevcf_primary != 0 && da->primary != 0)
1373 			return (10);
1374 		else
1375 			return (0);
1376 	}
1377 
1378 	/* If primary-ness unspecified, it wins. */
1379 	return (1);
1380 }
1381 
/*
 * autoconf attach routine for drm(4).  Wires the softc to the handed-over
 * (or freshly allocated) drm_device, initializes locks, lists and minors,
 * and sets up PCI shadow state, AGP and GEM as the driver requires.
 */
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_attach_args *da = aux;
	struct drm_device *dev = da->drm;
	int ret;

	/* First attachment brings up the global emulation/core state. */
	if (drm_refcnt == 0) {
		drm_linux_init();
		drm_core_init();
	}
	drm_refcnt++;

	if (dev == NULL) {
		dev = malloc(sizeof(struct drm_device), M_DRM,
		    M_WAITOK | M_ZERO);
		/* remember to free it in drm_detach() */
		sc->sc_allocated = 1;
	}

	sc->sc_drm = dev;

	dev->dev = self;
	dev->dev_private = parent;
	dev->driver = da->driver;

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->unique = da->busid;

	if (da->pa) {
		struct pci_attach_args *pa = da->pa;
		pcireg_t subsys;

		subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);

		/* Populate the Linux-style pci_dev shadow structure. */
		dev->pdev = &dev->_pdev;
		dev->pdev->vendor = PCI_VENDOR(pa->pa_id);
		dev->pdev->device = PCI_PRODUCT(pa->pa_id);
		dev->pdev->subsystem_vendor = PCI_VENDOR(subsys);
		dev->pdev->subsystem_device = PCI_PRODUCT(subsys);
		dev->pdev->revision = PCI_REVISION(pa->pa_class);

		dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
		dev->pdev->bus = &dev->pdev->_bus;
		dev->pdev->bus->pc = pa->pa_pc;
		dev->pdev->bus->number = pa->pa_bus;
		dev->pdev->bus->domain_nr = pa->pa_domain;
		dev->pdev->bus->bridgetag = pa->pa_bridgetag;

		if (pa->pa_bridgetag != NULL) {
			/* Freed again in drm_detach(). */
			dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
			    M_DRM, M_WAITOK | M_ZERO);
			dev->pdev->bus->self->pc = pa->pa_pc;
			dev->pdev->bus->self->tag = *pa->pa_bridgetag;
		}

		dev->pdev->pc = pa->pa_pc;
		dev->pdev->tag = pa->pa_tag;
		dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

#ifdef CONFIG_ACPI
		dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
		aml_register_notify(dev->pdev->dev.node, NULL,
		    drm_linux_acpi_notify, NULL, ACPIDEV_NOPOLL);
#endif
	}

	mtx_init(&dev->quiesce_mtx, IPL_NONE);
	mtx_init(&dev->event_lock, IPL_TTY);
	rw_init(&dev->struct_mutex, "drmdevlk");
	rw_init(&dev->filelist_mutex, "drmflist");
	rw_init(&dev->clientlist_mutex, "drmclist");
	rw_init(&dev->master_mutex, "drmmast");

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto error;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto error;

	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if IS_ENABLED(CONFIG_AGP)
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		if (dev->agp != NULL) {
			/* Map the aperture write-combining when possible;
			 * drm_detach() undoes this when agp->mtrr is set. */
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	if (dev->driver->gem_size > 0) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
		    "drmobjpl", NULL);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	printf("\n");
	return;

error:
	drm_lastclose(dev);
	/* NULL dev_private marks the device unusable for drmopen() etc. */
	dev->dev_private = NULL;
}
1510 
/*
 * autoconf detach routine for drm(4): undo drm_attach().  The last
 * detach also tears down the global core and Linux-emulation state.
 */
int
drm_detach(struct device *self, int flags)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_device *dev = sc->sc_drm;

	drm_refcnt--;
	if (drm_refcnt == 0) {
		drm_core_exit();
		drm_linux_exit();
	}

	drm_lastclose(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		drm_gem_destroy(dev);

		/* The object pool only exists when gem_size was set. */
		if (dev->driver->gem_size > 0)
			pool_destroy(&dev->objpl);
	}

	drm_vblank_cleanup(dev);

	if (dev->agp && dev->agp->mtrr) {
		int retcode;

		/* Undo the write-combining mapping set up in drm_attach(). */
		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	free(dev->agp, M_DRM, 0);
	/* bus->self was allocated in drm_attach() when a bridge existed. */
	if (dev->pdev && dev->pdev->bus)
		free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));

	/* Only free the drm_device if drm_attach() allocated it. */
	if (sc->sc_allocated)
		free(dev, M_DRM, sizeof(struct drm_device));

	return 0;
}
1551 
1552 void
1553 drm_quiesce(struct drm_device *dev)
1554 {
1555 	mtx_enter(&dev->quiesce_mtx);
1556 	dev->quiesce = 1;
1557 	while (dev->quiesce_count > 0) {
1558 		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
1559 		    PZERO, "drmqui", INFSLP);
1560 	}
1561 	mtx_leave(&dev->quiesce_mtx);
1562 }
1563 
1564 void
1565 drm_wakeup(struct drm_device *dev)
1566 {
1567 	mtx_enter(&dev->quiesce_mtx);
1568 	dev->quiesce = 0;
1569 	wakeup(&dev->quiesce);
1570 	mtx_leave(&dev->quiesce_mtx);
1571 }
1572 
1573 int
1574 drm_activate(struct device *self, int act)
1575 {
1576 	struct drm_softc *sc = (struct drm_softc *)self;
1577 	struct drm_device *dev = sc->sc_drm;
1578 
1579 	switch (act) {
1580 	case DVACT_QUIESCE:
1581 		drm_quiesce(dev);
1582 		break;
1583 	case DVACT_WAKEUP:
1584 		drm_wakeup(dev);
1585 		break;
1586 	}
1587 
1588 	return (0);
1589 }
1590 
/* autoconf glue: probe/attach/detach/activate entry points for drm(4). */
struct cfattach drm_ca = {
	sizeof(struct drm_softc), drm_probe, drm_attach,
	drm_detach, drm_activate
};

struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
1599 
1600 const struct pci_device_id *
1601 drm_find_description(int vendor, int device, const struct pci_device_id *idlist)
1602 {
1603 	int i = 0;
1604 
1605 	for (i = 0; idlist[i].vendor != 0; i++) {
1606 		if ((idlist[i].vendor == vendor) &&
1607 		    (idlist[i].device == device) &&
1608 		    (idlist[i].subvendor == PCI_ANY_ID) &&
1609 		    (idlist[i].subdevice == PCI_ANY_ID))
1610 			return &idlist[i];
1611 	}
1612 	return NULL;
1613 }
1614 
1615 int
1616 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
1617 {
1618 	return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor);
1619 }
1620 
1621 SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
1622 
1623 struct drm_file *
1624 drm_find_file_by_minor(struct drm_device *dev, int minor)
1625 {
1626 	struct drm_file	key;
1627 
1628 	key.fminor = minor;
1629 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
1630 }
1631 
1632 struct drm_device *
1633 drm_get_device_from_kdev(dev_t kdev)
1634 {
1635 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
1636 	/* control */
1637 	if (unit >= 64 && unit < 128)
1638 		unit -= 64;
1639 	/* render */
1640 	if (unit >= 128)
1641 		unit -= 128;
1642 	struct drm_softc *sc;
1643 
1644 	if (unit < drm_cd.cd_ndevs) {
1645 		sc = (struct drm_softc *)drm_cd.cd_devs[unit];
1646 		if (sc)
1647 			return sc->sc_drm;
1648 	}
1649 
1650 	return NULL;
1651 }
1652 
1653 void
1654 filt_drmdetach(struct knote *kn)
1655 {
1656 	struct drm_device *dev = kn->kn_hook;
1657 	int s;
1658 
1659 	s = spltty();
1660 	klist_remove_locked(&dev->note, kn);
1661 	splx(s);
1662 }
1663 
/*
 * kqueue event filter for EVFILT_DEVICE notes: latch the hinted event
 * bits when the listener subscribed to them and report whether any
 * event bits are pending.
 */
int
filt_drmkms(struct knote *kn, long hint)
{
	/* Only latch hints the note asked for via kn_sfflags. */
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	return (kn->kn_fflags != 0);
}
1671 
1672 void
1673 filt_drmreaddetach(struct knote *kn)
1674 {
1675 	struct drm_file		*file_priv = kn->kn_hook;
1676 	int s;
1677 
1678 	s = spltty();
1679 	klist_remove_locked(&file_priv->rsel.si_note, kn);
1680 	splx(s);
1681 }
1682 
1683 int
1684 filt_drmread(struct knote *kn, long hint)
1685 {
1686 	struct drm_file		*file_priv = kn->kn_hook;
1687 	int			 val = 0;
1688 
1689 	if ((hint & NOTE_SUBMIT) == 0)
1690 		mtx_enter(&file_priv->minor->dev->event_lock);
1691 	val = !list_empty(&file_priv->event_list);
1692 	if ((hint & NOTE_SUBMIT) == 0)
1693 		mtx_leave(&file_priv->minor->dev->event_lock);
1694 	return (val);
1695 }
1696 
/* kqueue filterops for EVFILT_DEVICE notes on the drm device. */
const struct filterops drm_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmdetach,
	.f_event	= filt_drmkms,
};

/* kqueue filterops for EVFILT_READ on a drm file's event queue. */
const struct filterops drmread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmreaddetach,
	.f_event	= filt_drmread,
};
1710 
/*
 * kqueue attach routine for the drm device node.  EVFILT_READ notes
 * hang off the opening file's selinfo klist, EVFILT_DEVICE notes off
 * the device-wide klist; both klists are manipulated at spltty.
 */
int
drmkqfilter(dev_t kdev, struct knote *kn)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv = NULL;
	int			 s;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/*
		 * NOTE(review): this lookup takes struct_mutex while
		 * drmclose()/drmread()/drmpoll() use filelist_mutex for
		 * the same tree -- confirm which lock is intended.
		 */
		mutex_lock(&dev->struct_mutex);
		file_priv = drm_find_file_by_minor(dev, minor(kdev));
		mutex_unlock(&dev->struct_mutex);
		if (file_priv == NULL)
			return (ENXIO);

		kn->kn_fop = &drmread_filtops;
		kn->kn_hook = file_priv;

		s = spltty();
		klist_insert_locked(&file_priv->rsel.si_note, kn);
		splx(s);
		break;
	case EVFILT_DEVICE:
		kn->kn_fop = &drm_filtops;
		kn->kn_hook = dev;

		s = spltty();
		klist_insert_locked(&dev->note, kn);
		splx(s);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
1751 
/*
 * Character-device open routine: allocate a struct drm_file for this
 * opener, keyed by minor number, and run first-open setup.
 */
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	struct drm_minor	*dm;
	int			 ret = 0;
	int			 dminor, realminor, minor_type;
	int need_setup = 0;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* first opener of the device triggers legacy setup below */
	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* primary minors are 0-63, control 64-127, render 128 and up */
	dminor = minor(kdev);
	realminor =  dminor & ((1 << CLONE_SHIFT) - 1);
	if (realminor < 64)
		minor_type = DRM_MINOR_PRIMARY;
	else if (realminor >= 64 && realminor < 128)
		minor_type = DRM_MINOR_CONTROL;
	else
		minor_type = DRM_MINOR_RENDER;

	dm = *drm_minor_get_slot(dev, minor_type);
	dm->index = minor(kdev);

	file_priv = drm_file_alloc(dm);
	if (IS_ERR(file_priv)) {
		ret = ENOMEM;
		goto err;
	}

	/* first opener automatically becomes master */
	if (drm_is_primary_client(file_priv)) {
		ret = drm_master_open(file_priv);
		if (ret != 0)
			goto out_file_free;
	}

	file_priv->filp = (void *)file_priv;
	file_priv->fminor = minor(kdev);

	/* make the file findable by minor for read/poll/close */
	mutex_lock(&dev->filelist_mutex);
	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);

	if (need_setup) {
		/*
		 * NOTE(review): on failure here the file has already been
		 * SPLAY_INSERTed above -- confirm drm_file_free() removes
		 * it from the tree.
		 */
		ret = drm_legacy_setup(dev);
		if (ret)
			goto out_file_free;
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

out_file_free:
	drm_file_free(file_priv);
err:
	/* counterpart of the atomic_fetch_inc() above */
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return (ret);
}
1828 
/*
 * Character-device close routine: remove the drm_file for this minor
 * and run lastclose when the final opener goes away.
 */
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		mutex_unlock(&dev->filelist_mutex);
		goto done;
	}

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->filelist_mutex);
	drm_file_free(file_priv);
done:
	/* counterpart of the increment done in drmopen() */
	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return (retcode);
}
1865 
/*
 * Character-device read routine: deliver queued DRM events to the
 * drm_file opened on this minor.
 */
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int		 		 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->filelist_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->filelist_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		/* non-blocking read: bail instead of sleeping */
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", INFSLP);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	/* drm_dequeue_event() drops event_lock before returning */
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		kfree(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
1916 
1917 /*
1918  * Deqeue an event from the file priv in question. returning 1 if an
1919  * event was found. We take the resid from the read as a parameter because
1920  * we will only dequeue and event if the read buffer has space to fit the
1921  * entire thing.
1922  *
1923  * We are called locked, but we will *unlock* the queue on return so that
1924  * we may sleep to copyout the event.
1925  */
1926 int
1927 drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
1928     size_t resid, struct drm_pending_event **out)
1929 {
1930 	struct drm_pending_event *e = NULL;
1931 	int gotone = 0;
1932 
1933 	MUTEX_ASSERT_LOCKED(&dev->event_lock);
1934 
1935 	*out = NULL;
1936 	if (list_empty(&file_priv->event_list))
1937 		goto out;
1938 	e = list_first_entry(&file_priv->event_list,
1939 			     struct drm_pending_event, link);
1940 	if (e->event->length > resid)
1941 		goto out;
1942 
1943 	file_priv->event_space += e->event->length;
1944 	list_del(&e->link);
1945 	*out = e;
1946 	gotone = 1;
1947 
1948 out:
1949 	mtx_leave(&dev->event_lock);
1950 
1951 	return (gotone);
1952 }
1953 
1954 int
1955 drmpoll(dev_t kdev, int events, struct proc *p)
1956 {
1957 	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
1958 	struct drm_file		*file_priv;
1959 	int		 	 revents = 0;
1960 
1961 	if (dev == NULL)
1962 		return (POLLERR);
1963 
1964 	mutex_lock(&dev->filelist_mutex);
1965 	file_priv = drm_find_file_by_minor(dev, minor(kdev));
1966 	mutex_unlock(&dev->filelist_mutex);
1967 	if (file_priv == NULL)
1968 		return (POLLERR);
1969 
1970 	mtx_enter(&dev->event_lock);
1971 	if (events & (POLLIN | POLLRDNORM)) {
1972 		if (!list_empty(&file_priv->event_list))
1973 			revents |=  events & (POLLIN | POLLRDNORM);
1974 		else
1975 			selrecord(p, &file_priv->rsel);
1976 	}
1977 	mtx_leave(&dev->event_lock);
1978 
1979 	return (revents);
1980 }
1981 
1982 paddr_t
1983 drmmmap(dev_t kdev, off_t offset, int prot)
1984 {
1985 	return -1;
1986 }
1987 
1988 struct drm_dmamem *
1989 drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
1990     int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
1991 {
1992 	struct drm_dmamem	*mem;
1993 	size_t			 strsize;
1994 	/*
1995 	 * segs is the last member of the struct since we modify the size
1996 	 * to allow extra segments if more than one are allowed.
1997 	 */
1998 	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
1999 	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
2000 	if (mem == NULL)
2001 		return (NULL);
2002 
2003 	mem->size = size;
2004 
2005 	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
2006 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
2007 		goto strfree;
2008 
2009 	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
2010 	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
2011 		goto destroy;
2012 
2013 	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
2014 	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
2015 		goto free;
2016 
2017 	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
2018 	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
2019 		goto unmap;
2020 
2021 	return (mem);
2022 
2023 unmap:
2024 	bus_dmamem_unmap(dmat, mem->kva, size);
2025 free:
2026 	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
2027 destroy:
2028 	bus_dmamap_destroy(dmat, mem->map);
2029 strfree:
2030 	free(mem, M_DRM, 0);
2031 
2032 	return (NULL);
2033 }
2034 
/*
 * Release a chunk from drm_dmamem_alloc(): unload, unmap, free and
 * destroy in the reverse of the allocation order.  NULL is ignored.
 */
void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	/* size 0: the variable-length allocation size is not recorded here */
	free(mem, M_DRM, 0);
}
2047 
2048 struct drm_dma_handle *
2049 drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
2050 {
2051 	struct drm_dma_handle *dmah;
2052 
2053 	dmah = malloc(sizeof(*dmah), M_DRM, M_WAITOK);
2054 	dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
2055 	    BUS_DMA_NOCACHE, 0);
2056 	if (dmah->mem == NULL) {
2057 		free(dmah, M_DRM, sizeof(*dmah));
2058 		return NULL;
2059 	}
2060 	dmah->busaddr = dmah->mem->segs[0].ds_addr;
2061 	dmah->size = dmah->mem->size;
2062 	dmah->vaddr = dmah->mem->kva;
2063 	return (dmah);
2064 }
2065 
2066 void
2067 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
2068 {
2069 	if (dmah == NULL)
2070 		return;
2071 
2072 	drm_dmamem_free(dev->dmat, dmah->mem);
2073 	free(dmah, M_DRM, sizeof(*dmah));
2074 }
2075 
2076 /*
2077  * Compute order.  Can be made faster.
2078  */
2079 int
2080 drm_order(unsigned long size)
2081 {
2082 	int order;
2083 	unsigned long tmp;
2084 
2085 	for (order = 0, tmp = size; tmp >>= 1; ++order)
2086 		;
2087 
2088 	if (size & ~(1 << order))
2089 		++order;
2090 
2091 	return order;
2092 }
2093 
/*
 * Report the PCI identity of the device backing this drm instance to
 * userspace.  Returns -ENOTTY when the device is not PCI-attached.
 */
int
drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_pciinfo *info = data;

	if (dev->pdev == NULL)
		return -ENOTTY;

	info->domain = dev->pdev->bus->domain_nr;
	info->bus = dev->pdev->bus->number;
	info->dev = PCI_SLOT(dev->pdev->devfn);
	info->func = PCI_FUNC(dev->pdev->devfn);
	info->vendor_id = dev->pdev->vendor;
	info->device_id = dev->pdev->device;
	info->subvendor_id = dev->pdev->subsystem_vendor;
	info->subdevice_id = dev->pdev->subsystem_device;
	/*
	 * NOTE(review): hardcoded 0 even though dev->pdev->revision is
	 * populated in drm_attach() -- confirm this is deliberate.
	 */
	info->revision_id = 0;

	return 0;
}
2114