xref: /openbsd-src/sys/dev/pci/drm/drm_drv.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /* $OpenBSD: drm_drv.c,v 1.173 2020/03/04 21:19:15 kettenis Exp $ */
2 /*-
3  * Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
4  * Copyright © 2008 Intel Corporation
5  * Copyright 2003 Eric Anholt
6  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
7  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8  * All Rights Reserved.
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a
11  * copy of this software and associated documentation files (the "Software"),
12  * to deal in the Software without restriction, including without limitation
13  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14  * and/or sell copies of the Software, and to permit persons to whom the
15  * Software is furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice (including the next
18  * paragraph) shall be included in all copies or substantial portions of the
19  * Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
24  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
25  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
26  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
27  * OTHER DEALINGS IN THE SOFTWARE.
28  *
29  * Authors:
30  *    Rickard E. (Rik) Faith <faith@valinux.com>
31  *    Daryll Strauss <daryll@valinux.com>
32  *    Gareth Hughes <gareth@valinux.com>
33  *    Eric Anholt <eric@anholt.net>
34  *    Owain Ainsworth <oga@openbsd.org>
35  *
36  */
37 
38 /** @file drm_drv.c
39  * The catch-all file for DRM device support, including module setup/teardown,
40  * open/close, and ioctl dispatch.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/fcntl.h>
45 #include <sys/poll.h>
46 #include <sys/specdev.h>
47 #include <sys/systm.h>
48 #include <sys/vnode.h>
49 #include <sys/event.h>
50 
51 #include <machine/bus.h>
52 
53 #ifdef __HAVE_ACPI
54 #include <dev/acpi/acpidev.h>
55 #include <dev/acpi/acpivar.h>
56 #include <dev/acpi/dsdt.h>
57 #endif
58 
59 #include <drm/drmP.h>
60 #include <drm/drm_gem.h>
61 #include <uapi/drm/drm.h>
62 #include "drm_internal.h"
63 #include "drm_crtc_internal.h"
64 #include <drm/drm_vblank.h>
65 #include <drm/drm_print.h>
66 
67 struct drm_softc {
68 	struct device		sc_dev;
69 	struct drm_device 	*sc_drm;
70 	int			sc_allocated;
71 };
72 
73 /*
74  * drm_debug: Enable debug output.
75  * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
76  */
77 #ifdef DRMDEBUG
78 unsigned int drm_debug = DRM_UT_DRIVER | DRM_UT_KMS;
79 #else
80 unsigned int drm_debug = 0;
81 #endif
82 
83 int	 drm_firstopen(struct drm_device *);
84 void	 drm_attach(struct device *, struct device *, void *);
85 int	 drm_probe(struct device *, void *, void *);
86 int	 drm_detach(struct device *, int);
87 void	 drm_quiesce(struct drm_device *);
88 void	 drm_wakeup(struct drm_device *);
89 int	 drm_activate(struct device *, int);
90 int	 drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
91 	     struct drm_pending_event **);
92 
93 int	 drm_getmagic(struct drm_device *, void *, struct drm_file *);
94 int	 drm_authmagic(struct drm_device *, void *, struct drm_file *);
95 int	 drm_getpciinfo(struct drm_device *, void *, struct drm_file *);
96 int	 drm_file_cmp(struct drm_file *, struct drm_file *);
97 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
98 
99 /*
100  * attach drm to a pci-based driver.
101  *
102  * This function does all the pci-specific calculations for the
103  * drm_attach_args.
104  */
struct drm_device *
drm_attach_pci(struct drm_driver *driver, struct pci_attach_args *pa,
    int is_agp, int primary, struct device *dev, struct drm_device *drm)
{
	struct drm_attach_args arg;
	struct drm_softc *sc;

	/* Pass the PCI-derived parameters to drm_attach() via attach args. */
	arg.drm = drm;
	arg.driver = driver;
	arg.dmat = pa->pa_dmat;
	arg.bst = pa->pa_memt;
	arg.is_agp = is_agp;
	arg.primary = primary;
	arg.pa = pa;

	/*
	 * Linux-style bus id, e.g. "pci:0000:01:00.0".  The buffer is
	 * allocated one byte larger than busid_len so a NUL always fits.
	 */
	arg.busid_len = 20;
	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
	if (arg.busid == NULL) {
		printf("%s: no memory for drm\n", dev->dv_xname);
		return (NULL);
	}
	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);

	/* Attach the drm(4) child; drmsubmatch() restricts who may match. */
	sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
	if (sc == NULL)
		return NULL;

	return sc->sc_drm;
}
135 
136 int
137 drmprint(void *aux, const char *pnp)
138 {
139 	if (pnp != NULL)
140 		printf("drm at %s", pnp);
141 	return (UNCONF);
142 }
143 
144 int
145 drmsubmatch(struct device *parent, void *match, void *aux)
146 {
147 	extern struct cfdriver drm_cd;
148 	struct cfdata *cf = match;
149 
150 	/* only allow drm to attach */
151 	if (cf->cf_driver == &drm_cd)
152 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
153 	return (0);
154 }
155 
156 int
157 drm_pciprobe(struct pci_attach_args *pa, const struct drm_pcidev *idlist)
158 {
159 	const struct drm_pcidev *id_entry;
160 
161 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
162 	    PCI_PRODUCT(pa->pa_id), idlist);
163 	if (id_entry != NULL)
164 		return 1;
165 
166 	return 0;
167 }
168 
169 int
170 drm_probe(struct device *parent, void *match, void *aux)
171 {
172 	struct cfdata *cf = match;
173 	struct drm_attach_args *da = aux;
174 
175 	if (cf->drmdevcf_primary != DRMDEVCF_PRIMARY_UNK) {
176 		/*
177 		 * If primary-ness of device specified, either match
178 		 * exactly (at high priority), or fail.
179 		 */
180 		if (cf->drmdevcf_primary != 0 && da->primary != 0)
181 			return (10);
182 		else
183 			return (0);
184 	}
185 
186 	/* If primary-ness unspecified, it wins. */
187 	return (1);
188 }
189 
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_attach_args *da = aux;
	struct drm_device *dev = da->drm;
	int ret;

	drm_linux_init();

	/*
	 * The caller may embed the drm_device in its own softc (da->drm
	 * non-NULL); otherwise allocate one here and remember to free it
	 * in drm_detach() via sc_allocated.
	 */
	if (dev == NULL) {
		dev = malloc(sizeof(struct drm_device), M_DRM,
		    M_WAITOK | M_ZERO);
		sc->sc_allocated = 1;
	}

	sc->sc_drm = dev;

	dev->dev = self;
	dev->dev_private = parent;
	dev->driver = da->driver;

	/* Bus handles and the "pci:..." unique name built in drm_attach_pci(). */
	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->unique = da->busid;
	dev->unique_len = da->busid_len;

	/* Fake up a Linux-style struct pci_dev from the pci_attach_args. */
	if (da->pa) {
		struct pci_attach_args *pa = da->pa;
		pcireg_t subsys;

		subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);

		dev->pdev = &dev->_pdev;
		dev->pdev->vendor = PCI_VENDOR(pa->pa_id);
		dev->pdev->device = PCI_PRODUCT(pa->pa_id);
		dev->pdev->subsystem_vendor = PCI_VENDOR(subsys);
		dev->pdev->subsystem_device = PCI_PRODUCT(subsys);
		dev->pdev->revision = PCI_REVISION(pa->pa_class);

		dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
		dev->pdev->bus = &dev->pdev->_bus;
		dev->pdev->bus->pc = pa->pa_pc;
		dev->pdev->bus->number = pa->pa_bus;
		dev->pdev->bus->bridgetag = pa->pa_bridgetag;

		/* Upstream bridge, if any; freed in drm_detach(). */
		if (pa->pa_bridgetag != NULL) {
			dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
			    M_DRM, M_WAITOK | M_ZERO);
			dev->pdev->bus->self->pc = pa->pa_pc;
			dev->pdev->bus->self->tag = *pa->pa_bridgetag;
		}

		dev->pdev->pc = pa->pa_pc;
		dev->pdev->tag = pa->pa_tag;
		dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

#ifdef CONFIG_ACPI
		/* Hook ACPI notifications (e.g. hotkeys/hotplug) for this slot. */
		dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
		aml_register_notify(dev->pdev->dev.node, NULL,
		    drm_linux_acpi_notify, NULL, ACPIDEV_NOPOLL);
#endif
	}

	rw_init(&dev->struct_mutex, "drmdevlk");
	mtx_init(&dev->event_lock, IPL_TTY);
	mtx_init(&dev->quiesce_mtx, IPL_NONE);

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if IS_ENABLED(CONFIG_AGP)
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		/* Mark the AGP aperture write-combining if MTRRs are available. */
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	/* Backing pool for GEM objects; sized by the driver's object struct. */
	if (dev->driver->gem_size > 0) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
		    "drmobjpl", NULL);
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	printf("\n");
	return;

error:
	/* Clearing dev_private makes drmopen()/drmkqfilter() fail with ENXIO. */
	drm_lastclose(dev);
	dev->dev_private = NULL;
}
296 
297 int
298 drm_detach(struct device *self, int flags)
299 {
300 	struct drm_softc *sc = (struct drm_softc *)self;
301 	struct drm_device *dev = sc->sc_drm;
302 
303 	drm_lastclose(dev);
304 
305 	if (dev->driver->driver_features & DRIVER_GEM)
306 		drm_gem_destroy(dev);
307 
308 	if (dev->driver->driver_features & DRIVER_GEM)
309 		pool_destroy(&dev->objpl);
310 
311 	drm_vblank_cleanup(dev);
312 
313 	if (dev->agp && dev->agp->mtrr) {
314 		int retcode;
315 
316 		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
317 		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
318 		DRM_DEBUG("mtrr_del = %d", retcode);
319 	}
320 
321 	free(dev->agp, M_DRM, 0);
322 	if (dev->pdev && dev->pdev->bus)
323 		free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));
324 
325 	if (sc->sc_allocated)
326 		free(dev, M_DRM, sizeof(struct drm_device));
327 
328 	return 0;
329 }
330 
void
drm_quiesce(struct drm_device *dev)
{
	/*
	 * Drain the device for suspend: raise the quiesce flag (new
	 * entries into the driver will stall on it) and sleep until
	 * quiesce_count — the number of threads still inside — drops to
	 * zero.  The wakeup on &dev->quiesce_count comes from wherever
	 * quiesce_count is decremented (outside this file).
	 */
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", INFSLP);
	}
	mtx_leave(&dev->quiesce_mtx);
}
342 
void
drm_wakeup(struct drm_device *dev)
{
	/* Resume: drop the quiesce flag and wake threads sleeping on it. */
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
351 
352 int
353 drm_activate(struct device *self, int act)
354 {
355 	struct drm_softc *sc = (struct drm_softc *)self;
356 	struct drm_device *dev = sc->sc_drm;
357 
358 	switch (act) {
359 	case DVACT_QUIESCE:
360 		drm_quiesce(dev);
361 		break;
362 	case DVACT_WAKEUP:
363 		drm_wakeup(dev);
364 		break;
365 	}
366 
367 	return (0);
368 }
369 
/* autoconf(9) glue: probe/attach/detach/activate entry points. */
struct cfattach drm_ca = {
	sizeof(struct drm_softc), drm_probe, drm_attach,
	drm_detach, drm_activate
};

/* Driver record; cd_devs[] is indexed by unit in drm_get_device_from_kdev(). */
struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
378 
379 const struct drm_pcidev *
380 drm_find_description(int vendor, int device, const struct drm_pcidev *idlist)
381 {
382 	int i = 0;
383 
384 	for (i = 0; idlist[i].vendor != 0; i++) {
385 		if ((idlist[i].vendor == vendor) &&
386 		    (idlist[i].device == device) &&
387 		    (idlist[i].subvendor == PCI_ANY_ID) &&
388 		    (idlist[i].subdevice == PCI_ANY_ID))
389 			return &idlist[i];
390 	}
391 	return NULL;
392 }
393 
394 int
395 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
396 {
397 	return (f1->minor < f2->minor ? -1 : f1->minor > f2->minor);
398 }
399 
/* Instantiate the splay tree of per-open drm_file structures. */
SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
401 
402 struct drm_file *
403 drm_find_file_by_minor(struct drm_device *dev, int minor)
404 {
405 	struct drm_file	key;
406 
407 	key.minor = minor;
408 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
409 }
410 
411 struct drm_device *
412 drm_get_device_from_kdev(dev_t kdev)
413 {
414 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
415 	/* control */
416 	if (unit >= 64 && unit < 128)
417 		unit -= 64;
418 	/* render */
419 	if (unit >= 128)
420 		unit -= 128;
421 	struct drm_softc *sc;
422 
423 	if (unit < drm_cd.cd_ndevs) {
424 		sc = (struct drm_softc *)drm_cd.cd_devs[unit];
425 		if (sc)
426 			return sc->sc_drm;
427 	}
428 
429 	return NULL;
430 }
431 
int
drm_firstopen(struct drm_device *dev)
{
	/*
	 * Per-device setup run when open_count goes 0 -> 1 (see drmopen()).
	 * Give the driver a chance to run its own firstopen hook first.
	 */
	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	/* Restart magic allocation; 0 means "unassigned" (see drm_getmagic()). */
	dev->magicid = 1;

	/* Legacy (non-KMS) drivers get their IRQ state reset per open cycle. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev->irq_enabled = 0;
	dev->if_version = 0;

	DRM_DEBUG("\n");

	return 0;
}
448 
int
drm_lastclose(struct drm_device *dev)
{
	/*
	 * Counterpart of drm_firstopen(): run when the last reference to
	 * the device node is closed (and from detach/unregister paths).
	 */
	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	/* Legacy drivers manage their own IRQ; KMS drivers keep it enabled. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

#if IS_ENABLED(CONFIG_AGP)
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_agp_takedown(dev);
#endif

	return 0;
}
467 
468 void
469 filt_drmdetach(struct knote *kn)
470 {
471 	struct drm_device *dev = kn->kn_hook;
472 	int s;
473 
474 	s = spltty();
475 	SLIST_REMOVE(&dev->note, kn, knote, kn_selnext);
476 	splx(s);
477 }
478 
479 int
480 filt_drmkms(struct knote *kn, long hint)
481 {
482 	if (kn->kn_sfflags & hint)
483 		kn->kn_fflags |= hint;
484 	return (kn->kn_fflags != 0);
485 }
486 
/*
 * kqueue filter ops for EVFILT_DEVICE notes on the drm device node;
 * attached in drmkqfilter(), detached via filt_drmdetach().
 */
const struct filterops drm_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_drmdetach,
	.f_event	= filt_drmkms,
};
493 
int
drmkqfilter(dev_t kdev, struct knote *kn)
{
	struct drm_device	*dev = NULL;
	int s;

	/* dev_private is cleared on failed attach; treat that as no device. */
	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	/* Only device-level events are supported on the drm node. */
	switch (kn->kn_filter) {
	case EVFILT_DEVICE:
		kn->kn_fop = &drm_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = dev;

	/* The note list is protected at spltty (cf. filt_drmdetach()). */
	s = spltty();
	SLIST_INSERT_HEAD(&dev->note, kn, kn_selnext);
	splx(s);

	return (0);
}
520 
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	int			 ret = 0;
	int			 realminor;

	/* dev_private is cleared on failed attach; treat that as no device. */
	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	/* First opener runs drm_firstopen(); the count guards the race. */
	mutex_lock(&dev->struct_mutex);
	if (dev->open_count++ == 0) {
		mutex_unlock(&dev->struct_mutex);
		if ((ret = drm_firstopen(dev)) != 0)
			goto err;
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	/* always allocate at least enough space for our data */
	file_priv = mallocarray(1, max(dev->driver->file_priv_size,
	    sizeof(*file_priv)), M_DRM, M_NOWAIT | M_ZERO);
	if (file_priv == NULL) {
		ret = ENOMEM;
		goto err;
	}

	/*
	 * NOTE(review): this stores the address of the local pointer
	 * variable, not the allocation itself — looks like it only needs
	 * to be a non-NULL cookie here; confirm against users of filp.
	 */
	file_priv->filp = (void *)&file_priv;
	file_priv->minor = minor(kdev);
	/* Minor space: 0-63 primary, 64-127 control, 128+ render nodes. */
	realminor =  file_priv->minor & ((1 << CLONE_SHIFT) - 1);
	if (realminor < 64)
		file_priv->minor_type = DRM_MINOR_PRIMARY;
	else if (realminor >= 64 && realminor < 128)
		file_priv->minor_type = DRM_MINOR_CONTROL;
	else
		file_priv->minor_type = DRM_MINOR_RENDER;

	INIT_LIST_HEAD(&file_priv->lhead);
	INIT_LIST_HEAD(&file_priv->fbs);
	rw_init(&file_priv->fbs_lock, "fbslk");
	INIT_LIST_HEAD(&file_priv->blobs);
	INIT_LIST_HEAD(&file_priv->pending_event_list);
	INIT_LIST_HEAD(&file_priv->event_list);
	init_waitqueue_head(&file_priv->event_wait);
	file_priv->event_space = 4096; /* 4k for event buffer */
	DRM_DEBUG("minor = %d\n", file_priv->minor);

	/* for compatibility root is always authenticated */
	file_priv->authenticated = DRM_SUSER(p);

	rw_init(&file_priv->event_read_lock, "evread");

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file_priv);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&file_priv->prime);

	/* Driver-specific per-open setup; failure unwinds everything above. */
	if (dev->driver->open) {
		ret = dev->driver->open(dev, file_priv);
		if (ret != 0) {
			goto out_prime_destroy;
		}
	}

	mutex_lock(&dev->struct_mutex);
	/* first opener automatically becomes master */
	if (drm_is_primary_client(file_priv))
		file_priv->is_master = SPLAY_EMPTY(&dev->files);
	if (file_priv->is_master)
		file_priv->authenticated = 1;

	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->struct_mutex);

	return (0);

out_prime_destroy:
	/* Unwind in reverse order of the feature setup above. */
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file_priv);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);
	free(file_priv, M_DRM, 0);
err:
	/* Undo the open_count increment taken at the top. */
	mutex_lock(&dev->struct_mutex);
	--dev->open_count;
	mutex_unlock(&dev->struct_mutex);
	return (ret);
}
622 
623 void drm_events_release(struct drm_file *file_priv, struct drm_device *dev);
624 
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		/* struct_mutex is still held; the done: path unlocks it. */
		goto done;
	}
	mutex_unlock(&dev->struct_mutex);

	/* Legacy drivers get a preclose callback before teardown starts. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)&dev->dev, dev->open_count);

	/* Release per-file resources in reverse order of drmopen(). */
	drm_events_release(file_priv, dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file_priv);
		drm_property_destroy_user_blobs(dev, file_priv);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);

	mutex_lock(&dev->struct_mutex);

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	free(file_priv, M_DRM, 0);

done:
	/* Last closer runs drm_lastclose() outside the mutex. */
	if (--dev->open_count == 0) {
		mutex_unlock(&dev->struct_mutex);
		retcode = drm_lastclose(dev);
	} else
		mutex_unlock(&dev->struct_mutex);

	return (retcode);
}
686 
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int		 		 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		/* Non-blocking read: report EAGAIN instead of sleeping. */
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		/* Interruptible sleep; event_lock is dropped while asleep. */
		error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", INFSLP);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	/* drm_dequeue_event() drops event_lock so uiomove() may sleep. */
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		kfree(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
737 
/*
 * Dequeue an event from the file priv in question, returning 1 if an
 * event was found. We take the resid from the read as a parameter because
 * we will only dequeue an event if the read buffer has space to fit the
 * entire thing.
 *
 * We are called locked, but we will *unlock* the queue on return so that
 * we may sleep to copyout the event.
 */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event *e = NULL;
	int gotone = 0;

	/* Caller holds event_lock; we release it on every exit path. */
	MUTEX_ASSERT_LOCKED(&dev->event_lock);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
			     struct drm_pending_event, link);
	/* Events are delivered whole: skip if the buffer can't fit this one. */
	if (e->event->length > resid)
		goto out;

	/* Return the event's quota to the file before handing it out. */
	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	gotone = 1;

out:
	mtx_leave(&dev->event_lock);

	return (gotone);
}
774 
775 /* XXX kqfilter ... */
/* XXX kqfilter ... */
int
drmpoll(dev_t kdev, int events, struct proc *p)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_file		*file_priv;
	int		 	 revents = 0;

	if (dev == NULL)
		return (POLLERR);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL)
		return (POLLERR);

	/* Readable when the per-file event queue is non-empty. */
	mtx_enter(&dev->event_lock);
	if (events & (POLLIN | POLLRDNORM)) {
		if (!list_empty(&file_priv->event_list))
			revents |=  events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &file_priv->rsel);
	}
	mtx_leave(&dev->event_lock);

	return (revents);
}
803 
paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	/* mmap of the device node itself is not supported; always fail. */
	return -1;
}
809 
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	/*
	 * Standard bus_dma(9) sequence — create map, allocate and map
	 * segments, then load — with a goto chain that unwinds exactly
	 * the steps completed so far on failure.
	 */
	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}
856 
void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	/* Reverse of drm_dmamem_alloc(): unload, unmap, free, destroy. */
	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM, 0);
}
869 
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	struct drm_dma_handle *dmah;

	/* Single-segment, uncached DMA buffer wrapped in a Linux-style handle. */
	dmah = malloc(sizeof(*dmah), M_DRM, M_WAITOK);
	dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
	    BUS_DMA_NOCACHE, 0);
	if (dmah->mem == NULL) {
		free(dmah, M_DRM, sizeof(*dmah));
		return NULL;
	}
	/* One segment was requested, so segs[0] covers the whole buffer. */
	dmah->busaddr = dmah->mem->segs[0].ds_addr;
	dmah->size = dmah->mem->size;
	dmah->vaddr = dmah->mem->kva;
	return (dmah);
}
887 
888 void
889 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
890 {
891 	if (dmah == NULL)
892 		return;
893 
894 	drm_dmamem_free(dev->dmat, dmah->mem);
895 	free(dmah, M_DRM, sizeof(*dmah));
896 }
897 
898 /**
899  * Called by the client, this returns a unique magic number to be authorized
900  * by the master.
901  *
902  * The master may use its own knowledge of the client (such as the X
903  * connection that the magic is passed over) to determine if the magic number
904  * should be authenticated.
905  */
906 int
907 drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
908 {
909 	struct drm_auth		*auth = data;
910 
911 	if (dev->magicid == 0)
912 		dev->magicid = 1;
913 
914 	/* Find unique magic */
915 	if (file_priv->magic) {
916 		auth->magic = file_priv->magic;
917 	} else {
918 		mutex_lock(&dev->struct_mutex);
919 		file_priv->magic = auth->magic = dev->magicid++;
920 		mutex_unlock(&dev->struct_mutex);
921 		DRM_DEBUG("%d\n", auth->magic);
922 	}
923 
924 	DRM_DEBUG("%u\n", auth->magic);
925 	return 0;
926 }
927 
928 /**
929  * Marks the client associated with the given magic number as authenticated.
930  */
int
drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_file	*p;
	struct drm_auth	*auth = data;
	int		 ret = -EINVAL;

	DRM_DEBUG("%u\n", auth->magic);

	/* 0 is the "unassigned" sentinel and can never be authenticated. */
	if (auth->magic == 0)
		return ret;

	/* Find the open file holding this magic and mark it authenticated. */
	mutex_lock(&dev->struct_mutex);
	SPLAY_FOREACH(p, drm_file_tree, &dev->files) {
		if (p->magic == auth->magic) {
			p->authenticated = 1;
			/* Magics are single-use: clear it once consumed. */
			p->magic = 0;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
956 
957 /*
958  * Compute order.  Can be made faster.
959  */
/*
 * Compute order: the smallest n such that (1 << n) >= size.
 * Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	/* order = floor(log2(size)); 0 when size <= 1. */
	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	/*
	 * Round up when size is not an exact power of two.  The shift
	 * must be done in unsigned long: the previous "1 << order"
	 * shifted a 32-bit int, which is undefined behavior once
	 * order >= 31 on LP64 platforms.
	 */
	if (size & ~(1UL << order))
		++order;

	return order;
}
974 
975 int
976 drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
977 {
978 	struct drm_pciinfo *info = data;
979 
980 	if (dev->pdev == NULL)
981 		return -ENOTTY;
982 
983 	info->domain = 0;
984 	info->bus = dev->pdev->bus->number;
985 	info->dev = PCI_SLOT(dev->pdev->devfn);
986 	info->func = PCI_FUNC(dev->pdev->devfn);
987 	info->vendor_id = dev->pdev->vendor;
988 	info->device_id = dev->pdev->device;
989 	info->subvendor_id = dev->pdev->subsystem_vendor;
990 	info->subdevice_id = dev->pdev->subsystem_device;
991 	info->revision_id = 0;
992 
993 	return 0;
994 }
995 
996 /**
997  * drm_dev_register - Register DRM device
998  * @dev: Device to register
999  * @flags: Flags passed to the driver's .load() function
1000  *
1001  * Register the DRM device @dev with the system, advertise device to user-space
1002  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
1003  * previously.
1004  *
1005  * Never call this twice on any device!
1006  *
1007  * NOTE: To ensure backward compatibility with existing drivers method this
1008  * function calls the &drm_driver.load method after registering the device
1009  * nodes, creating race conditions. Usage of the &drm_driver.load methods is
1010  * therefore deprecated, drivers must perform all initialization before calling
1011  * drm_dev_register().
1012  *
1013  * RETURNS:
1014  * 0 on success, negative error code on failure.
1015  */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	/* Mark the device live, then expose KMS objects to userland. */
	dev->registered = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	return 0;
}
1025 EXPORT_SYMBOL(drm_dev_register);
1026 
1027 /**
1028  * drm_dev_unregister - Unregister DRM device
1029  * @dev: Device to unregister
1030  *
1031  * Unregister the DRM device from the system. This does the reverse of
1032  * drm_dev_register() but does not deallocate the device. The caller must call
1033  * drm_dev_put() to drop their final reference.
1034  *
1035  * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
1036  * which can be called while there are still open users of @dev.
1037  *
1038  * This should be called first in the device teardown code to make sure
1039  * userspace can't access the device instance any more.
1040  */
void drm_dev_unregister(struct drm_device *dev)
{
	/* Legacy drivers tear down per-open state via lastclose first. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	dev->registered = false;

	/* Withdraw KMS objects so userland can no longer reach the device. */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);
}
1051 EXPORT_SYMBOL(drm_dev_unregister);
1052 
1053