xref: /openbsd-src/sys/dev/pci/drm/drm_drv.c (revision 4b70baf6e17fc8b27fc1f7fa7929335753fa94c3)
1 /* $OpenBSD: drm_drv.c,v 1.161 2019/05/02 09:47:16 kettenis Exp $ */
2 /*-
3  * Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
4  * Copyright © 2008 Intel Corporation
5  * Copyright 2003 Eric Anholt
6  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
7  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8  * All Rights Reserved.
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a
11  * copy of this software and associated documentation files (the "Software"),
12  * to deal in the Software without restriction, including without limitation
13  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14  * and/or sell copies of the Software, and to permit persons to whom the
15  * Software is furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice (including the next
18  * paragraph) shall be included in all copies or substantial portions of the
19  * Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
24  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
25  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
26  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
27  * OTHER DEALINGS IN THE SOFTWARE.
28  *
29  * Authors:
30  *    Rickard E. (Rik) Faith <faith@valinux.com>
31  *    Daryll Strauss <daryll@valinux.com>
32  *    Gareth Hughes <gareth@valinux.com>
33  *    Eric Anholt <eric@anholt.net>
34  *    Owain Ainsworth <oga@openbsd.org>
35  *
36  */
37 
38 /** @file drm_drv.c
39  * The catch-all file for DRM device support, including module setup/teardown,
40  * open/close, and ioctl dispatch.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/fcntl.h>
45 #include <sys/filio.h>
46 #include <sys/limits.h>
47 #include <sys/pledge.h>
48 #include <sys/poll.h>
49 #include <sys/specdev.h>
50 #include <sys/systm.h>
51 #include <sys/ttycom.h> /* for TIOCSGRP */
52 #include <sys/vnode.h>
53 #include <sys/event.h>
54 
55 #include <uvm/uvm.h>
56 #include <uvm/uvm_device.h>
57 
58 #include <drm/drmP.h>
59 #include <drm/drm_gem.h>
60 #include <uapi/drm/drm.h>
61 #include "drm_internal.h"
62 #include "drm_crtc_internal.h"
63 #include <drm/drm_vblank.h>
64 
/* Per-instance autoconf glue tying a struct device to its drm_device. */
struct drm_softc {
	struct device		sc_dev;		/* generic device; must be first */
	struct drm_device 	*sc_drm;	/* the drm device we manage */
	int			sc_allocated;	/* sc_drm malloc'd here; free on detach */
};
70 
#ifdef DRMDEBUG
/* Non-zero enables DRM_DEBUG() output; only compiled in with DRMDEBUG. */
int drm_debug_flag = 1;
#endif
74 
75 int	 drm_firstopen(struct drm_device *);
76 void	 drm_attach(struct device *, struct device *, void *);
77 int	 drm_probe(struct device *, void *, void *);
78 int	 drm_detach(struct device *, int);
79 void	 drm_quiesce(struct drm_device *);
80 void	 drm_wakeup(struct drm_device *);
81 int	 drm_activate(struct device *, int);
82 int	 drmprint(void *, const char *);
83 int	 drmsubmatch(struct device *, void *, void *);
84 int	 drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
85 	     struct drm_pending_event **);
86 
87 int	 drm_getmagic(struct drm_device *, void *, struct drm_file *);
88 int	 drm_authmagic(struct drm_device *, void *, struct drm_file *);
89 int	 drm_getpciinfo(struct drm_device *, void *, struct drm_file *);
90 int	 drm_file_cmp(struct drm_file *, struct drm_file *);
91 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
92 
93 /*
94  * attach drm to a pci-based driver.
95  *
96  * This function does all the pci-specific calculations for the
97  * drm_attach_args.
98  */
99 struct drm_device *
100 drm_attach_pci(struct drm_driver *driver, struct pci_attach_args *pa,
101     int is_agp, int console, struct device *dev, struct drm_device *drm)
102 {
103 	struct drm_attach_args arg;
104 	struct drm_softc *sc;
105 	pcireg_t subsys;
106 
107 	arg.drm = drm;
108 	arg.driver = driver;
109 	arg.dmat = pa->pa_dmat;
110 	arg.bst = pa->pa_memt;
111 	arg.is_agp = is_agp;
112 	arg.console = console;
113 
114 	arg.pci_vendor = PCI_VENDOR(pa->pa_id);
115 	arg.pci_device = PCI_PRODUCT(pa->pa_id);
116 
117 	subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
118 	arg.pci_subvendor = PCI_VENDOR(subsys);
119 	arg.pci_subdevice = PCI_PRODUCT(subsys);
120 
121 	arg.pci_revision = PCI_REVISION(pa->pa_class);
122 
123 	arg.pa = pa;
124 	arg.pc = pa->pa_pc;
125 	arg.tag = pa->pa_tag;
126 	arg.bridgetag = pa->pa_bridgetag;
127 
128 	arg.busid_len = 20;
129 	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
130 	if (arg.busid == NULL) {
131 		printf("%s: no memory for drm\n", dev->dv_xname);
132 		return (NULL);
133 	}
134 	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
135 	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
136 
137 	sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
138 	if (sc == NULL)
139 		return NULL;
140 
141 	return sc->sc_drm;
142 }
143 
144 int
145 drmprint(void *aux, const char *pnp)
146 {
147 	if (pnp != NULL)
148 		printf("drm at %s", pnp);
149 	return (UNCONF);
150 }
151 
152 int
153 drmsubmatch(struct device *parent, void *match, void *aux)
154 {
155 	extern struct cfdriver drm_cd;
156 	struct cfdata *cf = match;
157 
158 	/* only allow drm to attach */
159 	if (cf->cf_driver == &drm_cd)
160 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
161 	return (0);
162 }
163 
164 int
165 drm_pciprobe(struct pci_attach_args *pa, const struct drm_pcidev *idlist)
166 {
167 	const struct drm_pcidev *id_entry;
168 
169 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
170 	    PCI_PRODUCT(pa->pa_id), idlist);
171 	if (id_entry != NULL)
172 		return 1;
173 
174 	return 0;
175 }
176 
177 int
178 drm_probe(struct device *parent, void *match, void *aux)
179 {
180 	struct cfdata *cf = match;
181 	struct drm_attach_args *da = aux;
182 
183 	if (cf->drmdevcf_console != DRMDEVCF_CONSOLE_UNK) {
184 		/*
185 		 * If console-ness of device specified, either match
186 		 * exactly (at high priority), or fail.
187 		 */
188 		if (cf->drmdevcf_console != 0 && da->console != 0)
189 			return (10);
190 		else
191 			return (0);
192 	}
193 
194 	/* If console-ness unspecified, it wins. */
195 	return (1);
196 }
197 
/*
 * autoconf attach function: wire up a drm_device from the attach args
 * built by drm_attach_pci(), initialize locks, the file tree, AGP and
 * GEM state.  On failure the device is left with dev_private == NULL,
 * which the open/ioctl paths treat as "not attached".
 */
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_softc *sc = (struct drm_softc *)self;
	struct drm_attach_args *da = aux;
	struct drm_device *dev = da->drm;
	int bus, slot, func;
	int ret;

	/* bring up the linux compatibility layer before anything else */
	drm_linux_init();

	/*
	 * The parent may hand us a drm_device embedded in its own softc;
	 * otherwise allocate one and remember to free it on detach.
	 */
	if (dev == NULL) {
		dev = malloc(sizeof(struct drm_device), M_DRM,
		    M_WAITOK | M_ZERO);
		sc->sc_allocated = 1;
	}

	sc->sc_drm = dev;

	dev->dev = self;
	dev->dev_private = parent;
	dev->driver = da->driver;

	/* copy the pci identity into our emulated struct pci_dev */
	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->unique = da->busid;
	dev->unique_len = da->busid_len;
	dev->pdev = &dev->_pdev;
	dev->pci_vendor = dev->pdev->vendor = da->pci_vendor;
	dev->pci_device = dev->pdev->device = da->pci_device;
	dev->pdev->subsystem_vendor = da->pci_subvendor;
	dev->pdev->subsystem_device = da->pci_subdevice;
	dev->pdev->revision = da->pci_revision;

	pci_decompose_tag(da->pc, da->tag, &bus, &slot, &func);
	dev->pdev->bus = &dev->pdev->_bus;
	dev->pdev->bus->pc = da->pc;
	dev->pdev->bus->number = bus;
	dev->pdev->bus->bridgetag = da->bridgetag;
	dev->pdev->devfn = PCI_DEVFN(slot, func);

	/* fake pci_dev for the upstream bridge; freed in drm_detach() */
	dev->pdev->bus->self = malloc(sizeof(struct pci_dev), M_DRM,
	    M_NOWAIT | M_ZERO);
	if (dev->pdev->bus->self == NULL)
		goto error;
	dev->pdev->bus->self->pc = da->pc;
	if (da->bridgetag != NULL)
		dev->pdev->bus->self->tag = *da->bridgetag;

	dev->pc = da->pc;
	dev->pdev->pc = da->pc;
	dev->bridgetag = da->bridgetag;
	dev->pdev->tag = da->tag;
	dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

	rw_init(&dev->struct_mutex, "drmdevlk");
	mtx_init(&dev->event_lock, IPL_TTY);
	mtx_init(&dev->quiesce_mtx, IPL_NONE);

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if IS_ENABLED(CONFIG_AGP)
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		/* write-combine the AGP aperture when an mtrr is available */
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	/* backing pool for GEM objects, sized by the driver */
	if (dev->driver->gem_size > 0) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
		    "drmobjpl", NULL);
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	printf("\n");
	return;

error:
	drm_lastclose(dev);
	/* mark the device unusable; open/ioctl check dev_private */
	dev->dev_private = NULL;
}
294 
295 int
296 drm_detach(struct device *self, int flags)
297 {
298 	struct drm_softc *sc = (struct drm_softc *)self;
299 	struct drm_device *dev = sc->sc_drm;
300 
301 	drm_lastclose(dev);
302 
303 	if (dev->driver->driver_features & DRIVER_GEM)
304 		drm_gem_destroy(dev);
305 
306 	if (dev->driver->driver_features & DRIVER_GEM)
307 		pool_destroy(&dev->objpl);
308 
309 	drm_vblank_cleanup(dev);
310 
311 	if (dev->agp && dev->agp->mtrr) {
312 		int retcode;
313 
314 		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
315 		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
316 		DRM_DEBUG("mtrr_del = %d", retcode);
317 	}
318 
319 	free(dev->agp, M_DRM, 0);
320 	free(dev->pdev->bus->self, M_DRM, sizeof(struct pci_dev));
321 
322 	if (sc->sc_allocated)
323 		free(dev, M_DRM, sizeof(struct drm_device));
324 
325 	return 0;
326 }
327 
/*
 * Suspend path: flag the device quiescent and wait until all threads
 * currently inside driver entry points (tracked via quiesce_count)
 * have drained.  Woken by whoever drops quiesce_count to zero.
 */
void
drm_quiesce(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		msleep(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", 0);
	}
	mtx_leave(&dev->quiesce_mtx);
}
339 
/*
 * Resume path: clear the quiesce flag and wake any threads sleeping
 * on &dev->quiesce waiting for the device to become usable again.
 */
void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
348 
349 int
350 drm_activate(struct device *self, int act)
351 {
352 	struct drm_softc *sc = (struct drm_softc *)self;
353 	struct drm_device *dev = sc->sc_drm;
354 
355 	switch (act) {
356 	case DVACT_QUIESCE:
357 		drm_quiesce(dev);
358 		break;
359 	case DVACT_WAKEUP:
360 		drm_wakeup(dev);
361 		break;
362 	}
363 
364 	return (0);
365 }
366 
/* autoconf(9) attachment glue for drm(4). */
struct cfattach drm_ca = {
	sizeof(struct drm_softc), drm_probe, drm_attach,
	drm_detach, drm_activate
};
371 
/* autoconf(9) driver definition; cd_devs/cd_ndevs index softcs by unit. */
struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
375 
376 const struct drm_pcidev *
377 drm_find_description(int vendor, int device, const struct drm_pcidev *idlist)
378 {
379 	int i = 0;
380 
381 	for (i = 0; idlist[i].vendor != 0; i++) {
382 		if ((idlist[i].vendor == vendor) &&
383 		    (idlist[i].device == device) &&
384 		    (idlist[i].subvendor == PCI_ANY_ID) &&
385 		    (idlist[i].subdevice == PCI_ANY_ID))
386 			return &idlist[i];
387 	}
388 	return NULL;
389 }
390 
391 int
392 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
393 {
394 	return (f1->minor < f2->minor ? -1 : f1->minor > f2->minor);
395 }
396 
397 SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
398 
/*
 * Look up the drm_file for a given minor in the per-device splay tree.
 * Caller holds dev->struct_mutex.  Returns NULL when not found.
 */
struct drm_file *
drm_find_file_by_minor(struct drm_device *dev, int minor)
{
	struct drm_file	key;

	/* only the comparison key field needs to be initialized */
	key.minor = minor;
	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
}
407 
408 struct drm_device *
409 drm_get_device_from_kdev(dev_t kdev)
410 {
411 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
412 	/* control */
413 	if (unit >= 64 && unit < 128)
414 		unit -= 64;
415 	/* render */
416 	if (unit >= 128)
417 		unit -= 128;
418 	struct drm_softc *sc;
419 
420 	if (unit < drm_cd.cd_ndevs) {
421 		sc = (struct drm_softc *)drm_cd.cd_devs[unit];
422 		if (sc)
423 			return sc->sc_drm;
424 	}
425 
426 	return NULL;
427 }
428 
/*
 * Per-session reset, run when open_count goes from 0 to 1: give the
 * driver its firstopen() hook and reset auth/irq/interface state.
 */
int
drm_firstopen(struct drm_device *dev)
{
	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	/* magic 0 is reserved; start handing out auth magics at 1 */
	dev->magicid = 1;

	/* legacy (non-KMS) drivers get their irq torn down on lastclose */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev->irq_enabled = 0;
	dev->if_version = 0;

	dev->buf_pgid = 0;

	DRM_DEBUG("\n");

	return 0;
}
447 
/*
 * Counterpart of drm_firstopen(): run when the last file handle is
 * closed (and from attach-error/detach paths).  Gives the driver its
 * lastclose() hook and tears down legacy irq/AGP state.
 */
int
drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	/* KMS drivers manage their own irq lifetime */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

#if IS_ENABLED(CONFIG_AGP)
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_agp_takedown(dev);
#endif

	return 0;
}
466 
/*
 * Knote detach: unhook a kevent subscriber from the device's note
 * list.  The list is protected at spltty, matching the insertion in
 * drmkqfilter().
 */
void
filt_drmdetach(struct knote *kn)
{
	struct drm_device *dev = kn->kn_hook;
	int s;

	s = spltty();
	SLIST_REMOVE(&dev->note, kn, knote, kn_selnext);
	splx(s);
}
477 
478 int
479 filt_drmkms(struct knote *kn, long hint)
480 {
481 	if (kn->kn_sfflags & hint)
482 		kn->kn_fflags |= hint;
483 	return (kn->kn_fflags != 0);
484 }
485 
/* kqueue filter ops for EVFILT_DEVICE on drm nodes (f_isfd = 1). */
struct filterops drm_filtops =
	{ 1, NULL, filt_drmdetach, filt_drmkms };
488 
/*
 * kqfilter entry point: register a kevent listener on a drm node.
 * Only EVFILT_DEVICE is supported.  Returns ENXIO when the device is
 * absent or failed to attach (dev_private == NULL, see drm_attach()).
 */
int
drmkqfilter(dev_t kdev, struct knote *kn)
{
	struct drm_device	*dev = NULL;
	int s;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_DEVICE:
		kn->kn_fop = &drm_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = dev;

	/* note list is walked from interrupt-ish context; block at spltty */
	s = spltty();
	SLIST_INSERT_HEAD(&dev->note, kn, kn_selnext);
	splx(s);

	return (0);
}
515 
/*
 * open(2) entry point: allocate and initialize a drm_file for this
 * minor, run drm_firstopen() on the 0->1 open transition, set up GEM /
 * syncobj / prime per-file state and give the driver its open() hook.
 * The first primary-node opener becomes master.  On any failure the
 * partially built state is unwound and open_count is rolled back.
 */
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	int			 ret = 0;
	int			 realminor;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	mutex_lock(&dev->struct_mutex);
	if (dev->open_count++ == 0) {
		/* first opener: run session setup outside the mutex */
		mutex_unlock(&dev->struct_mutex);
		if ((ret = drm_firstopen(dev)) != 0)
			goto err;
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	/* always allocate at least enough space for our data */
	file_priv = mallocarray(1, max(dev->driver->file_priv_size,
	    sizeof(*file_priv)), M_DRM, M_NOWAIT | M_ZERO);
	if (file_priv == NULL) {
		ret = ENOMEM;
		goto err;
	}

	/*
	 * NOTE(review): this stores the address of an on-stack pointer
	 * variable, which dangles once drmopen() returns — presumably
	 * filp is only ever used as an opaque token; confirm no caller
	 * dereferences it.
	 */
	file_priv->filp = (void *)&file_priv;
	file_priv->minor = minor(kdev);
	/* classify the node type from the minor range (see kdev mapping) */
	realminor =  file_priv->minor & ((1 << CLONE_SHIFT) - 1);
	if (realminor < 64)
		file_priv->minor_type = DRM_MINOR_PRIMARY;
	else if (realminor >= 64 && realminor < 128)
		file_priv->minor_type = DRM_MINOR_CONTROL;
	else
		file_priv->minor_type = DRM_MINOR_RENDER;

	INIT_LIST_HEAD(&file_priv->lhead);
	INIT_LIST_HEAD(&file_priv->fbs);
	rw_init(&file_priv->fbs_lock, "fbslk");
	INIT_LIST_HEAD(&file_priv->blobs);
	INIT_LIST_HEAD(&file_priv->pending_event_list);
	INIT_LIST_HEAD(&file_priv->event_list);
	init_waitqueue_head(&file_priv->event_wait);
	file_priv->event_space = 4096; /* 4k for event buffer */
	DRM_DEBUG("minor = %d\n", file_priv->minor);

	/* for compatibility root is always authenticated */
	file_priv->authenticated = DRM_SUSER(p);

	rw_init(&file_priv->event_read_lock, "evread");

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file_priv);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&file_priv->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file_priv);
		if (ret != 0) {
			goto out_prime_destroy;
		}
	}

	mutex_lock(&dev->struct_mutex);
	/* first opener automatically becomes master */
	if (drm_is_primary_client(file_priv))
		file_priv->is_master = SPLAY_EMPTY(&dev->files);
	if (file_priv->is_master)
		file_priv->authenticated = 1;

	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->struct_mutex);

	return (0);

out_prime_destroy:
	/* unwind the per-file state in reverse order of setup */
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file_priv);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);
	free(file_priv, M_DRM, 0);
err:
	mutex_lock(&dev->struct_mutex);
	--dev->open_count;
	mutex_unlock(&dev->struct_mutex);
	return (ret);
}
617 
618 void drm_events_release(struct drm_file *file_priv, struct drm_device *dev);
619 
/*
 * close(2) entry point: tear down the drm_file for this minor in
 * reverse order of drmopen() and run drm_lastclose() when open_count
 * drops to zero.  Note the goto done path deliberately arrives with
 * struct_mutex still held; the normal path re-takes it first.
 */
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		/* struct_mutex intentionally kept held for the done label */
		goto done;
	}
	mutex_unlock(&dev->struct_mutex);

	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)&dev->dev, dev->open_count);

	/* drop any events still queued for this file */
	drm_events_release(file_priv, dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file_priv);
		drm_property_destroy_user_blobs(dev, file_priv);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);

	dev->buf_pgid = 0;

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);

	mutex_lock(&dev->struct_mutex);

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	free(file_priv, M_DRM, 0);

done:
	/* last close on the device: run full session teardown */
	if (--dev->open_count == 0) {
		mutex_unlock(&dev->struct_mutex);
		retcode = drm_lastclose(dev);
	} else
		mutex_unlock(&dev->struct_mutex);

	return (retcode);
}
683 
/*
 * read(2) entry point: deliver queued drm events (vblank, flip
 * complete, ...) to userland.  Blocks until at least one event is
 * available unless IO_NDELAY is set, then copies out as many whole
 * events as fit in the buffer.
 */
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int		 		 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep(&file_priv->event_wait, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", 0);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	/* drm_dequeue_event() drops event_lock before returning */
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		kfree(ev);
		if (error)
			break;
		/* re-take the lock for the next dequeue attempt */
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
734 
/*
 * Dequeue an event from the file priv in question, returning 1 if an
 * event was found. We take the resid from the read as a parameter because
 * we will only dequeue an event if the read buffer has space to fit the
 * entire thing.
 *
 * We are called locked, but we will *unlock* the queue on return so that
 * we may sleep to copyout the event.
 */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event *e = NULL;
	int gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
			     struct drm_pending_event, link);
	/* events are atomic: skip if it won't fit the remaining buffer */
	if (e->event->length > resid)
		goto out;

	/* return the consumed bytes to the file's event allowance */
	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	gotone = 1;

out:
	/* always returns with event_lock dropped, see drmread() */
	mtx_leave(&dev->event_lock);

	return (gotone);
}
771 
772 /* XXX kqfilter ... */
/*
 * poll(2) entry point: readable when the file has queued events,
 * otherwise record the selector so drm event posting can wake us.
 */
int
drmpoll(dev_t kdev, int events, struct proc *p)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_file		*file_priv;
	int		 	 revents = 0;

	if (dev == NULL)
		return (POLLERR);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL)
		return (POLLERR);

	mtx_enter(&dev->event_lock);
	if (events & (POLLIN | POLLRDNORM)) {
		if (!list_empty(&file_priv->event_list))
			revents |=  events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &file_priv->rsel);
	}
	mtx_leave(&dev->event_lock);

	return (revents);
}
800 
801 paddr_t
802 drmmmap(dev_t kdev, off_t offset, int prot)
803 {
804 	return -1;
805 }
806 
/*
 * Allocate, map and load a DMA-safe memory region in one call.
 * Returns a drm_dmamem handle with the dmamap, segments and kva all
 * set up, or NULL on failure.  Teardown is drm_dmamem_free().
 */
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

	/* unwind in reverse order of setup */
unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}
853 
/*
 * Release a region allocated by drm_dmamem_alloc(): unload, unmap,
 * free the segments, destroy the map and free the handle itself.
 * NULL is accepted and ignored.
 */
void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM, 0);
}
866 
/*
 * Linux-compat wrapper: allocate a single contiguous, uncached DMA
 * buffer and expose it through the drm_dma_handle fields (busaddr,
 * vaddr, size) that ported drivers expect.  Returns NULL on failure.
 */
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	struct drm_dma_handle *dmah;

	/* M_WAITOK: this malloc cannot fail */
	dmah = malloc(sizeof(*dmah), M_DRM, M_WAITOK);
	dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
	    BUS_DMA_NOCACHE, 0);
	if (dmah->mem == NULL) {
		free(dmah, M_DRM, sizeof(*dmah));
		return NULL;
	}
	dmah->busaddr = dmah->mem->segs[0].ds_addr;
	dmah->size = dmah->mem->size;
	dmah->vaddr = dmah->mem->kva;
	return (dmah);
}
884 
885 void
886 drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
887 {
888 	if (dmah == NULL)
889 		return;
890 
891 	drm_dmamem_free(dev->dmat, dmah->mem);
892 	free(dmah, M_DRM, sizeof(*dmah));
893 }
894 
895 /**
896  * Called by the client, this returns a unique magic number to be authorized
897  * by the master.
898  *
899  * The master may use its own knowledge of the client (such as the X
900  * connection that the magic is passed over) to determine if the magic number
901  * should be authenticated.
902  */
903 int
904 drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
905 {
906 	struct drm_auth		*auth = data;
907 
908 	if (dev->magicid == 0)
909 		dev->magicid = 1;
910 
911 	/* Find unique magic */
912 	if (file_priv->magic) {
913 		auth->magic = file_priv->magic;
914 	} else {
915 		mutex_lock(&dev->struct_mutex);
916 		file_priv->magic = auth->magic = dev->magicid++;
917 		mutex_unlock(&dev->struct_mutex);
918 		DRM_DEBUG("%d\n", auth->magic);
919 	}
920 
921 	DRM_DEBUG("%u\n", auth->magic);
922 	return 0;
923 }
924 
925 /**
926  * Marks the client associated with the given magic number as authenticated.
927  */
/*
 * DRM_IOCTL_AUTH_MAGIC (master only): mark the client that was issued
 * this magic as authenticated and consume the magic.  Returns -EINVAL
 * (linux-style, as callers of this ioctl table expect) when the magic
 * is zero or matches no open file.
 */
int
drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_file	*p;
	struct drm_auth	*auth = data;
	int		 ret = -EINVAL;

	DRM_DEBUG("%u\n", auth->magic);

	/* magic 0 is the "unassigned" sentinel, never valid */
	if (auth->magic == 0)
		return ret;

	mutex_lock(&dev->struct_mutex);
	SPLAY_FOREACH(p, drm_file_tree, &dev->files) {
		if (p->magic == auth->magic) {
			p->authenticated = 1;
			/* one-shot: a magic cannot be authenticated twice */
			p->magic = 0;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
953 
954 /*
955  * Compute order.  Can be made faster.
956  */
/*
 * Compute order: the smallest n such that (1 << n) >= size.
 * drm_order(0) == 0.  Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	/* floor(log2(size)) by counting shifts */
	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	/*
	 * Round up when size is not an exact power of two.  The shift
	 * must be done in unsigned long: with a plain int "1", sizes
	 * needing order >= 31 on LP64 shift out of int range (UB) and
	 * produce a wrong mask.
	 */
	if (size & ~(1UL << order))
		++order;

	return order;
}
971 
972 int
973 drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
974 {
975 	struct drm_pciinfo *info = data;
976 
977 	info->domain = 0;
978 	info->bus = dev->pdev->bus->number;
979 	info->dev = PCI_SLOT(dev->pdev->devfn);
980 	info->func = PCI_FUNC(dev->pdev->devfn);
981 	info->vendor_id = dev->pdev->vendor;
982 	info->device_id = dev->pdev->device;
983 	info->subvendor_id = dev->pdev->subsystem_vendor;
984 	info->subdevice_id = dev->pdev->subsystem_device;
985 	info->revision_id = 0;
986 
987 	return 0;
988 }
989 
990 /**
991  * drm_dev_register - Register DRM device
992  * @dev: Device to register
993  * @flags: Flags passed to the driver's .load() function
994  *
995  * Register the DRM device @dev with the system, advertise device to user-space
996  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
997  * previously.
998  *
999  * Never call this twice on any device!
1000  *
1001  * NOTE: To ensure backward compatibility with existing drivers method this
1002  * function calls the &drm_driver.load method after registering the device
1003  * nodes, creating race conditions. Usage of the &drm_driver.load methods is
1004  * therefore deprecated, drivers must perform all initialization before calling
1005  * drm_dev_register().
1006  *
1007  * RETURNS:
1008  * 0 on success, negative error code on failure.
1009  */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	/* mark the device live before exposing modeset objects */
	dev->registered = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	return 0;
}
1019 EXPORT_SYMBOL(drm_dev_register);
1020 
1021 /**
1022  * drm_dev_unregister - Unregister DRM device
1023  * @dev: Device to unregister
1024  *
1025  * Unregister the DRM device from the system. This does the reverse of
1026  * drm_dev_register() but does not deallocate the device. The caller must call
1027  * drm_dev_put() to drop their final reference.
1028  *
1029  * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
1030  * which can be called while there are still open users of @dev.
1031  *
1032  * This should be called first in the device teardown code to make sure
1033  * userspace can't access the device instance any more.
1034  */
void drm_dev_unregister(struct drm_device *dev)
{
	/* legacy drivers expect a final lastclose on unregister */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	/* stop new userspace access before tearing down modeset state */
	dev->registered = false;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);
}
1045 EXPORT_SYMBOL(drm_dev_unregister);
1046 
1047