1 /* $OpenBSD: drm_drv.c,v 1.129 2014/07/12 18:48:52 tedu Exp $ */
2 /*-
3  * Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
4  * Copyright © 2008 Intel Corporation
5  * Copyright 2003 Eric Anholt
6  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
7  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8  * All Rights Reserved.
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a
11  * copy of this software and associated documentation files (the "Software"),
12  * to deal in the Software without restriction, including without limitation
13  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14  * and/or sell copies of the Software, and to permit persons to whom the
15  * Software is furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice (including the next
18  * paragraph) shall be included in all copies or substantial portions of the
19  * Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
24  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
25  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
26  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
27  * OTHER DEALINGS IN THE SOFTWARE.
28  *
29  * Authors:
30  *    Rickard E. (Rik) Faith <faith@valinux.com>
31  *    Daryll Strauss <daryll@valinux.com>
32  *    Gareth Hughes <gareth@valinux.com>
33  *    Eric Anholt <eric@anholt.net>
34  *    Owain Ainsworth <oga@openbsd.org>
35  *
36  */
37 
38 /** @file drm_drv.c
39  * The catch-all file for DRM device support, including module setup/teardown,
40  * open/close, and ioctl dispatch.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/fcntl.h>
45 #include <sys/filio.h>
46 #include <sys/limits.h>
47 #include <sys/poll.h>
48 #include <sys/specdev.h>
49 #include <sys/systm.h>
50 #include <sys/ttycom.h> /* for TIOCSPGRP */
51 #include <sys/vnode.h>
52 
53 #include <uvm/uvm.h>
54 #include <uvm/uvm_device.h>
55 
56 #include "drmP.h"
57 #include "drm.h"
58 #include "drm_sarea.h"
59 
60 #ifdef DRMDEBUG
61 int drm_debug_flag = 1;
62 #endif
63 
64 struct drm_device *drm_get_device_from_kdev(dev_t);
65 int	 drm_firstopen(struct drm_device *);
66 int	 drm_lastclose(struct drm_device *);
67 void	 drm_attach(struct device *, struct device *, void *);
68 int	 drm_probe(struct device *, void *, void *);
69 int	 drm_detach(struct device *, int);
70 void	 drm_quiesce(struct drm_device *);
71 void	 drm_wakeup(struct drm_device *);
72 int	 drm_activate(struct device *, int);
73 int	 drmprint(void *, const char *);
74 int	 drmsubmatch(struct device *, void *, void *);
75 int	 drm_do_ioctl(struct drm_device *, int, u_long, caddr_t);
76 int	 drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
77 	     struct drm_pending_event **);
78 
79 int	 drm_getunique(struct drm_device *, void *, struct drm_file *);
80 int	 drm_version(struct drm_device *, void *, struct drm_file *);
81 int	 drm_setversion(struct drm_device *, void *, struct drm_file *);
82 int	 drm_getmagic(struct drm_device *, void *, struct drm_file *);
83 int	 drm_authmagic(struct drm_device *, void *, struct drm_file *);
84 int	 drm_file_cmp(struct drm_file *, struct drm_file *);
85 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
86 
87 /* functions used by the per-open handle code to grab references to objects */
88 void	 drm_gem_object_handle_reference(struct drm_gem_object *);
89 void	 drm_gem_object_handle_unreference(struct drm_gem_object *);
90 void	 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *);
91 
92 int	 drm_handle_cmp(struct drm_handle *, struct drm_handle *);
93 int	 drm_name_cmp(struct drm_gem_object *, struct drm_gem_object *);
94 int	 drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
95 	     vm_fault_t, vm_prot_t, int);
96 boolean_t	 drm_flush(struct uvm_object *, voff_t, voff_t, int);
97 
98 SPLAY_PROTOTYPE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
99 SPLAY_PROTOTYPE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);
100 
101 int	 drm_getcap(struct drm_device *, void *, struct drm_file *);
102 
103 /*
104  * attach drm to a PCI-based driver.
105  *
106  * This function gathers all the PCI-specific values needed to
107  * fill in the drm_attach_args.
108  */
109 struct device *
110 drm_attach_pci(struct drm_driver_info *driver, struct pci_attach_args *pa,
111     int is_agp, int console, struct device *dev)
112 {
113 	struct drm_attach_args arg;
114 	pcireg_t subsys;
115 
116 	arg.driver = driver;
117 	arg.dmat = pa->pa_dmat;
118 	arg.bst = pa->pa_memt;
119 	arg.irq = pa->pa_intrline;
120 	arg.is_agp = is_agp;
121 	arg.console = console;
122 
123 	arg.pci_vendor = PCI_VENDOR(pa->pa_id);
124 	arg.pci_device = PCI_PRODUCT(pa->pa_id);
125 
126 	subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
127 	arg.pci_subvendor = PCI_VENDOR(subsys);
128 	arg.pci_subdevice = PCI_PRODUCT(subsys);
129 
130 	arg.pc = pa->pa_pc;
131 	arg.bridgetag = pa->pa_bridgetag;
132 
133 	arg.busid_len = 20;
134 	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
135 	if (arg.busid == NULL) {
136 		printf("%s: no memory for drm\n", dev->dv_xname);
137 		return (NULL);
138 	}
139 	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
140 	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
141 
142 	return (config_found_sm(dev, &arg, drmprint, drmsubmatch));
143 }
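
/*
 * For example, a device at PCI domain 0, bus 1, device 0, function 0
 * gets the busid "pci:0000:01:00.0"; this is the same string that
 * drm_getunique() later reports to userland.
 */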
144 
145 int
146 drmprint(void *aux, const char *pnp)
147 {
148 	if (pnp != NULL)
149 		printf("drm at %s", pnp);
150 	return (UNCONF);
151 }
152 
153 int
154 drmsubmatch(struct device *parent, void *match, void *aux)
155 {
156 	extern struct cfdriver drm_cd;
157 	struct cfdata *cf = match;
158 
159 	/* only allow drm to attach */
160 	if (cf->cf_driver == &drm_cd)
161 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
162 	return (0);
163 }
164 
165 int
166 drm_pciprobe(struct pci_attach_args *pa, const struct drm_pcidev *idlist)
167 {
168 	const struct drm_pcidev *id_entry;
169 
170 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
171 	    PCI_PRODUCT(pa->pa_id), idlist);
172 	if (id_entry != NULL)
173 		return 1;
174 
175 	return 0;
176 }
177 
178 int
179 drm_probe(struct device *parent, void *match, void *aux)
180 {
181 	struct cfdata *cf = match;
182 	struct drm_attach_args *da = aux;
183 
184 	if (cf->drmdevcf_console != DRMDEVCF_CONSOLE_UNK) {
185 		/*
186 		 * If console-ness of device specified, either match
187 		 * exactly (at high priority), or fail.
188 		 */
189 		if (cf->drmdevcf_console != 0 && da->console != 0)
190 			return (10);
191 		else
192 			return (0);
193 	}
194 
195 	/* If console-ness unspecified, it wins. */
196 	return (1);
197 }
198 
199 void
200 drm_attach(struct device *parent, struct device *self, void *aux)
201 {
202 	struct drm_device	*dev = (struct drm_device *)self;
203 	struct drm_attach_args	*da = aux;
204 
205 	dev->dev_private = parent;
206 	dev->driver = da->driver;
207 
208 	dev->dmat = da->dmat;
209 	dev->bst = da->bst;
210 	dev->irq = da->irq;
211 	dev->unique = da->busid;
212 	dev->unique_len = da->busid_len;
213 	dev->pdev = &dev->drm_pci;
214 	dev->pci_vendor = dev->pdev->vendor = da->pci_vendor;
215 	dev->pci_device = dev->pdev->device = da->pci_device;
216 	dev->pdev->subsystem_vendor = da->pci_subvendor;
217 	dev->pdev->subsystem_device = da->pci_subdevice;
218 
219 	dev->pc = da->pc;
220 	dev->bridgetag = da->bridgetag;
221 
222 	rw_init(&dev->dev_lock, "drmdevlk");
223 	mtx_init(&dev->event_lock, IPL_TTY);
224 	mtx_init(&dev->quiesce_mtx, IPL_NONE);
225 
226 	TAILQ_INIT(&dev->maplist);
227 	SPLAY_INIT(&dev->files);
228 	TAILQ_INIT(&dev->vbl_events);
229 
230 	/*
231 	 * The DMA buffers API is just weird; start this extent at 1GB to
232 	 * ensure we don't conflict with it.
233 	 */
234 	dev->handle_ext = extent_create("drmext", 1024*1024*1024, LONG_MAX,
235 	    M_DRM, NULL, 0, EX_NOWAIT | EX_NOCOALESCE);
236 	if (dev->handle_ext == NULL) {
237 		DRM_ERROR("Failed to initialise handle extent\n");
238 		goto error;
239 	}
240 
241 	if (dev->driver->flags & DRIVER_AGP) {
242 #if __OS_HAS_AGP
243 		if (da->is_agp)
244 			dev->agp = drm_agp_init();
245 #endif
246 		if (dev->driver->flags & DRIVER_AGP_REQUIRE &&
247 		    dev->agp == NULL) {
248 			printf(": couldn't find agp\n");
249 			goto error;
250 		}
251 		if (dev->agp != NULL) {
252 			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
253 			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
254 				dev->agp->mtrr = 1;
255 		}
256 	}
257 
258 	if (dev->driver->flags & DRIVER_GEM) {
259 		mtx_init(&dev->obj_name_lock, IPL_NONE);
260 		SPLAY_INIT(&dev->name_tree);
261 		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
262 		/* XXX unique name */
263 		pool_init(&dev->objpl, dev->driver->gem_size, 0, 0, 0,
264 		    "drmobjpl", &pool_allocator_nointr);
265 	}
266 
267 	printf("\n");
268 	return;
269 
270 error:
271 	drm_lastclose(dev);
272 	dev->dev_private = NULL;
273 }
274 
275 int
276 drm_detach(struct device *self, int flags)
277 {
278 	struct drm_device *dev = (struct drm_device *)self;
279 
280 	drm_lastclose(dev);
281 
282 	if (dev->driver->flags & DRIVER_GEM)
283 		pool_destroy(&dev->objpl);
284 
285 	extent_destroy(dev->handle_ext);
286 
287 	drm_vblank_cleanup(dev);
288 
289 	if (dev->agp && dev->agp->mtrr) {
290 		int retcode;
291 
292 		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
293 		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
294 		DRM_DEBUG("mtrr_del = %d", retcode);
295 	}
296 
297 
298 	if (dev->agp != NULL) {
299 		drm_free(dev->agp);
300 		dev->agp = NULL;
301 	}
302 
303 	return 0;
304 }
305 
306 void
307 drm_quiesce(struct drm_device *dev)
308 {
309 	mtx_enter(&dev->quiesce_mtx);
310 	dev->quiesce = 1;
311 	while (dev->quiesce_count > 0) {
312 		msleep(&dev->quiesce_count, &dev->quiesce_mtx,
313 		    PZERO, "drmqui", 0);
314 	}
315 	mtx_leave(&dev->quiesce_mtx);
316 }
317 
318 void
319 drm_wakeup(struct drm_device *dev)
320 {
321 	mtx_enter(&dev->quiesce_mtx);
322 	dev->quiesce = 0;
323 	wakeup(&dev->quiesce);
324 	mtx_leave(&dev->quiesce_mtx);
325 }
326 
327 int
328 drm_activate(struct device *self, int act)
329 {
330 	struct drm_device *dev = (struct drm_device *)self;
331 
332 	switch (act) {
333 	case DVACT_QUIESCE:
334 		drm_quiesce(dev);
335 		break;
336 	case DVACT_WAKEUP:
337 		drm_wakeup(dev);
338 		break;
339 	}
340 
341 	return (0);
342 }
343 
344 struct cfattach drm_ca = {
345 	sizeof(struct drm_device), drm_probe, drm_attach,
346 	drm_detach, drm_activate
347 };
348 
349 struct cfdriver drm_cd = {
350 	0, "drm", DV_DULL
351 };
352 
353 const struct drm_pcidev *
354 drm_find_description(int vendor, int device, const struct drm_pcidev *idlist)
355 {
356 	int i = 0;
357 
358 	for (i = 0; idlist[i].vendor != 0; i++) {
359 		if ((idlist[i].vendor == vendor) &&
360 		    (idlist[i].device == device))
361 			return &idlist[i];
362 	}
363 	return NULL;
364 }
365 
366 int
367 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
368 {
369 	return (f1->minor < f2->minor ? -1 : f1->minor > f2->minor);
370 }
371 
372 SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
373 
374 struct drm_file *
375 drm_find_file_by_minor(struct drm_device *dev, int minor)
376 {
377 	struct drm_file	key;
378 
379 	key.minor = minor;
380 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
381 }
382 
383 struct drm_device *
384 drm_get_device_from_kdev(dev_t kdev)
385 {
386 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
387 
388 	if (unit < drm_cd.cd_ndevs)
389 		return drm_cd.cd_devs[unit];
390 
391 	return NULL;
392 }
393 
394 int
395 drm_firstopen(struct drm_device *dev)
396 {
397 	if (dev->driver->firstopen)
398 		dev->driver->firstopen(dev);
399 
400 	dev->magicid = 1;
401 
402 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
403 		dev->irq_enabled = 0;
404 	dev->if_version = 0;
405 
406 	dev->buf_pgid = 0;
407 
408 	DRM_DEBUG("\n");
409 
410 	return 0;
411 }
412 
413 int
414 drm_lastclose(struct drm_device *dev)
415 {
416 	DRM_DEBUG("\n");
417 
418 	if (dev->driver->lastclose != NULL)
419 		dev->driver->lastclose(dev);
420 
421 	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
422 		drm_irq_uninstall(dev);
423 
424 #if __OS_HAS_AGP
425 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
426 		drm_agp_takedown(dev);
427 #endif
428 
429 	return 0;
430 }
431 
432 int
433 drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
434 {
435 	struct drm_device	*dev = NULL;
436 	struct drm_file		*file_priv;
437 	int			 ret = 0;
438 
439 	dev = drm_get_device_from_kdev(kdev);
440 	if (dev == NULL || dev->dev_private == NULL)
441 		return (ENXIO);
442 
443 	DRM_DEBUG("open_count = %d\n", dev->open_count);
444 
445 	if (flags & O_EXCL)
446 		return (EBUSY); /* No exclusive opens */
447 
448 	DRM_LOCK();
449 	if (dev->open_count++ == 0) {
450 		DRM_UNLOCK();
451 		if ((ret = drm_firstopen(dev)) != 0)
452 			goto err;
453 	} else {
454 		DRM_UNLOCK();
455 	}
456 
457 	/* always allocate at least enough space for our data */
458 	file_priv = drm_calloc(1, max(dev->driver->file_priv_size,
459 	    sizeof(*file_priv)));
460 	if (file_priv == NULL) {
461 		ret = ENOMEM;
462 		goto err;
463 	}
464 
465 	file_priv->kdev = kdev;
466 	file_priv->flags = flags;
467 	file_priv->minor = minor(kdev);
468 	INIT_LIST_HEAD(&file_priv->fbs);
469 	TAILQ_INIT(&file_priv->evlist);
470 	file_priv->event_space = 4096; /* 4k for event buffer */
471 	DRM_DEBUG("minor = %d\n", file_priv->minor);
472 
473 	/* for compatibility, root is always authenticated */
474 	file_priv->authenticated = DRM_SUSER(p);
475 
476 	if (dev->driver->flags & DRIVER_GEM) {
477 		SPLAY_INIT(&file_priv->obj_tree);
478 		mtx_init(&file_priv->table_lock, IPL_NONE);
479 	}
480 
481 	if (dev->driver->open) {
482 		ret = dev->driver->open(dev, file_priv);
483 		if (ret != 0) {
484 			goto free_priv;
485 		}
486 	}
487 
488 	DRM_LOCK();
489 	/* the first opener must be root; it automatically becomes master */
490 	if (SPLAY_EMPTY(&dev->files) && !DRM_SUSER(p)) {
491 		DRM_UNLOCK();
492 		ret = EPERM;
493 		goto free_priv;
494 	}
495 
496 	file_priv->master = SPLAY_EMPTY(&dev->files);
497 
498 	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
499 	DRM_UNLOCK();
500 
501 	return (0);
502 
503 free_priv:
504 	drm_free(file_priv);
505 err:
506 	DRM_LOCK();
507 	--dev->open_count;
508 	DRM_UNLOCK();
509 	return (ret);
510 }
511 
512 int
513 drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
514 {
515 	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
516 	struct drm_file			*file_priv;
517 	struct drm_pending_event	*ev, *evtmp;
518 	struct drm_pending_vblank_event	*vev;
519 	int				 retcode = 0;
520 
521 	if (dev == NULL)
522 		return (ENXIO);
523 
524 	DRM_DEBUG("open_count = %d\n", dev->open_count);
525 
526 	DRM_LOCK();
527 	file_priv = drm_find_file_by_minor(dev, minor(kdev));
528 	if (file_priv == NULL) {
529 		DRM_ERROR("can't find authenticator\n");
530 		retcode = EINVAL;
531 		goto done;
532 	}
533 	DRM_UNLOCK();
534 
535 	if (dev->driver->close != NULL)
536 		dev->driver->close(dev, file_priv);
537 
538 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
539 	    DRM_CURRENTPID, (long)&dev->device, dev->open_count);
540 
541 	mtx_enter(&dev->event_lock);
542 	struct drmevlist *list = &dev->vbl_events;
543 	for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list);
544 	    ev = evtmp) {
545 		evtmp = TAILQ_NEXT(ev, link);
546 		vev = (struct drm_pending_vblank_event *)ev;
547 		if (ev->file_priv == file_priv) {
548 			TAILQ_REMOVE(list, ev, link);
549 			drm_vblank_put(dev, vev->pipe);
550 			ev->destroy(ev);
551 		}
552 	}
553 	while ((ev = TAILQ_FIRST(&file_priv->evlist)) != NULL) {
554 		TAILQ_REMOVE(&file_priv->evlist, ev, link);
555 		ev->destroy(ev);
556 	}
557 	mtx_leave(&dev->event_lock);
558 
559 	if (dev->driver->flags & DRIVER_MODESET)
560 		drm_fb_release(dev, file_priv);
561 
562 	DRM_LOCK();
563 	if (dev->driver->flags & DRIVER_GEM) {
564 		struct drm_handle	*han;
565 		mtx_enter(&file_priv->table_lock);
566 		while ((han = SPLAY_ROOT(&file_priv->obj_tree)) != NULL) {
567 			SPLAY_REMOVE(drm_obj_tree, &file_priv->obj_tree, han);
568 			mtx_leave(&file_priv->table_lock);
569 			drm_gem_object_handle_unreference(han->obj);
570 			drm_free(han);
571 			mtx_enter(&file_priv->table_lock);
572 		}
573 		mtx_leave(&file_priv->table_lock);
574 	}
575 
576 	dev->buf_pgid = 0;
577 
578 	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
579 	drm_free(file_priv);
580 
581 done:
582 	if (--dev->open_count == 0) {
583 		DRM_UNLOCK();
584 		retcode = drm_lastclose(dev);
585 	} else
586 		DRM_UNLOCK();
587 
588 	return (retcode);
589 }
590 
591 int
592 drm_do_ioctl(struct drm_device *dev, int minor, u_long cmd, caddr_t data)
593 {
594 	struct drm_file *file_priv;
595 
596 	DRM_LOCK();
597 	file_priv = drm_find_file_by_minor(dev, minor);
598 	DRM_UNLOCK();
599 	if (file_priv == NULL) {
600 		DRM_ERROR("can't find authenticator\n");
601 		return EINVAL;
602 	}
603 
604 	++file_priv->ioctl_count;
605 
606 	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
607 	    DRM_CURRENTPID, cmd, DRM_IOCTL_NR(cmd), (long)&dev->device,
608 	    file_priv->authenticated);
609 
610 	switch (cmd) {
611 	case FIONBIO:
612 	case FIOASYNC:
613 		return 0;
614 
615 	case TIOCSPGRP:
616 		dev->buf_pgid = *(int *)data;
617 		return 0;
618 
619 	case TIOCGPGRP:
620 		*(int *)data = dev->buf_pgid;
621 		return 0;
622 	case DRM_IOCTL_VERSION:
623 		return (drm_version(dev, data, file_priv));
624 	case DRM_IOCTL_GET_UNIQUE:
625 		return (drm_getunique(dev, data, file_priv));
626 	case DRM_IOCTL_GET_MAGIC:
627 		return (drm_getmagic(dev, data, file_priv));
628 	case DRM_IOCTL_WAIT_VBLANK:
629 		return (drm_wait_vblank(dev, data, file_priv));
630 	case DRM_IOCTL_MODESET_CTL:
631 		return (drm_modeset_ctl(dev, data, file_priv));
632 	case DRM_IOCTL_GEM_CLOSE:
633 		return -drm_gem_close_ioctl(dev, data, file_priv);
634 
635 	/*
636 	 * No-op ioctls: we don't check permissions on them because they
637 	 * do nothing. They'll be removed as soon as userland has
638 	 * definitely stopped using them.
639 	 */
640 	case DRM_IOCTL_SET_SAREA_CTX:
641 	case DRM_IOCTL_BLOCK:
642 	case DRM_IOCTL_UNBLOCK:
643 	case DRM_IOCTL_MOD_CTX:
644 	case DRM_IOCTL_MARK_BUFS:
645 	case DRM_IOCTL_FINISH:
646 	case DRM_IOCTL_INFO_BUFS:
647 	case DRM_IOCTL_SWITCH_CTX:
648 	case DRM_IOCTL_NEW_CTX:
649 	case DRM_IOCTL_GET_SAREA_CTX:
650 		return (0);
651 	}
652 
653 	if (file_priv->authenticated == 1) {
654 		switch (cmd) {
655 		case DRM_IOCTL_GEM_FLINK:
656 			return (drm_gem_flink_ioctl(dev, data, file_priv));
657 		case DRM_IOCTL_GEM_OPEN:
658 			return -drm_gem_open_ioctl(dev, data, file_priv);
659 		case DRM_IOCTL_GET_CAP:
660 			return (drm_getcap(dev, data, file_priv));
661 		}
662 	}
663 
664 	/* master is always root */
665 	if (file_priv->master == 1) {
666 		switch(cmd) {
667 		case DRM_IOCTL_SET_VERSION:
668 			return (drm_setversion(dev, data, file_priv));
669 		case DRM_IOCTL_IRQ_BUSID:
670 			return (drm_irq_by_busid(dev, data, file_priv));
671 		case DRM_IOCTL_AUTH_MAGIC:
672 			return (drm_authmagic(dev, data, file_priv));
673 		case DRM_IOCTL_CONTROL:
674 			return (drm_control(dev, data, file_priv));
675 		case DRM_IOCTL_ADD_DRAW:
676 		case DRM_IOCTL_RM_DRAW:
677 		case DRM_IOCTL_UPDATE_DRAW:
678 			/*
679 			 * Support was removed from the kernel since it's not
680 			 * used. Just return zero until userland stops calling
681 			 * this ioctl.
682 			 */
683 			return (0);
684 		case DRM_IOCTL_SET_UNIQUE:
685 			/*
686 			 * Deprecated in DRM version 1.1; will return EBUSY
687 			 * when setversion has requested version 1.1 or
688 			 * greater.
689 			 */
690 			return (EBUSY);
691 		case DRM_IOCTL_MODE_GETRESOURCES:
692 			return -drm_mode_getresources(dev, data, file_priv);
693 		case DRM_IOCTL_MODE_GETPLANERESOURCES:
694 			return -drm_mode_getplane_res(dev, data, file_priv);
695 		case DRM_IOCTL_MODE_GETCRTC:
696 			return -drm_mode_getcrtc(dev, data, file_priv);
697 		case DRM_IOCTL_MODE_SETCRTC:
698 			return -drm_mode_setcrtc(dev, data, file_priv);
699 		case DRM_IOCTL_MODE_GETPLANE:
700 			return -drm_mode_getplane(dev, data, file_priv);
701 		case DRM_IOCTL_MODE_SETPLANE:
702 			return -drm_mode_setplane(dev, data, file_priv);
703 		case DRM_IOCTL_MODE_CURSOR:
704 			return -drm_mode_cursor_ioctl(dev, data, file_priv);
705 		case DRM_IOCTL_MODE_GETGAMMA:
706 			return -drm_mode_gamma_get_ioctl(dev, data, file_priv);
707 		case DRM_IOCTL_MODE_SETGAMMA:
708 			return -drm_mode_gamma_set_ioctl(dev, data, file_priv);
709 		case DRM_IOCTL_MODE_GETENCODER:
710 			return -drm_mode_getencoder(dev, data, file_priv);
711 		case DRM_IOCTL_MODE_GETCONNECTOR:
712 			return -drm_mode_getconnector(dev, data, file_priv);
713 		case DRM_IOCTL_MODE_ATTACHMODE:
714 			return -drm_mode_attachmode_ioctl(dev, data, file_priv);
715 		case DRM_IOCTL_MODE_DETACHMODE:
716 			return -drm_mode_detachmode_ioctl(dev, data, file_priv);
717 		case DRM_IOCTL_MODE_GETPROPERTY:
718 			return -drm_mode_getproperty_ioctl(dev, data,
719 			    file_priv);
720 		case DRM_IOCTL_MODE_SETPROPERTY:
721 			return -drm_mode_connector_property_set_ioctl(dev,
722 			    data, file_priv);
723 		case DRM_IOCTL_MODE_GETPROPBLOB:
724 			return -drm_mode_getblob_ioctl(dev, data, file_priv);
725 		case DRM_IOCTL_MODE_GETFB:
726 			return -drm_mode_getfb(dev, data, file_priv);
727 		case DRM_IOCTL_MODE_ADDFB:
728 			return -drm_mode_addfb(dev, data, file_priv);
729 		case DRM_IOCTL_MODE_ADDFB2:
730 			return -drm_mode_addfb2(dev, data, file_priv);
731 		case DRM_IOCTL_MODE_RMFB:
732 			return -drm_mode_rmfb(dev, data, file_priv);
733 		case DRM_IOCTL_MODE_PAGE_FLIP:
734 			return -drm_mode_page_flip_ioctl(dev, data, file_priv);
735 		case DRM_IOCTL_MODE_DIRTYFB:
736 			return -drm_mode_dirtyfb_ioctl(dev, data, file_priv);
737 		case DRM_IOCTL_MODE_CREATE_DUMB:
738 			return -drm_mode_create_dumb_ioctl(dev, data,
739 			    file_priv);
740 		case DRM_IOCTL_MODE_MAP_DUMB:
741 			return -drm_mode_mmap_dumb_ioctl(dev, data, file_priv);
742 		case DRM_IOCTL_MODE_DESTROY_DUMB:
743 			return -drm_mode_destroy_dumb_ioctl(dev, data,
744 			    file_priv);
745 		case DRM_IOCTL_MODE_OBJ_GETPROPERTIES:
746 			return -drm_mode_obj_get_properties_ioctl(dev, data,
747 			    file_priv);
748 		case DRM_IOCTL_MODE_OBJ_SETPROPERTY:
749 			return -drm_mode_obj_set_property_ioctl(dev, data,
750 			    file_priv);
751 		}
752 	}
753 	if (dev->driver->ioctl != NULL)
754 		return (dev->driver->ioctl(dev, cmd, data, file_priv));
755 	else
756 		return (EINVAL);
757 }
758 
759 /* drmioctl is called whenever a process performs an ioctl on /dev/drm. */
761 int
762 drmioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
763 {
764 	struct drm_device *dev = drm_get_device_from_kdev(kdev);
765 	int error;
766 
767 	if (dev == NULL)
768 		return ENODEV;
769 
770 	mtx_enter(&dev->quiesce_mtx);
771 	while (dev->quiesce)
772 		msleep(&dev->quiesce, &dev->quiesce_mtx, PZERO, "drmioc", 0);
773 	dev->quiesce_count++;
774 	mtx_leave(&dev->quiesce_mtx);
775 
776 	error = drm_do_ioctl(dev, minor(kdev), cmd, data);
777 
778 	mtx_enter(&dev->quiesce_mtx);
779 	dev->quiesce_count--;
780 	if (dev->quiesce)
781 		wakeup(&dev->quiesce_count);
782 	mtx_leave(&dev->quiesce_mtx);
783 
784 	return (error);
785 }
786 
787 int
788 drmread(dev_t kdev, struct uio *uio, int ioflag)
789 {
790 	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
791 	struct drm_file			*file_priv;
792 	struct drm_pending_event	*ev;
793 	int		 		 error = 0;
794 
795 	if (dev == NULL)
796 		return (ENXIO);
797 
798 	DRM_LOCK();
799 	file_priv = drm_find_file_by_minor(dev, minor(kdev));
800 	DRM_UNLOCK();
801 	if (file_priv == NULL)
802 		return (ENXIO);
803 
804 	/*
805 	 * The semantics are a little weird here. We will wait until we
806 	 * have events to process, but as soon as we have events we will
807 	 * only deliver as many as we have.
808 	 * Note that events are atomic: if a whole event will not fit in
809 	 * the read buffer, we won't read any of it out.
810 	 */
811 	mtx_enter(&dev->event_lock);
812 	while (error == 0 && TAILQ_EMPTY(&file_priv->evlist)) {
813 		if (ioflag & IO_NDELAY) {
814 			mtx_leave(&dev->event_lock);
815 			return (EAGAIN);
816 		}
817 		error = msleep(&file_priv->evlist, &dev->event_lock,
818 		    PWAIT | PCATCH, "drmread", 0);
819 	}
820 	if (error) {
821 		mtx_leave(&dev->event_lock);
822 		return (error);
823 	}
824 	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
825 		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
826 		/* XXX we always destroy the event on error. */
827 		error = uiomove(ev->event, ev->event->length, uio);
828 		ev->destroy(ev);
829 		if (error)
830 			break;
831 		mtx_enter(&dev->event_lock);
832 	}
833 	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
834 
835 	return (error);
836 }
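
/*
 * An illustrative userland consumer of the above (a sketch, not part
 * of this file): events are length-prefixed by the common struct
 * drm_event header, so a reader walks the returned buffer in
 * e->length steps. "fd" is an open drm device and handle_vblank() is
 * a hypothetical callback; error handling is omitted.
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + n) {
 *		struct drm_event *e = (struct drm_event *)p;
 *		if (e->type == DRM_EVENT_VBLANK)
 *			handle_vblank((struct drm_event_vblank *)e);
 *		p += e->length;
 *	}
 */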
837 
838 /*
839  * Dequeue an event from the file priv in question, returning 1 if an
840  * event was found. We take the resid from the read as a parameter because
841  * we will only dequeue an event if the read buffer has space to fit the
842  * entire thing.
843  *
844  * We are called locked, but we will *unlock* the queue on return so that
845  * we may sleep to copyout the event.
846  */
847 int
848 drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
849     size_t resid, struct drm_pending_event **out)
850 {
851 	struct drm_pending_event	*ev = NULL;
852 	int				 gotone = 0;
853 
854 	MUTEX_ASSERT_LOCKED(&dev->event_lock);
855 	if ((ev = TAILQ_FIRST(&file_priv->evlist)) == NULL ||
856 	    ev->event->length > resid)
857 		goto out;
858 
859 	TAILQ_REMOVE(&file_priv->evlist, ev, link);
860 	file_priv->event_space += ev->event->length;
861 	*out = ev;
862 	gotone = 1;
863 
864 out:
865 	mtx_leave(&dev->event_lock);
866 
867 	return (gotone);
868 }
869 
870 /* XXX kqfilter ... */
871 int
872 drmpoll(dev_t kdev, int events, struct proc *p)
873 {
874 	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
875 	struct drm_file		*file_priv;
876 	int		 	 revents = 0;
877 
878 	if (dev == NULL)
879 		return (POLLERR);
880 
881 	DRM_LOCK();
882 	file_priv = drm_find_file_by_minor(dev, minor(kdev));
883 	DRM_UNLOCK();
884 	if (file_priv == NULL)
885 		return (POLLERR);
886 
887 	mtx_enter(&dev->event_lock);
888 	if (events & (POLLIN | POLLRDNORM)) {
889 		if (!TAILQ_EMPTY(&file_priv->evlist))
890 			revents |=  events & (POLLIN | POLLRDNORM);
891 		else
892 			selrecord(p, &file_priv->rsel);
893 	}
894 	mtx_leave(&dev->event_lock);
895 
896 	return (revents);
897 }
898 
899 struct drm_local_map *
900 drm_getsarea(struct drm_device *dev)
901 {
902 	struct drm_local_map	*map;
903 
904 	DRM_LOCK();
905 	TAILQ_FOREACH(map, &dev->maplist, link) {
906 		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
907 			break;
908 	}
909 	DRM_UNLOCK();
910 	return (map);
911 }
912 
913 paddr_t
914 drmmmap(dev_t kdev, off_t offset, int prot)
915 {
916 	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
917 	struct drm_local_map	*map;
918 	struct drm_file		*file_priv;
919 	enum drm_map_type	 type;
920 
921 	if (dev == NULL)
922 		return (-1);
923 
924 	DRM_LOCK();
925 	file_priv = drm_find_file_by_minor(dev, minor(kdev));
926 	DRM_UNLOCK();
927 	if (file_priv == NULL) {
928 		DRM_ERROR("can't find authenticator\n");
929 		return (-1);
930 	}
931 
932 	if (!file_priv->authenticated)
933 		return (-1);
934 
935 	if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
936 		struct drm_device_dma *dma = dev->dma;
937 		paddr_t	phys = -1;
938 
939 		rw_enter_write(&dma->dma_lock);
940 		if (dma->pagelist != NULL)
941 			phys = dma->pagelist[offset >> PAGE_SHIFT];
942 		rw_exit_write(&dma->dma_lock);
943 
944 		return (phys);
945 	}
946 
947 	/*
948 	 * A sequential search of a linked list is
949 	 * fine here because: 1) there will only be
950 	 * about 5-10 entries in the list, and 2) a
951 	 * DRI client only has to do this mapping
952 	 * once, so it doesn't have to be optimized
953 	 * for performance, even if the list were a
954 	 * bit longer.
955 	 */
956 	DRM_LOCK();
957 	TAILQ_FOREACH(map, &dev->maplist, link) {
958 		if (offset >= map->ext &&
959 		    offset < map->ext + map->size) {
960 			offset -= map->ext;
961 			break;
962 		}
963 	}
964 
965 	if (map == NULL) {
966 		DRM_UNLOCK();
967 		DRM_DEBUG("can't find map\n");
968 		return (-1);
969 	}
970 	if (((map->flags & _DRM_RESTRICTED) && file_priv->master == 0)) {
971 		DRM_UNLOCK();
972 		DRM_DEBUG("restricted map\n");
973 		return (-1);
974 	}
975 	type = map->type;
976 	DRM_UNLOCK();
977 
978 	switch (type) {
979 #if __OS_HAS_AGP
980 	case _DRM_AGP:
981 		return agp_mmap(dev->agp->agpdev,
982 		    offset + map->offset - dev->agp->base, prot);
983 #endif
984 	case _DRM_FRAME_BUFFER:
985 	case _DRM_REGISTERS:
986 		return (offset + map->offset);
987 		break;
988 	case _DRM_SHM:
989 	case _DRM_CONSISTENT:
990 		return (bus_dmamem_mmap(dev->dmat, map->dmamem->segs,
991 		    map->dmamem->nsegs, offset, prot, BUS_DMA_NOWAIT));
992 	default:
993 		DRM_ERROR("bad map type %d\n", type);
994 		return (-1);	/* This should never happen. */
995 	}
996 	/* NOTREACHED */
997 }
998 
999 /*
1000  * Beginning in revision 1.1 of the DRM interface, getunique will return
1001  * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
1002  * before setunique has been called.  The format for the bus-specific part of
1003  * the unique is not defined for any other bus.
1004  */
1005 int
1006 drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
1007 {
1008 	struct drm_unique	 *u = data;
1009 
1010 	if (u->unique_len >= dev->unique_len) {
1011 		if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
1012 			return EFAULT;
1013 	}
1014 	u->unique_len = dev->unique_len;
1015 
1016 	return 0;
1017 }
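
/*
 * A minimal userland sketch (not part of this file) of the two-call
 * pattern drm_getunique() supports: query the length first, then
 * fetch the string. "fd" is an open drm device; error checks omitted.
 *
 *	struct drm_unique u = { 0, NULL };
 *
 *	ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	(fills in u.unique_len)
 *	u.unique = malloc(u.unique_len + 1);
 *	ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	(copies the busid out)
 *	u.unique[u.unique_len] = '\0';
 */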
1018 
1019 int
1020 drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
1021 {
1022 	struct drm_get_cap *req = data;
1023 
1024 	req->value = 0;
1025 	switch (req->capability) {
1026 	case DRM_CAP_DUMB_BUFFER:
1027 		if (dev->driver->dumb_create)
1028 			req->value = 1;
1029 		break;
1030 	case DRM_CAP_VBLANK_HIGH_CRTC:
1031 		req->value = 1;
1032 		break;
1033 	case DRM_CAP_DUMB_PREFERRED_DEPTH:
1034 		req->value = dev->mode_config.preferred_depth;
1035 		break;
1036 	case DRM_CAP_DUMB_PREFER_SHADOW:
1037 		req->value = dev->mode_config.prefer_shadow;
1038 		break;
1039 	case DRM_CAP_TIMESTAMP_MONOTONIC:
1040 		req->value = drm_timestamp_monotonic;
1041 		break;
1042 	default:
1043 		return EINVAL;
1044 	}
1045 	return 0;
1046 }
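
/*
 * Userland is expected to probe capabilities before relying on them.
 * A sketch (not part of this file) checking for dumb buffer support;
 * "fd" is an open drm device and error handling is omitted:
 *
 *	struct drm_get_cap cap = { DRM_CAP_DUMB_BUFFER, 0 };
 *
 *	ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
 *	if (cap.value)
 *		... dumb buffers are available ...
 */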
1047 
1048 #define DRM_IF_MAJOR	1
1049 #define DRM_IF_MINOR	2
1050 
1051 int
1052 drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
1053 {
1054 	struct drm_version	*version = data;
1055 	int			 len;
1056 
1057 #define DRM_COPY(name, value)						\
1058 	len = strlen( value );						\
1059 	if ( len > name##_len ) len = name##_len;			\
1060 	name##_len = strlen( value );					\
1061 	if ( len && name ) {						\
1062 		if ( DRM_COPY_TO_USER( name, value, len ) )		\
1063 			return EFAULT;				\
1064 	}
1065 
1066 	version->version_major = dev->driver->major;
1067 	version->version_minor = dev->driver->minor;
1068 	version->version_patchlevel = dev->driver->patchlevel;
1069 
1070 	DRM_COPY(version->name, dev->driver->name);
1071 	DRM_COPY(version->date, dev->driver->date);
1072 	DRM_COPY(version->desc, dev->driver->desc);
1073 
1074 	return 0;
1075 }
1076 
1077 int
1078 drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
1079 {
1080 	struct drm_set_version	ver, *sv = data;
1081 	int			if_version;
1082 
1083 	/* Save the incoming data, and set the response before continuing
1084 	 * any further.
1085 	 */
1086 	ver = *sv;
1087 	sv->drm_di_major = DRM_IF_MAJOR;
1088 	sv->drm_di_minor = DRM_IF_MINOR;
1089 	sv->drm_dd_major = dev->driver->major;
1090 	sv->drm_dd_minor = dev->driver->minor;
1091 
1092 	/*
1093 	 * We no longer support interface versions less than 1.1, so error
1094 	 * out if the X server is too old. 1.1 always ties the drm to a
1095 	 * certain busid; this was done at attach time.
1096 	 */
1097 	if (ver.drm_di_major != -1) {
1098 		if (ver.drm_di_major != DRM_IF_MAJOR || ver.drm_di_minor < 1 ||
1099 		    ver.drm_di_minor > DRM_IF_MINOR) {
1100 			return EINVAL;
1101 		}
1102 		if_version = DRM_IF_VERSION(ver.drm_di_major, ver.drm_dd_minor);
1103 		dev->if_version = imax(if_version, dev->if_version);
1104 	}
1105 
1106 	if (ver.drm_dd_major != -1) {
1107 		if (ver.drm_dd_major != dev->driver->major ||
1108 		    ver.drm_dd_minor < 0 ||
1109 		    ver.drm_dd_minor > dev->driver->minor)
1110 			return EINVAL;
1111 	}
1112 
1113 	return 0;
1114 }
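
/*
 * A sketch of the negotiation from the userland side, assuming a
 * client that speaks interface 1.1 and doesn't care about the
 * driver version (-1 means "don't check"):
 *
 *	struct drm_set_version sv;
 *
 *	sv.drm_di_major = 1;
 *	sv.drm_di_minor = 1;
 *	sv.drm_dd_major = -1;
 *	sv.drm_dd_minor = -1;
 *	ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
 *	(sv now holds the versions the kernel supports)
 */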
1115 
1116 struct drm_dmamem *
1117 drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
1118     int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
1119 {
1120 	struct drm_dmamem	*mem;
1121 	size_t			 strsize;
1122 	/*
1123 	 * segs is the last member of the struct since we over-allocate it
1124 	 * to make room for extra segments when more than one is allowed.
1125 	 */
1126 	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
1127 	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
1128 	if (mem == NULL)
1129 		return (NULL);
1130 
1131 	mem->size = size;
1132 
1133 	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
1134 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
1135 		goto strfree;
1136 
1137 	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
1138 	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1139 		goto destroy;
1140 
1141 	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
1142 	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
1143 		goto free;
1144 
1145 	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
1146 	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
1147 		goto unmap;
1148 
1149 	return (mem);
1150 
1151 unmap:
1152 	bus_dmamem_unmap(dmat, mem->kva, size);
1153 free:
1154 	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
1155 destroy:
1156 	bus_dmamap_destroy(dmat, mem->map);
1157 strfree:
1158 	free(mem, M_DRM, 0);
1159 
1160 	return (NULL);
1161 }
1162 
1163 void
1164 drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
1165 {
1166 	if (mem == NULL)
1167 		return;
1168 
1169 	bus_dmamap_unload(dmat, mem->map);
1170 	bus_dmamem_unmap(dmat, mem->kva, mem->size);
1171 	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
1172 	bus_dmamap_destroy(dmat, mem->map);
1173 	free(mem, M_DRM, 0);
1174 }
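
/*
 * A sketch of how a driver might use the pair above for a buffer
 * that must be physically contiguous (nsegments of 1 forces a single
 * segment; names are illustrative):
 *
 *	struct drm_dmamem *ring;
 *
 *	ring = drm_dmamem_alloc(dev->dmat, 64 * 1024, PAGE_SIZE, 1,
 *	    64 * 1024, 0, 0);
 *	if (ring == NULL)
 *		return (ENOMEM);
 *	... use ring->kva for CPU access, ring->map for device access ...
 *	drm_dmamem_free(dev->dmat, ring);
 */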
1175 
1176 /**
1177  * Called by the client, this returns a unique magic number to be authorized
1178  * by the master.
1179  *
1180  * The master may use its own knowledge of the client (such as the X
1181  * connection that the magic is passed over) to determine if the magic number
1182  * should be authenticated.
1183  */
1184 int
1185 drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
1186 {
1187 	struct drm_auth		*auth = data;
1188 
1189 	if (dev->magicid == 0)
1190 		dev->magicid = 1;
1191 
1192 	/* Find unique magic */
1193 	if (file_priv->magic) {
1194 		auth->magic = file_priv->magic;
1195 	} else {
1196 		DRM_LOCK();
1197 		file_priv->magic = auth->magic = dev->magicid++;
1198 		DRM_UNLOCK();
1199 		DRM_DEBUG("%u\n", auth->magic);
1200 	}
1201 
1202 	DRM_DEBUG("%u\n", auth->magic);
1203 	return (0);
1204 }
1205 
1206 /**
1207  * Marks the client associated with the given magic number as authenticated.
1208  */
1209 int
1210 drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
1211 {
1212 	struct drm_file	*p;
1213 	struct drm_auth	*auth = data;
1214 	int		 ret = EINVAL;
1215 
1216 	DRM_DEBUG("%u\n", auth->magic);
1217 
1218 	if (auth->magic == 0)
1219 		return (ret);
1220 
1221 	DRM_LOCK();
1222 	SPLAY_FOREACH(p, drm_file_tree, &dev->files) {
1223 		if (p->magic == auth->magic) {
1224 			p->authenticated = 1;
1225 			p->magic = 0;
1226 			ret = 0;
1227 			break;
1228 		}
1229 	}
1230 	DRM_UNLOCK();
1231 
1232 	return (ret);
1233 }
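
/*
 * The resulting handshake, sketched end to end (not part of this
 * file). How the magic travels from client to master, e.g. over the
 * X connection, is entirely up to userland:
 *
 *	struct drm_auth auth;
 *
 *	ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);
 *	... client sends auth.magic to the master out of band ...
 *	ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
 *	(the client's file is now marked authenticated)
 */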
1234 
1235 struct uvm_pagerops drm_pgops = {
1236 	NULL,
1237 	drm_ref,
1238 	drm_unref,
1239 	drm_fault,
1240 	drm_flush,
1241 };
1242 
1243 void
1244 drm_ref(struct uvm_object *uobj)
1245 {
1246 	uobj->uo_refs++;
1247 }
1248 
1249 void
1250 drm_unref(struct uvm_object *uobj)
1251 {
1252 	struct drm_gem_object *obj = (struct drm_gem_object *)uobj;
1253 	struct drm_device *dev = obj->dev;
1254 
1255 	if (uobj->uo_refs > 1) {
1256 		uobj->uo_refs--;
1257 		return;
1258 	}
1259 
1260 	/* We own this thing now. It is on no queues, though it may still
1261 	 * be bound to the aperture (and on the inactive list, in which case
1262 	 * idling the buffer is what triggered the free). Since we know no one
1263 	 * else can grab it now, we can nuke with impunity.
1264 	 */
1265 	if (dev->driver->gem_free_object != NULL)
1266 		dev->driver->gem_free_object(obj);
1267 }
1268 
1269 boolean_t
1270 drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
1271 {
1272 	return (TRUE);
1273 }
1274 
1275 int
1276 drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
1277     int npages, int centeridx, vm_fault_t fault_type,
1278     vm_prot_t access_type, int flags)
1279 {
1280 	struct vm_map_entry *entry = ufi->entry;
1281 	struct uvm_object *uobj = entry->object.uvm_obj;
1282 	struct drm_gem_object *obj = (struct drm_gem_object *)uobj;
1283 	struct drm_device *dev = obj->dev;
1284 	int ret;
1285 
1286 	/*
1287 	 * we do not allow device mappings to be mapped copy-on-write
1288 	 * so we kill any attempt to do so here.
1289 	 */
1290 
1291 	if (UVM_ET_ISCOPYONWRITE(entry)) {
1292 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
1293 		return(VM_PAGER_ERROR);
1294 	}
1295 
1296 	/*
1297 	 * We could end up here as the result of a copyin(9) or
1298 	 * copyout(9) while handling an ioctl.  So we must be careful
1299 	 * not to deadlock.  Therefore we only block if the quiesce
1300 	 * count is zero, which guarantees we didn't enter from within
1301 	 * an ioctl code path.
1302 	 */
1303 	mtx_enter(&dev->quiesce_mtx);
1304 	if (dev->quiesce && dev->quiesce_count == 0) {
1305 		mtx_leave(&dev->quiesce_mtx);
1306 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
1307 		mtx_enter(&dev->quiesce_mtx);
1308 		while (dev->quiesce) {
1309 			msleep(&dev->quiesce, &dev->quiesce_mtx,
1310 			    PZERO, "drmflt", 0);
1311 		}
1312 		mtx_leave(&dev->quiesce_mtx);
1313 		return(VM_PAGER_REFAULT);
1314 	}
1315 	dev->quiesce_count++;
1316 	mtx_leave(&dev->quiesce_mtx);
1317 
1318 	/* Call down into driver to do the magic */
1319 	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
1320 	    entry->start), vaddr, pps, npages, centeridx,
1321 	    access_type, flags);
1322 
1323 	mtx_enter(&dev->quiesce_mtx);
1324 	dev->quiesce_count--;
1325 	if (dev->quiesce)
1326 		wakeup(&dev->quiesce_count);
1327 	mtx_leave(&dev->quiesce_mtx);
1328 
1329 	return (ret);
1330 }
1331 
1332 /*
1333  * Code to support memory managers based on the GEM (Graphics
1334  * Execution Manager) api.
1335  */
1336 struct drm_gem_object *
1337 drm_gem_object_alloc(struct drm_device *dev, size_t size)
1338 {
1339 	struct drm_gem_object	*obj;
1340 
1341 	KASSERT((size & (PAGE_SIZE -1)) == 0);
1342 
1343 	if ((obj = pool_get(&dev->objpl, PR_WAITOK | PR_ZERO)) == NULL)
1344 		return (NULL);
1345 
1346 	obj->dev = dev;
1347 
1348 	/* uao_create() can't fail with flags 0; it just sleeps */
1349 	obj->uao = uao_create(size, 0);
1350 	obj->size = size;
1351 	uvm_objinit(&obj->uobj, &drm_pgops, 1);
1352 
1353 	if (dev->driver->gem_init_object != NULL &&
1354 	    dev->driver->gem_init_object(obj) != 0) {
1355 		uao_detach(obj->uao);
1356 		pool_put(&dev->objpl, obj);
1357 		return (NULL);
1358 	}
1359 	atomic_inc(&dev->obj_count);
1360 	atomic_add(obj->size, &dev->obj_memory);
1361 	return (obj);
1362 }
1363 
1364 int
1365 drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size)
1366 {
1367 	BUG_ON((size & (PAGE_SIZE -1)) != 0);
1368 
1369 	obj->dev = dev;
1370 
1371 	/* uao_create() can't fail with flags 0; it just sleeps */
1372 	obj->uao = uao_create(size, 0);
1373 	obj->size = size;
1374 	uvm_objinit(&obj->uobj, &drm_pgops, 1);
1375 
1376 	atomic_inc(&dev->obj_count);
1377 	atomic_add(obj->size, &dev->obj_memory);
1378 	return 0;
1379 }
1380 
1381 void
1382 drm_gem_object_release(struct drm_gem_object *obj)
1383 {
1384 	struct drm_device *dev = obj->dev;
1385 
1386 	if (obj->uao)
1387 		uao_detach(obj->uao);
1388 
1389 	atomic_dec(&dev->obj_count);
1390 	atomic_sub(obj->size, &dev->obj_memory);
1391 	if (obj->do_flags & DRM_WANTED) /* should never happen, not on lists */
1392 		wakeup(obj);
1393 }
1394 
1395 /**
1396  * Create a handle for this object. This adds a handle reference
1397  * to the object, which includes a regular reference count. Callers
1398  * will likely want to dereference the object afterwards.
1399  */
1400 int
1401 drm_gem_handle_create(struct drm_file *file_priv,
1402 		       struct drm_gem_object *obj,
1403 		       u32 *handlep)
1404 {
1405 	struct drm_device *dev = obj->dev;
1406 	struct drm_handle *han;
1407 	int ret;
1408 
1409 	if ((han = drm_calloc(1, sizeof(*han))) == NULL)
1410 		return -ENOMEM;
1411 
1412 	han->obj = obj;
1413 	mtx_enter(&file_priv->table_lock);
1414 again:
1415 	*handlep = han->handle = ++file_priv->obj_id;
1416 	/*
1417 	 * Make sure we have no duplicates; this'll hurt once we wrap since
1418 	 * 0 is reserved.
1419 	 */
1420 	if (han->handle == 0 || SPLAY_INSERT(drm_obj_tree,
1421 	    &file_priv->obj_tree, han))
1422 		goto again;
1423 	mtx_leave(&file_priv->table_lock);
1424 
1425 	drm_gem_object_handle_reference(obj);
1426 
1427 	if (dev->driver->gem_open_object) {
1428 		ret = dev->driver->gem_open_object(obj, file_priv);
1429 		if (ret) {
1430 			drm_gem_handle_delete(file_priv, *handlep);
1431 			return ret;
1432 		}
1433 	}
1434 
1435 	return 0;
1436 }
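
/*
 * A sketch of the expected driver-side allocation path: create an
 * object, publish a handle for it, then drop the creation reference
 * so that the handle owns the object ("size" is page-aligned):
 *
 *	struct drm_gem_object *obj;
 *	u32 handle;
 *	int ret;
 *
 *	obj = drm_gem_object_alloc(dev, size);
 *	if (obj == NULL)
 *		return (ENOMEM);
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return (ret);
 */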
1437 
1438 /**
1439  * Removes the mapping from handle to filp for this object.
1440  */
1441 int
1442 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
1443 {
1444 	struct drm_device *dev;
1445 	struct drm_gem_object *obj;
1446 	struct drm_handle *han, find;
1447 
1448 	find.handle = handle;
1449 	mtx_enter(&filp->table_lock);
1450 	han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &find);
1451 	if (han == NULL) {
1452 		mtx_leave(&filp->table_lock);
1453 		return -EINVAL;
1454 	}
1455 	obj = han->obj;
1456 	dev = obj->dev;
1457 
1458 	SPLAY_REMOVE(drm_obj_tree, &filp->obj_tree, han);
1459 	mtx_leave(&filp->table_lock);
1460 
1461 	drm_free(han);
1462 
1463 	if (dev->driver->gem_close_object)
1464 		dev->driver->gem_close_object(obj, filp);
1465 	drm_gem_object_handle_unreference_unlocked(obj);
1466 
1467 	return 0;
1468 }
1469 
1470 /** Returns a reference to the object named by the handle. */
1471 struct drm_gem_object *
1472 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
1473 		      u32 handle)
1474 {
1475 	struct drm_gem_object *obj;
1476 	struct drm_handle *han, search;
1477 
1478 	mtx_enter(&filp->table_lock);
1479 
1480 	/* Check if we currently have a reference on the object */
1481 	search.handle = handle;
1482 	han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &search);
1483 	if (han == NULL) {
1484 		mtx_leave(&filp->table_lock);
1485 		return NULL;
1486 	}
1487 	obj = han->obj;
1488 
1489 	drm_gem_object_reference(obj);
1490 
1491 	mtx_leave(&filp->table_lock);
1492 
1493 	return obj;
1494 }
1495 
1496 /**
1497  * Releases the handle to an mm object.
1498  */
1499 int
1500 drm_gem_close_ioctl(struct drm_device *dev, void *data,
1501 		    struct drm_file *file_priv)
1502 {
1503 	struct drm_gem_close *args = data;
1504 	int ret;
1505 
1506 	if (!(dev->driver->flags & DRIVER_GEM))
1507 		return -ENODEV;
1508 
1509 	ret = drm_gem_handle_delete(file_priv, args->handle);
1510 
1511 	return ret;
1512 }
1513 
1514 int
1515 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1516     struct drm_file *file_priv)
1517 {
1518 	struct drm_gem_flink	*args = data;
1519 	struct drm_gem_object	*obj;
1520 
1521 	if (!(dev->driver->flags & DRIVER_GEM))
1522 		return (ENODEV);
1523 
1524 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1525 	if (obj == NULL)
1526 		return (ENOENT);
1527 
1528 	mtx_enter(&dev->obj_name_lock);
1529 	if (!obj->name) {
1530 again:
1531 		obj->name = ++dev->obj_name;
1532 		/* 0 is reserved, make sure we don't clash. */
1533 		if (obj->name == 0 || SPLAY_INSERT(drm_name_tree,
1534 		    &dev->name_tree, obj))
1535 			goto again;
1536 		/* name holds a reference to the object */
1537 		drm_ref(&obj->uobj);
1538 	}
1539 	mtx_leave(&dev->obj_name_lock);
1540 
1541 	args->name = (uint64_t)obj->name;
1542 
1543 	drm_unref(&obj->uobj);
1544 
1545 	return (0);
1546 }
1547 
1548 /**
1549  * Open an object using the global name, returning a handle and the size.
1550  *
1551  * This handle (of course) holds a reference to the object, so the object
1552  * will not go away until the handle is deleted.
1553  */
1554 int
1555 drm_gem_open_ioctl(struct drm_device *dev, void *data,
1556 		   struct drm_file *file_priv)
1557 {
1558 	struct drm_gem_open *args = data;
1559 	struct drm_gem_object *obj, search;
1560 	int ret;
1561 	u32 handle;
1562 
1563 	if (!(dev->driver->flags & DRIVER_GEM))
1564 		return -ENODEV;
1565 
1566 	mtx_enter(&dev->obj_name_lock);
1567 	search.name = args->name;
1568 	obj = SPLAY_FIND(drm_name_tree, &dev->name_tree, &search);
1569 	if (obj)
1570 		drm_gem_object_reference(obj);
1571 	mtx_leave(&dev->obj_name_lock);
1572 	if (!obj)
1573 		return -ENOENT;
1574 
1575 	ret = drm_gem_handle_create(file_priv, obj, &handle);
1576 	drm_gem_object_unreference_unlocked(obj);
1577 	if (ret)
1578 		return ret;
1579 
1580 	args->handle = handle;
1581 	args->size = obj->size;
1582 
1583 	return 0;
1584 }
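
/*
 * Together, flink and open implement global buffer sharing: one
 * client exports a handle as a name, a second client turns the name
 * back into a handle of its own. A sketch (fd1 and fd2 are two
 * authenticated drm file descriptors; error checks omitted):
 *
 *	struct drm_gem_flink flink;
 *	struct drm_gem_open op;
 *
 *	flink.handle = handle;
 *	ioctl(fd1, DRM_IOCTL_GEM_FLINK, &flink);
 *	... pass flink.name to the second client ...
 *	op.name = flink.name;
 *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op);
 *	(op.handle and op.size are now valid for fd2)
 */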
1585 
1586 void
1587 drm_gem_object_handle_reference(struct drm_gem_object *obj)
1588 {
1589 	drm_gem_object_reference(obj);
1590 	obj->handlecount++;
1591 }
1592 
1593 void
1594 drm_gem_object_handle_unreference(struct drm_gem_object *obj)
1595 {
1596 	/* do this first in case this is the last reference */
1597 	if (--obj->handlecount == 0) {
1598 		struct drm_device	*dev = obj->dev;
1599 
1600 		mtx_enter(&dev->obj_name_lock);
1601 		if (obj->name) {
1602 			SPLAY_REMOVE(drm_name_tree, &dev->name_tree, obj);
1603 			obj->name = 0;
1604 			mtx_leave(&dev->obj_name_lock);
1605 			/* name held a reference to object */
1606 			drm_gem_object_unreference(obj);
1607 		} else {
1608 			mtx_leave(&dev->obj_name_lock);
1609 		}
1610 	}
1611 
1612 	drm_gem_object_unreference(obj);
1613 }
1614 
1615 void
1616 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
1617 {
1618 	struct drm_device *dev = obj->dev;
1619 
1620 	DRM_LOCK();
1621 	drm_gem_object_handle_unreference(obj);
1622 	DRM_UNLOCK();
1623 }
1624 
1625 /**
1626  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
1627  * @obj: obj in question
1628  *
1629  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
1630  */
1631 void
1632 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
1633 {
1634 	struct drm_device *dev = obj->dev;
1635 	struct drm_local_map *map = obj->map;
1636 
1637 	TAILQ_REMOVE(&dev->maplist, map, link);
1638 	obj->map = NULL;
1639 
1640 	/* NOCOALESCE set, can't fail */
1641 	extent_free(dev->handle_ext, map->ext, map->size, EX_NOWAIT);
1642 
1643 	drm_free(map);
1644 }
1645 
1646 /**
1647  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
1648  * @obj: obj in question
1649  *
1650  * GEM memory mapping works by handing back to userspace a fake mmap offset
1651  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
1652  * up the object based on the offset and sets up the various memory mapping
1653  * structures.
1654  *
1655  * This routine allocates and attaches a fake offset for @obj.
1656  */
1657 int
1658 drm_gem_create_mmap_offset(struct drm_gem_object *obj)
1659 {
1660 	struct drm_device *dev = obj->dev;
1661 	struct drm_local_map *map;
1662 	int ret;
1663 
1664 	/* Set the object up for mmap'ing */
1665 	map = drm_calloc(1, sizeof(*map));
1666 	if (map == NULL)
1667 		return -ENOMEM;
1668 
1669 	map->flags = _DRM_DRIVER;
1670 	map->type = _DRM_GEM;
1671 	map->size = obj->size;
1672 	map->handle = obj;
1673 
1674 	/* Get a DRM GEM mmap offset allocated... */
1675 	ret = extent_alloc(dev->handle_ext, map->size, PAGE_SIZE, 0,
1676 	    0, EX_NOWAIT, &map->ext);
1677 	if (ret) {
1678 		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1679 		ret = -ENOSPC;
1680 		goto out_free_list;
1681 	}
1682 
1683 	TAILQ_INSERT_TAIL(&dev->maplist, map, link);
1684 	obj->map = map;
1685 	return 0;
1686 
1687 out_free_list:
1688 	drm_free(map);
1689 
1690 	return ret;
1691 }
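
/*
 * From userland the fake offset is consumed by an ordinary mmap(2)
 * of the drm device. A sketch using the dumb buffer path, where
 * DRM_IOCTL_MODE_MAP_DUMB returns the offset ("fd", "handle" and
 * "size" are assumptions; error checks omitted):
 *
 *	struct drm_mode_map_dumb arg;
 *	void *p;
 *
 *	arg.handle = handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &arg);
 *	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, arg.offset);
 */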
1692 
1693 struct uvm_object *
1694 udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
1695 {
1696 	struct drm_device *dev = drm_get_device_from_kdev(device);
1697 	struct drm_local_map *map;
1698 	struct drm_gem_object *obj;
1699 
1700 	if (cdevsw[major(device)].d_mmap != drmmmap)
1701 		return NULL;
1702 
1703 	if (dev == NULL)
1704 		return NULL;
1705 
1706 	if (dev->driver->mmap)
1707 		return dev->driver->mmap(dev, off, size);
1708 
1709 	DRM_LOCK();
1710 	TAILQ_FOREACH(map, &dev->maplist, link) {
1711 		if (off >= map->ext && off + size <= map->ext + map->size)
1712 			break;
1713 	}
1714 
1715 	if (map == NULL || map->type != _DRM_GEM) {
1716 		DRM_UNLOCK();
1717 		return NULL;
1718 	}
1719 
1720 	obj = (struct drm_gem_object *)map->handle;
1721 	drm_ref(&obj->uobj);
1722 	DRM_UNLOCK();
1723 	return &obj->uobj;
1724 }
1725 
1726 /*
1727  * Compute order.  Can be made faster.
1728  */
1729 int
1730 drm_order(unsigned long size)
1731 {
1732 	int order;
1733 	unsigned long tmp;
1734 
1735 	for (order = 0, tmp = size; tmp >>= 1; ++order)
1736 		;
1737 
1738 	if (size & ~(1 << order))
1739 		++order;
1740 
1741 	return order;
1742 }
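
/*
 * That is, drm_order(size) returns the smallest order such that
 * (1 << order) >= size, for example:
 *
 *	drm_order(1)    == 0
 *	drm_order(4096) == 12
 *	drm_order(4097) == 13
 */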
1743 
1744 int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
1745 {
1746 	pci_chipset_tag_t	pc = dev->pc;
1747 	pcitag_t		tag;
1748 	int			pos;
1749 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
1750 	pcireg_t		id;
1751 
1752 	*mask = 0;
1753 
1754 	if (dev->bridgetag == NULL)
1755 		return -EINVAL;
1756 	tag = *dev->bridgetag;
1757 
1758 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1759 	    &pos, NULL))
1760 		return -EINVAL;
1761 
1762 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1763 
1764 	/* we've been informed VIA and ServerWorks bridges don't make the cut */
1765 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
1766 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
1767 		return -EINVAL;
1768 
1769 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1770 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
1771 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
1772 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
1773 
1774 	lnkcap &= 0x0f;
1775 	lnkcap2 &= 0xfe;
1776 
1777 	if (lnkcap2) { /* PCIE GEN 3.0 */
1778 		if (lnkcap2 & 2)
1779 			*mask |= DRM_PCIE_SPEED_25;
1780 		if (lnkcap2 & 4)
1781 			*mask |= DRM_PCIE_SPEED_50;
1782 		if (lnkcap2 & 8)
1783 			*mask |= DRM_PCIE_SPEED_80;
1784 	} else {
1785 		if (lnkcap & 1)
1786 			*mask |= DRM_PCIE_SPEED_25;
1787 		if (lnkcap & 2)
1788 			*mask |= DRM_PCIE_SPEED_50;
1789 	}
1790 
1791 	DRM_INFO("probing gen 2 caps for device 0x%04x:0x%04x = %x/%x\n",
1792 	    PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap, lnkcap2);
1793 	return 0;
1794 }
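
/*
 * A sketch of driver-side use, e.g. deciding whether a faster link
 * speed may be enabled:
 *
 *	u32 mask;
 *
 *	if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0 &&
 *	    (mask & DRM_PCIE_SPEED_50))
 *		... the link supports 5.0 GT/s ...
 */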
1795 
1796 int
1797 drm_handle_cmp(struct drm_handle *a, struct drm_handle *b)
1798 {
1799 	return (a->handle < b->handle ? -1 : a->handle > b->handle);
1800 }
1801 
1802 int
1803 drm_name_cmp(struct drm_gem_object *a, struct drm_gem_object *b)
1804 {
1805 	return (a->name < b->name ? -1 : a->name > b->name);
1806 }
1807 
1808 SPLAY_GENERATE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
1809 
1810 SPLAY_GENERATE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);
1811