/* $OpenBSD: drm_drv.c,v 1.101 2013/03/22 07:52:36 jsg Exp $ */
/*-
 * Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
 * Copyright © 2008 Intel Corporation
 * Copyright 2003 Eric Anholt
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Daryll Strauss <daryll@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *    Eric Anholt <eric@anholt.net>
 *    Owain Ainsworth <oga@openbsd.org>
 *
 */

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <sys/param.h>
#include <sys/limits.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>

#include <sys/ttycom.h> /* for TIOCSPGRP */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#ifdef DRMDEBUG
int drm_debug_flag = 1;
#endif

int	 drm_firstopen(struct drm_device *);
int	 drm_lastclose(struct drm_device *);
void	 drm_attach(struct device *, struct device *, void *);
int	 drm_probe(struct device *, void *, void *);
int	 drm_detach(struct device *, int);
int	 drm_activate(struct device *, int);
int	 drmprint(void *, const char *);
int	 drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
	     struct drm_pending_event **);

int	 drm_getunique(struct drm_device *, void *, struct drm_file *);
int	 drm_version(struct drm_device *, void *, struct drm_file *);
int	 drm_setversion(struct drm_device *, void *, struct drm_file *);
int	 drm_getmagic(struct drm_device *, void *, struct drm_file *);
int	 drm_authmagic(struct drm_device *, void *, struct drm_file *);
int	 drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);

/* functions used by the per-open handle code to grab references to objects */
void	 drm_handle_ref(struct drm_obj *);
void	 drm_handle_unref(struct drm_obj *);

int	 drm_handle_cmp(struct drm_handle *, struct drm_handle *);
int	 drm_name_cmp(struct drm_obj *, struct drm_obj *);
int	 drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
	     vm_fault_t, vm_prot_t, int);
boolean_t	 drm_flush(struct uvm_object *, voff_t, voff_t, int);

SPLAY_PROTOTYPE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
SPLAY_PROTOTYPE(drm_name_tree, drm_obj, entry, drm_name_cmp);

int	 drm_getcap(struct drm_device *, void *, struct drm_file *);

/*
 * attach drm to a pci-based driver.
 *
 * This function does all the pci-specific calculations for the
 * drm_attach_args.
 */
struct device *
drm_attach_pci(struct drm_driver_info *driver, struct pci_attach_args *pa,
    int is_agp, struct device *dev)
{
	struct drm_attach_args arg;
	pcireg_t subsys;

	arg.driver = driver;
	arg.dmat = pa->pa_dmat;
	arg.bst = pa->pa_memt;
	arg.irq = pa->pa_intrline;
	arg.is_agp = is_agp;

	arg.pci_vendor = PCI_VENDOR(pa->pa_id);
	arg.pci_device = PCI_PRODUCT(pa->pa_id);

	subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	arg.pci_subvendor = PCI_VENDOR(subsys);
	arg.pci_subdevice = PCI_PRODUCT(subsys);

	arg.busid_len = 20;
	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
	if (arg.busid == NULL) {
		printf("%s: no memory for drm\n", dev->dv_xname);
		return (NULL);
	}
	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);

	return (config_found(dev, &arg, drmprint));
}

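/*
 * Illustrative sketch (not part of this file): a PCI graphics driver's
 * autoconf glue would typically call drm_pciprobe() from its match
 * function and drm_attach_pci() from its attach function. The "foodrm"
 * names below are hypothetical.
 *
 *	int
 *	foodrm_probe(struct device *parent, void *match, void *aux)
 *	{
 *		return (drm_pciprobe((struct pci_attach_args *)aux,
 *		    foodrm_pciidlist));
 *	}
 *
 *	void
 *	foodrm_attach(struct device *parent, struct device *self, void *aux)
 *	{
 *		struct foodrm_softc	*sc = (struct foodrm_softc *)self;
 *		struct pci_attach_args	*pa = aux;
 *
 *		sc->sc_drmdev = drm_attach_pci(&foodrm_driver_info, pa,
 *		    0, self);
 *	}
 *
 * where 0 says the card is not on AGP and self becomes the drm device's
 * parent.
 */
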
int
drmprint(void *aux, const char *pnp)
{
	if (pnp != NULL)
		printf("drm at %s", pnp);
	return (UNCONF);
}

int
drm_pciprobe(struct pci_attach_args *pa, const struct drm_pcidev *idlist)
{
	const struct drm_pcidev *id_entry;

	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), idlist);
	if (id_entry != NULL)
		return 1;

	return 0;
}

int
drm_probe(struct device *parent, void *match, void *aux)
{
	struct drm_attach_args *da = aux;

	return (da->driver != NULL ? 1 : 0);
}

void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_device	*dev = (struct drm_device *)self;
	struct drm_attach_args	*da = aux;

	dev->dev_private = parent;
	dev->driver = da->driver;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	dev->irq = da->irq;
	dev->unique = da->busid;
	dev->unique_len = da->busid_len;
	dev->pci_vendor = da->pci_vendor;
	dev->pci_device = da->pci_device;
	dev->pci_subvendor = da->pci_subvendor;
	dev->pci_subdevice = da->pci_subdevice;

	rw_init(&dev->dev_lock, "drmdevlk");
	mtx_init(&dev->lock.spinlock, IPL_NONE);
	mtx_init(&dev->event_lock, IPL_TTY);

	TAILQ_INIT(&dev->maplist);
	SPLAY_INIT(&dev->files);
	TAILQ_INIT(&dev->vbl_events);

	/*
	 * the dma buffers api is just weird. offset 1Gb to ensure we don't
	 * conflict with it.
	 */
	dev->handle_ext = extent_create("drmext", 1024*1024*1024, LONG_MAX,
	    M_DRM, NULL, 0, EX_NOWAIT | EX_NOCOALESCE);
	if (dev->handle_ext == NULL) {
		DRM_ERROR("Failed to initialise handle extent\n");
		goto error;
	}

	if (dev->driver->flags & DRIVER_AGP) {
#if __OS_HAS_AGP
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		if (dev->driver->flags & DRIVER_AGP_REQUIRE &&
		    dev->agp == NULL) {
			printf(": couldn't find agp\n");
			goto error;
		}
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	if (drm_ctxbitmap_init(dev) != 0) {
		printf(": couldn't allocate memory for context bitmap.\n");
		goto error;
	}

	if (dev->driver->flags & DRIVER_GEM) {
		mtx_init(&dev->obj_name_lock, IPL_NONE);
		SPLAY_INIT(&dev->name_tree);
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_obj));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, 0, 0,
		    "drmobjpl", &pool_allocator_nointr);
	}

	printf("\n");
	return;

error:
	drm_lastclose(dev);
	dev->dev_private = NULL;
}

int
drm_detach(struct device *self, int flags)
{
	struct drm_device *dev = (struct drm_device *)self;

	drm_lastclose(dev);

	drm_ctxbitmap_cleanup(dev);

	extent_destroy(dev->handle_ext);

	drm_vblank_cleanup(dev);

	if (dev->agp && dev->agp->mtrr) {
		int retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	if (dev->agp != NULL) {
		drm_free(dev->agp);
		dev->agp = NULL;
	}

	return 0;
}

int
drm_activate(struct device *self, int act)
{
	switch (act) {
	case DVACT_DEACTIVATE:
		/* FIXME */
		break;
	}
	return (0);
}

struct cfattach drm_ca = {
	sizeof(struct drm_device), drm_probe, drm_attach,
	drm_detach, drm_activate
};

struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};

const struct drm_pcidev *
drm_find_description(int vendor, int device, const struct drm_pcidev *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    (idlist[i].device == device))
			return &idlist[i];
	}
	return NULL;
}

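/*
 * Sketch of the id list shape that drm_pciprobe() and
 * drm_find_description() expect: an array terminated by an entry with a
 * zero vendor id. The PCI_VENDOR_FOO/PCI_PRODUCT_FOO_* names are
 * hypothetical; see drmP.h for the exact struct drm_pcidev layout.
 *
 *	static const struct drm_pcidev foodrm_pciidlist[] = {
 *		{ PCI_VENDOR_FOO, PCI_PRODUCT_FOO_CHIP1 },
 *		{ PCI_VENDOR_FOO, PCI_PRODUCT_FOO_CHIP2 },
 *		{ 0, 0 }
 *	};
 */
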
int
drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
{
	return (f1->minor < f2->minor ? -1 : f1->minor > f2->minor);
}

SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);

struct drm_file *
drm_find_file_by_minor(struct drm_device *dev, int minor)
{
	struct drm_file	key;

	key.minor = minor;
	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
}

int
drm_firstopen(struct drm_device *dev)
{
	struct drm_local_map	*map;
	int			 i;

	/* prebuild the SAREA */
	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
	    _DRM_CONTAINS_LOCK, &map);
	if (i != 0)
		return i;

	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	if (drm_core_check_feature(dev, DRIVER_DMA) &&
	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
		if ((i = drm_dma_setup(dev)) != 0)
			return (i);
	}

	dev->magicid = 1;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev->irq_enabled = 0;
	dev->if_version = 0;

	dev->buf_pgid = 0;

	DRM_DEBUG("\n");

	return 0;
}

int
drm_lastclose(struct drm_device *dev)
{
	struct drm_local_map	*map, *mapsave;

	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

#if __OS_HAS_AGP
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_agp_takedown(dev);
#endif
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_dma_takedown(dev);

	DRM_LOCK();
	if (dev->sg != NULL &&
	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_sg_mem *sg = dev->sg;
		dev->sg = NULL;

		DRM_UNLOCK();
		drm_sg_cleanup(dev, sg);
		DRM_LOCK();
	}

	for (map = TAILQ_FIRST(&dev->maplist); map != TAILQ_END(&dev->maplist);
	    map = mapsave) {
		mapsave = TAILQ_NEXT(map, link);
		if ((map->flags & _DRM_DRIVER) == 0)
			drm_rmmap_locked(dev, map);
	}

	if (dev->lock.hw_lock != NULL) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		wakeup(&dev->lock); /* there should be nothing sleeping on it */
	}
	DRM_UNLOCK();

	return 0;
}

int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	int			 ret = 0;

	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	DRM_LOCK();
	if (dev->open_count++ == 0) {
		DRM_UNLOCK();
		if ((ret = drm_firstopen(dev)) != 0)
			goto err;
	} else {
		DRM_UNLOCK();
	}

	/* always allocate at least enough space for our data */
	file_priv = drm_calloc(1, max(dev->driver->file_priv_size,
	    sizeof(*file_priv)));
	if (file_priv == NULL) {
		ret = ENOMEM;
		goto err;
	}

	file_priv->kdev = kdev;
	file_priv->flags = flags;
	file_priv->minor = minor(kdev);
	INIT_LIST_HEAD(&file_priv->fbs);
	TAILQ_INIT(&file_priv->evlist);
	file_priv->event_space = 4096; /* 4k for event buffer */
	DRM_DEBUG("minor = %d\n", file_priv->minor);

	/* for compatibility root is always authenticated */
	file_priv->authenticated = DRM_SUSER(p);

	if (dev->driver->flags & DRIVER_GEM) {
		SPLAY_INIT(&file_priv->obj_tree);
		mtx_init(&file_priv->table_lock, IPL_NONE);
	}

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file_priv);
		if (ret != 0) {
			goto free_priv;
		}
	}

	DRM_LOCK();
	/* first opener automatically becomes master if root */
	if (SPLAY_EMPTY(&dev->files) && !DRM_SUSER(p)) {
		DRM_UNLOCK();
		ret = EPERM;
		goto free_priv;
	}

	file_priv->master = SPLAY_EMPTY(&dev->files);

	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	DRM_UNLOCK();

	return (0);

free_priv:
	drm_free(file_priv);
err:
	DRM_LOCK();
	--dev->open_count;
	DRM_UNLOCK();
	return (ret);
}

int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev, *evtmp;
	struct drm_pending_vblank_event	*vev;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		goto done;
	}
	DRM_UNLOCK();

	if (dev->driver->close != NULL)
		dev->driver->close(dev, file_priv);

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)&dev->device, dev->open_count);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
		    DRM_CURRENTPID,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	}
	if (dev->driver->flags & DRIVER_DMA)
		drm_reclaim_buffers(dev, file_priv);

	mtx_enter(&dev->event_lock);
	struct drmevlist *list = &dev->vbl_events;
	for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list);
	    ev = evtmp) {
		evtmp = TAILQ_NEXT(ev, link);
		vev = (struct drm_pending_vblank_event *)ev;
		if (ev->file_priv == file_priv) {
			TAILQ_REMOVE(list, ev, link);
			drm_vblank_put(dev, vev->pipe);
			ev->destroy(ev);
		}
	}
	while ((ev = TAILQ_FIRST(&file_priv->evlist)) != NULL) {
		TAILQ_REMOVE(&file_priv->evlist, ev, link);
		ev->destroy(ev);
	}
	mtx_leave(&dev->event_lock);

	if (dev->driver->flags & DRIVER_MODESET)
		drm_fb_release(dev, file_priv);

	DRM_LOCK();
	if (dev->driver->flags & DRIVER_GEM) {
		struct drm_handle	*han;
		mtx_enter(&file_priv->table_lock);
		while ((han = SPLAY_ROOT(&file_priv->obj_tree)) != NULL) {
			SPLAY_REMOVE(drm_obj_tree, &file_priv->obj_tree, han);
			drm_handle_unref(han->obj);
			drm_free(han);
		}
		mtx_leave(&file_priv->table_lock);
	}

	dev->buf_pgid = 0;

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	drm_free(file_priv);

done:
	if (--dev->open_count == 0) {
		DRM_UNLOCK();
		retcode = drm_lastclose(dev);
	} else
		DRM_UNLOCK();

	return (retcode);
}

/* drmioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int
drmioctl(dev_t kdev, u_long cmd, caddr_t data, int flags,
    struct proc *p)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_file *file_priv;

	if (dev == NULL)
		return ENODEV;

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	DRM_UNLOCK();
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return EINVAL;
	}

	++file_priv->ioctl_count;

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, DRM_IOCTL_NR(cmd), (long)&dev->device,
	    file_priv->authenticated);

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case TIOCSPGRP:
		dev->buf_pgid = *(int *)data;
		return 0;

	case TIOCGPGRP:
		*(int *)data = dev->buf_pgid;
		return 0;
	case DRM_IOCTL_VERSION:
		return (drm_version(dev, data, file_priv));
	case DRM_IOCTL_GET_UNIQUE:
		return (drm_getunique(dev, data, file_priv));
	case DRM_IOCTL_GET_MAGIC:
		return (drm_getmagic(dev, data, file_priv));
	case DRM_IOCTL_WAIT_VBLANK:
		return (drm_wait_vblank(dev, data, file_priv));
	case DRM_IOCTL_MODESET_CTL:
		return (drm_modeset_ctl(dev, data, file_priv));
	case DRM_IOCTL_GEM_CLOSE:
		return (drm_gem_close_ioctl(dev, data, file_priv));

	/* removed */
	case DRM_IOCTL_GET_MAP:
		/* FALLTHROUGH */
	case DRM_IOCTL_GET_CLIENT:
		/* FALLTHROUGH */
	case DRM_IOCTL_GET_STATS:
		return (EINVAL);
	/*
	 * No-op ioctls. We don't check permissions on them because they
	 * do nothing; they'll be removed as soon as userland is
	 * definitely purged.
	 */
	case DRM_IOCTL_SET_SAREA_CTX:
	case DRM_IOCTL_BLOCK:
	case DRM_IOCTL_UNBLOCK:
	case DRM_IOCTL_MOD_CTX:
	case DRM_IOCTL_MARK_BUFS:
	case DRM_IOCTL_FINISH:
	case DRM_IOCTL_INFO_BUFS:
	case DRM_IOCTL_SWITCH_CTX:
	case DRM_IOCTL_NEW_CTX:
	case DRM_IOCTL_GET_SAREA_CTX:
		return (0);
	}

	if (file_priv->authenticated == 1) {
		switch (cmd) {
		case DRM_IOCTL_RM_MAP:
			return (drm_rmmap_ioctl(dev, data, file_priv));
		case DRM_IOCTL_GET_CTX:
			return (drm_getctx(dev, data, file_priv));
		case DRM_IOCTL_RES_CTX:
			return (drm_resctx(dev, data, file_priv));
		case DRM_IOCTL_LOCK:
			return (drm_lock(dev, data, file_priv));
		case DRM_IOCTL_UNLOCK:
			return (drm_unlock(dev, data, file_priv));
		case DRM_IOCTL_MAP_BUFS:
			return (drm_mapbufs(dev, data, file_priv));
		case DRM_IOCTL_FREE_BUFS:
			return (drm_freebufs(dev, data, file_priv));
		case DRM_IOCTL_DMA:
			return (drm_dma(dev, data, file_priv));
#if __OS_HAS_AGP
		case DRM_IOCTL_AGP_INFO:
			return (drm_agp_info_ioctl(dev, data, file_priv));
#endif
		case DRM_IOCTL_GEM_FLINK:
			return (drm_gem_flink_ioctl(dev, data, file_priv));
		case DRM_IOCTL_GEM_OPEN:
			return (drm_gem_open_ioctl(dev, data, file_priv));
		case DRM_IOCTL_GET_CAP:
			return (drm_getcap(dev, data, file_priv));
		}
	}

	/* master is always root */
	if (file_priv->master == 1) {
		switch (cmd) {
		case DRM_IOCTL_SET_VERSION:
			return (drm_setversion(dev, data, file_priv));
		case DRM_IOCTL_IRQ_BUSID:
			return (drm_irq_by_busid(dev, data, file_priv));
		case DRM_IOCTL_AUTH_MAGIC:
			return (drm_authmagic(dev, data, file_priv));
		case DRM_IOCTL_ADD_MAP:
			return (drm_addmap_ioctl(dev, data, file_priv));
		case DRM_IOCTL_ADD_CTX:
			return (drm_addctx(dev, data, file_priv));
		case DRM_IOCTL_RM_CTX:
			return (drm_rmctx(dev, data, file_priv));
		case DRM_IOCTL_ADD_BUFS:
			return (drm_addbufs(dev, (struct drm_buf_desc *)data));
		case DRM_IOCTL_CONTROL:
			return (drm_control(dev, data, file_priv));
#if __OS_HAS_AGP
		case DRM_IOCTL_AGP_ACQUIRE:
			return (drm_agp_acquire_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_RELEASE:
			return (drm_agp_release_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_ENABLE:
			return (drm_agp_enable_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_ALLOC:
			return (drm_agp_alloc_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_FREE:
			return (drm_agp_free_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_BIND:
			return (drm_agp_bind_ioctl(dev, data, file_priv));
		case DRM_IOCTL_AGP_UNBIND:
			return (drm_agp_unbind_ioctl(dev, data, file_priv));
#endif
		case DRM_IOCTL_SG_ALLOC:
			return (drm_sg_alloc_ioctl(dev, data, file_priv));
		case DRM_IOCTL_SG_FREE:
			return (drm_sg_free(dev, data, file_priv));
		case DRM_IOCTL_ADD_DRAW:
		case DRM_IOCTL_RM_DRAW:
		case DRM_IOCTL_UPDATE_DRAW:
			/*
			 * Support was removed from the kernel since it's not
			 * used. Just return zero until userland stops calling
			 * this ioctl.
			 */
			return (0);
		case DRM_IOCTL_SET_UNIQUE:
			/*
			 * Deprecated in DRM version 1.1, and will return
			 * EBUSY when setversion has requested version 1.1
			 * or greater.
			 */
			return (EBUSY);
		case DRM_IOCTL_MODE_GETRESOURCES:
			return drm_mode_getresources(dev, data, file_priv);
		case DRM_IOCTL_MODE_GETPLANERESOURCES:
			return drm_mode_getplane_res(dev, data, file_priv);
		case DRM_IOCTL_MODE_GETCRTC:
			return drm_mode_getcrtc(dev, data, file_priv);
		case DRM_IOCTL_MODE_SETCRTC:
			return drm_mode_setcrtc(dev, data, file_priv);
		case DRM_IOCTL_MODE_GETPLANE:
			return drm_mode_getplane(dev, data, file_priv);
		case DRM_IOCTL_MODE_SETPLANE:
			return drm_mode_setplane(dev, data, file_priv);
		case DRM_IOCTL_MODE_CURSOR:
			return drm_mode_cursor_ioctl(dev, data, file_priv);
		case DRM_IOCTL_MODE_GETGAMMA:
			return drm_mode_gamma_get_ioctl(dev, data, file_priv);
		case DRM_IOCTL_MODE_SETGAMMA:
			return drm_mode_gamma_set_ioctl(dev, data, file_priv);
		case DRM_IOCTL_MODE_GETENCODER:
			return drm_mode_getencoder(dev, data, file_priv);
		case DRM_IOCTL_MODE_GETCONNECTOR:
			return drm_mode_getconnector(dev, data, file_priv);
		case DRM_IOCTL_MODE_ATTACHMODE:
			return drm_mode_attachmode_ioctl(dev, data, file_priv);
		case DRM_IOCTL_MODE_DETACHMODE:
			return drm_mode_detachmode_ioctl(dev, data, file_priv);
		case DRM_IOCTL_MODE_GETPROPERTY:
			return drm_mode_getproperty_ioctl(dev, data,
			    file_priv);
		case DRM_IOCTL_MODE_SETPROPERTY:
			return drm_mode_connector_property_set_ioctl(dev,
			    data, file_priv);
		case DRM_IOCTL_MODE_GETPROPBLOB:
			return drm_mode_getblob_ioctl(dev, data, file_priv);
		case DRM_IOCTL_MODE_GETFB:
			return drm_mode_getfb(dev, data, file_priv);
		case DRM_IOCTL_MODE_ADDFB:
			return drm_mode_addfb(dev, data, file_priv);
		case DRM_IOCTL_MODE_ADDFB2:
			return drm_mode_addfb2(dev, data, file_priv);
		case DRM_IOCTL_MODE_RMFB:
			return drm_mode_rmfb(dev, data, file_priv);
		case DRM_IOCTL_MODE_PAGE_FLIP:
			return drm_mode_page_flip_ioctl(dev, data, file_priv);
		case DRM_IOCTL_MODE_DIRTYFB:
			return drm_mode_dirtyfb_ioctl(dev, data, file_priv);
		case DRM_IOCTL_MODE_CREATE_DUMB:
			return drm_mode_create_dumb_ioctl(dev, data,
			    file_priv);
		case DRM_IOCTL_MODE_MAP_DUMB:
			return drm_mode_mmap_dumb_ioctl(dev, data, file_priv);
		case DRM_IOCTL_MODE_DESTROY_DUMB:
			return drm_mode_destroy_dumb_ioctl(dev, data,
			    file_priv);
		case DRM_IOCTL_MODE_OBJ_GETPROPERTIES:
			return drm_mode_obj_get_properties_ioctl(dev, data,
			    file_priv);
		case DRM_IOCTL_MODE_OBJ_SETPROPERTY:
			return drm_mode_obj_set_property_ioctl(dev, data,
			    file_priv);
		}
	}
	if (dev->driver->ioctl != NULL)
		return (dev->driver->ioctl(dev, cmd, data, file_priv));
	else
		return (EINVAL);
}

int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int				 error = 0;

	if (dev == NULL)
		return (ENXIO);

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	DRM_UNLOCK();
	if (file_priv == NULL)
		return (ENXIO);
	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic: if a whole event will not fit in
	 * the read buffer, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && TAILQ_EMPTY(&file_priv->evlist)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep(&file_priv->evlist, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", 0);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		ev->destroy(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}

/*
 * Dequeue an event from the file priv in question, returning 1 if an
 * event was found. We take the resid from the read as a parameter because
 * we will only dequeue an event if the read buffer has space to fit the
 * entire thing.
 *
 * We are called locked, but we will *unlock* the queue on return so that
 * we may sleep to copyout the event.
 */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event	*ev = NULL;
	int				 gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);
	if ((ev = TAILQ_FIRST(&file_priv->evlist)) == NULL ||
	    ev->event->length > resid)
		goto out;

	TAILQ_REMOVE(&file_priv->evlist, ev, link);
	file_priv->event_space += ev->event->length;
	*out = ev;
	gotone = 1;

out:
	mtx_leave(&dev->event_lock);

	return (gotone);
}

/* XXX kqfilter ... */
int
drmpoll(dev_t kdev, int events, struct proc *p)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_file		*file_priv;
	int			 revents = 0;

	if (dev == NULL)
		return (POLLERR);

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	DRM_UNLOCK();
	if (file_priv == NULL)
		return (POLLERR);

	mtx_enter(&dev->event_lock);
	if (events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&file_priv->evlist))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &file_priv->rsel);
	}
	mtx_leave(&dev->event_lock);

	return (revents);
}

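/*
 * Userland consumption sketch for the event mechanism above (hypothetical
 * code, not part of the kernel): wait for readability with poll(2), then
 * read whole struct drm_event records. Each read(2) returns only as many
 * complete events as fit in the buffer; handle_vblank() is a made-up
 * consumer.
 *
 *	char buf[4096];
 *	struct pollfd pfd = { .fd = drmfd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(drmfd, buf, sizeof(buf));
 *		ssize_t off;
 *
 *		for (off = 0; n > 0 && off < n; off += e->length) {
 *			struct drm_event *e = (struct drm_event *)&buf[off];
 *
 *			if (e->type == DRM_EVENT_VBLANK)
 *				handle_vblank((struct drm_event_vblank *)e);
 *		}
 *	}
 */
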
struct drm_local_map *
drm_getsarea(struct drm_device *dev)
{
	struct drm_local_map	*map;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
			break;
	}
	DRM_UNLOCK();
	return (map);
}

paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
	struct drm_local_map	*map;
	struct drm_file		*file_priv;
	enum drm_map_type	 type;

	if (dev == NULL)
		return (-1);

	DRM_LOCK();
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	DRM_UNLOCK();
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return (-1);
	}

	if (!file_priv->authenticated)
		return (-1);

	if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
		struct drm_device_dma *dma = dev->dma;
		paddr_t	phys = -1;

		rw_enter_write(&dma->dma_lock);
		if (dma->pagelist != NULL)
			phys = dma->pagelist[offset >> PAGE_SHIFT];
		rw_exit_write(&dma->dma_lock);

		return (phys);
	}

	/*
	 * A sequential search of a linked list is
	 * fine here because: 1) there will only be
	 * about 5-10 entries in the list and, 2) a
	 * DRI client only has to do this mapping
	 * once, so it doesn't have to be optimized
	 * for performance, even if the list was a
	 * bit longer.
	 */
	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (offset >= map->ext &&
		    offset < map->ext + map->size) {
			offset -= map->ext;
			break;
		}
	}

	if (map == NULL) {
		DRM_UNLOCK();
		DRM_DEBUG("can't find map\n");
		return (-1);
	}
	if ((map->flags & _DRM_RESTRICTED) && file_priv->master == 0) {
		DRM_UNLOCK();
		DRM_DEBUG("restricted map\n");
		return (-1);
	}
	type = map->type;
	DRM_UNLOCK();

	switch (type) {
	case _DRM_AGP:
		return agp_mmap(dev->agp->agpdev,
		    offset + map->offset - dev->agp->base, prot);
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		return (offset + map->offset);
		break;
	/* XXX unify all the bus_dmamem_mmap bits */
	case _DRM_SCATTER_GATHER:
		return (bus_dmamem_mmap(dev->dmat, dev->sg->mem->segs,
		    dev->sg->mem->nsegs, map->offset - dev->sg->handle +
		    offset, prot, BUS_DMA_NOWAIT));
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		return (bus_dmamem_mmap(dev->dmat, map->dmamem->segs,
		    map->dmamem->nsegs, offset, prot, BUS_DMA_NOWAIT));
	default:
		DRM_ERROR("bad map type %d\n", type);
		return (-1);	/* This should never happen. */
	}
	/* NOTREACHED */
}

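/*
 * Userland counterpart sketch (hypothetical, not part of the kernel): the
 * offset passed to mmap(2) on the drm device is the handle handed back
 * when the mapping was added, and is resolved against dev->maplist by
 * drmmmap() above.
 *
 *	struct drm_map map;
 *
 *	...fill in map via DRM_IOCTL_ADD_MAP and read back map.handle...
 *	void *p = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, drmfd, (off_t)(unsigned long)map.handle);
 */
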
/*
 * Beginning in revision 1.1 of the DRM interface, getunique will return
 * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
 * before setunique has been called.  The format for the bus-specific part of
 * the unique is not defined for any other bus.
 */
int
drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_unique	*u = data;

	if (u->unique_len >= dev->unique_len) {
		if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
			return EFAULT;
	}
	u->unique_len = dev->unique_len;

	return 0;
}

int
drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_get_cap *req = data;

	req->value = 0;
	switch (req->capability) {
	case DRM_CAP_DUMB_BUFFER:
		if (dev->driver->dumb_create)
			req->value = 1;
		break;
	case DRM_CAP_VBLANK_HIGH_CRTC:
		req->value = 1;
		break;
	case DRM_CAP_DUMB_PREFERRED_DEPTH:
		req->value = dev->mode_config.preferred_depth;
		break;
	case DRM_CAP_DUMB_PREFER_SHADOW:
		req->value = dev->mode_config.prefer_shadow;
		break;
	default:
		return EINVAL;
	}
	return 0;
}

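/*
 * Sketch (hypothetical userland code) of probing a capability before
 * relying on it, e.g. dumb buffer support:
 *
 *	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };
 *
 *	if (ioctl(drmfd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value == 1)
 *		...the driver implements the dumb buffer ioctls...
 */
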
#define DRM_IF_MAJOR	1
#define DRM_IF_MINOR	2

int
drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version	*version = data;
	int			 len;

#define DRM_COPY(name, value)						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( DRM_COPY_TO_USER( name, value, len ) )		\
			return EFAULT;					\
	}

	version->version_major = dev->driver->major;
	version->version_minor = dev->driver->minor;
	version->version_patchlevel = dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}

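/*
 * DRM_COPY() only copies out as many bytes as the caller provided room
 * for, but always reports the true string length back, so userland
 * typically calls DRM_IOCTL_VERSION twice (hypothetical sketch; the
 * kernel does not NUL-terminate, hence the + 1):
 *
 *	struct drm_version v;
 *
 *	memset(&v, 0, sizeof(v));
 *	ioctl(drmfd, DRM_IOCTL_VERSION, &v);
 *	v.name = calloc(1, v.name_len + 1);
 *	v.date = calloc(1, v.date_len + 1);
 *	v.desc = calloc(1, v.desc_len + 1);
 *	ioctl(drmfd, DRM_IOCTL_VERSION, &v);
 */
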
int
drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_set_version	ver, *sv = data;
	int			if_version;

	/* Save the incoming data, and set the response before continuing
	 * any further.
	 */
	ver = *sv;
	sv->drm_di_major = DRM_IF_MAJOR;
	sv->drm_di_minor = DRM_IF_MINOR;
	sv->drm_dd_major = dev->driver->major;
	sv->drm_dd_minor = dev->driver->minor;

	/*
	 * We no longer support interface versions less than 1.1, so error
	 * out if the xserver is too old. 1.1 always ties the drm to a
	 * certain busid; this was done on attach.
	 */
	if (ver.drm_di_major != -1) {
		if (ver.drm_di_major != DRM_IF_MAJOR || ver.drm_di_minor < 1 ||
		    ver.drm_di_minor > DRM_IF_MINOR) {
			return EINVAL;
		}
		if_version = DRM_IF_VERSION(ver.drm_di_major, ver.drm_di_minor);
		dev->if_version = imax(if_version, dev->if_version);
	}

	if (ver.drm_dd_major != -1) {
		if (ver.drm_dd_major != dev->driver->major ||
		    ver.drm_dd_minor < 0 ||
		    ver.drm_dd_minor > dev->driver->minor)
			return EINVAL;
	}

	return 0;
}

struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one is allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM);

	return (NULL);
}

void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM);
}

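/*
 * Typical driver-side use of the dmamem helpers above (sketch with a
 * hypothetical 64k ring buffer): allocate once at attach time, hand the
 * bus address to the hardware, write through the kernel mapping, and
 * tear down with drm_dmamem_free() at detach.
 *
 *	struct drm_dmamem *ring;
 *
 *	ring = drm_dmamem_alloc(dev->dmat, 64 * 1024, PAGE_SIZE, 1,
 *	    64 * 1024, BUS_DMA_COHERENT, 0);
 *	if (ring == NULL)
 *		return (ENOMEM);
 *	...program the device with ring->segs[0].ds_addr, and access
 *	the buffer from the CPU through ring->kva...
 *	drm_dmamem_free(dev->dmat, ring);
 */
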
/**
 * Called by the client, this returns a unique magic number to be authorized
 * by the master.
 *
 * The master may use its own knowledge of the client (such as the X
 * connection that the magic is passed over) to determine if the magic number
 * should be authenticated.
 */
int
drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_auth		*auth = data;

	if (dev->magicid == 0)
		dev->magicid = 1;

	/* Find unique magic */
	if (file_priv->magic) {
		auth->magic = file_priv->magic;
	} else {
		DRM_LOCK();
		file_priv->magic = auth->magic = dev->magicid++;
		DRM_UNLOCK();
	}

	DRM_DEBUG("%u\n", auth->magic);
	return (0);
}

/**
 * Marks the client associated with the given magic number as authenticated.
 */
int
drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_file	*p;
	struct drm_auth	*auth = data;
	int		 ret = EINVAL;

	DRM_DEBUG("%u\n", auth->magic);

	if (auth->magic == 0)
		return (ret);

	DRM_LOCK();
	SPLAY_FOREACH(p, drm_file_tree, &dev->files) {
		if (p->magic == auth->magic) {
			p->authenticated = 1;
			p->magic = 0;
			ret = 0;
			break;
		}
	}
	DRM_UNLOCK();

	return (ret);
}

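/*
 * The magic handshake end to end (hypothetical userland sketch): an
 * unauthenticated client fetches a magic and hands it, for example over
 * the X connection, to the master, which authorizes it.
 *
 *	client:
 *		struct drm_auth auth;
 *
 *		ioctl(clientfd, DRM_IOCTL_GET_MAGIC, &auth);
 *		...send auth.magic to the master...
 *
 *	master:
 *		struct drm_auth auth = { .magic = received_magic };
 *
 *		ioctl(masterfd, DRM_IOCTL_AUTH_MAGIC, &auth);
 */
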
struct uvm_pagerops drm_pgops = {
	NULL,
	drm_ref,
	drm_unref,
	drm_fault,
	drm_flush,
};

void
drm_hold_object_locked(struct drm_obj *obj)
{
	while (obj->do_flags & DRM_BUSY) {
		atomic_setbits_int(&obj->do_flags, DRM_WANTED);
		simple_unlock(&obj->uobj.vmobjlock);
#ifdef DRMLOCKDEBUG
		{
		int ret = 0;
		ret = tsleep(obj, PVM, "drm_hold", 3 * hz); /* XXX msleep */
		if (ret)
			printf("still waiting for obj %p, owned by %p\n",
			    obj, obj->holding_proc);
		}
#else
		tsleep(obj, PVM, "drm_hold", 0); /* XXX msleep */
#endif
		simple_lock(&obj->uobj.vmobjlock);
	}
#ifdef DRMLOCKDEBUG
	obj->holding_proc = curproc;
#endif
	atomic_setbits_int(&obj->do_flags, DRM_BUSY);
}

void
drm_hold_object(struct drm_obj *obj)
{
	simple_lock(&obj->uobj.vmobjlock);
	drm_hold_object_locked(obj);
	simple_unlock(&obj->uobj.vmobjlock);
}

int
drm_try_hold_object(struct drm_obj *obj)
{
	simple_lock(&obj->uobj.vmobjlock);
	/* if the object is free, grab it */
	if (obj->do_flags & (DRM_BUSY | DRM_WANTED)) {
		simple_unlock(&obj->uobj.vmobjlock);
		return (0);
	}
	atomic_setbits_int(&obj->do_flags, DRM_BUSY);
#ifdef DRMLOCKDEBUG
	obj->holding_proc = curproc;
#endif
	simple_unlock(&obj->uobj.vmobjlock);
	return (1);
}

void
drm_unhold_object_locked(struct drm_obj *obj)
{
	if (obj->do_flags & DRM_WANTED)
		wakeup(obj);
#ifdef DRMLOCKDEBUG
	obj->holding_proc = NULL;
#endif
	atomic_clearbits_int(&obj->do_flags, DRM_WANTED | DRM_BUSY);
}

void
drm_unhold_object(struct drm_obj *obj)
{
	simple_lock(&obj->uobj.vmobjlock);
	drm_unhold_object_locked(obj);
	simple_unlock(&obj->uobj.vmobjlock);
}

void
drm_ref_locked(struct uvm_object *uobj)
{
	uobj->uo_refs++;
}

void
drm_ref(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
	drm_ref_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

void
drm_unref(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
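	/* drm_unref_locked() drops vmobjlock before returning */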
	drm_unref_locked(uobj);
}

void
drm_unref_locked(struct uvm_object *uobj)
{
	struct drm_obj		*obj = (struct drm_obj *)uobj;
	struct drm_device	*dev = obj->dev;

again:
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	/* inlined version of drm_hold because we want to trylock then sleep */
	if (obj->do_flags & DRM_BUSY) {
		atomic_setbits_int(&obj->do_flags, DRM_WANTED);
		simple_unlock(&uobj->vmobjlock);
		tsleep(obj, PVM, "drm_unref", 0); /* XXX msleep */
		simple_lock(&uobj->vmobjlock);
		goto again;
	}
#ifdef DRMLOCKDEBUG
	obj->holding_proc = curproc;
#endif
	atomic_setbits_int(&obj->do_flags, DRM_BUSY);
	simple_unlock(&uobj->vmobjlock);
	/* We own this thing now. It is on no queues, though it may still
	 * be bound to the aperture (and on the inactive list, in which case
	 * idling the buffer is what triggered the free). Since we know no
	 * one else can grab it now, we can nuke with impunity.
	 */
	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	uao_detach(obj->uao);

	atomic_dec(&dev->obj_count);
	atomic_sub(obj->size, &dev->obj_memory);
	if (obj->do_flags & DRM_WANTED) /* should never happen, not on lists */
		wakeup(obj);
	pool_put(&dev->objpl, obj);
}

/*
 * convenience function to unreference and unhold an object.
 */
void
drm_unhold_and_unref(struct drm_obj *obj)
{
	drm_lock_obj(obj);
	drm_unhold_object_locked(obj);
	drm_unref_locked(&obj->uobj);
}

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_obj *obj = (struct drm_obj *)uobj;
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return (VM_PAGER_ERROR);
	}

	/* Call down into driver to do the magic */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);
	return (ret);
}

/*
 * Code to support memory managers based on the GEM (Graphics
 * Execution Manager) api.
 */
struct drm_obj *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_obj	*obj;

	KASSERT((size & (PAGE_SIZE - 1)) == 0);

	if ((obj = pool_get(&dev->objpl, PR_WAITOK | PR_ZERO)) == NULL)
		return (NULL);

	obj->dev = dev;

	/* uao create can't fail in the 0 case, it just sleeps */
	obj->uao = uao_create(size, 0);
	obj->size = size;
	uvm_objinit(&obj->uobj, &drm_pgops, 1);

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		uao_detach(obj->uao);
		pool_put(&dev->objpl, obj);
		return (NULL);
	}
	atomic_inc(&dev->obj_count);
	atomic_add(obj->size, &dev->obj_memory);
	return (obj);
}

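/*
 * Sketch of how a GEM driver's hypothetical "create" ioctl would use the
 * helpers here: allocate the backing object, expose it to the caller
 * through a per-open handle, and remember that a successful
 * drm_handle_create() consumes our reference (see drm_handle_ref()), so
 * we only unref on failure, mirroring drm_gem_open_ioctl() below.
 *
 *	struct drm_obj	*obj;
 *	int		 handle, ret;
 *
 *	obj = drm_gem_object_alloc(dev, round_page(args->size));
 *	if (obj == NULL)
 *		return (ENOMEM);
 *	ret = drm_handle_create(file_priv, obj, &handle);
 *	if (ret != 0) {
 *		drm_unref(&obj->uobj);
 *		return (ret);
 *	}
 *	args->handle = handle;
 *	return (0);
 */
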
int
drm_handle_create(struct drm_file *file_priv, struct drm_obj *obj,
    int *handlep)
{
	struct drm_handle	*han;

	if ((han = drm_calloc(1, sizeof(*han))) == NULL)
		return (ENOMEM);

	han->obj = obj;
	mtx_enter(&file_priv->table_lock);
again:
	*handlep = han->handle = ++file_priv->obj_id;
	/*
	 * Make sure we have no duplicates. This'll hurt once we wrap; 0 is
	 * reserved.
	 */
	if (han->handle == 0 || SPLAY_INSERT(drm_obj_tree,
	    &file_priv->obj_tree, han))
		goto again;
	mtx_leave(&file_priv->table_lock);

	drm_handle_ref(obj);
	return (0);
}

struct drm_obj *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *file_priv,
    int handle)
{
	struct drm_obj		*obj;
	struct drm_handle	*han, search;

	search.handle = handle;

	mtx_enter(&file_priv->table_lock);
	han = SPLAY_FIND(drm_obj_tree, &file_priv->obj_tree, &search);
	if (han == NULL) {
		mtx_leave(&file_priv->table_lock);
		return (NULL);
	}

	obj = han->obj;
	drm_ref(&obj->uobj);
	mtx_leave(&file_priv->table_lock);

	return (obj);
}

int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_close	*args = data;
	struct drm_handle	*han, find;
	struct drm_obj		*obj;

	if ((dev->driver->flags & DRIVER_GEM) == 0)
		return (ENODEV);

	find.handle = args->handle;
	mtx_enter(&file_priv->table_lock);
	han = SPLAY_FIND(drm_obj_tree, &file_priv->obj_tree, &find);
	if (han == NULL) {
		mtx_leave(&file_priv->table_lock);
		return (EINVAL);
	}

	obj = han->obj;
	SPLAY_REMOVE(drm_obj_tree, &file_priv->obj_tree, han);
	mtx_leave(&file_priv->table_lock);

	drm_free(han);

	DRM_LOCK();
	drm_handle_unref(obj);
	DRM_UNLOCK();

	return (0);
}

int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_flink	*args = data;
	struct drm_obj		*obj;

	if (!(dev->driver->flags & DRIVER_GEM))
		return (ENODEV);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return (EBADF);

	mtx_enter(&dev->obj_name_lock);
	if (!obj->name) {
again:
		obj->name = ++dev->obj_name;
		/* 0 is reserved, make sure we don't clash. */
		if (obj->name == 0 || SPLAY_INSERT(drm_name_tree,
		    &dev->name_tree, obj))
			goto again;
		/* name holds a reference to the object */
		drm_ref(&obj->uobj);
	}
	mtx_leave(&dev->obj_name_lock);

	args->name = (uint64_t)obj->name;

	drm_unref(&obj->uobj);

	return (0);
}

int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_open	*args = data;
	struct drm_obj		*obj, search;
	int			 ret, handle;

	if (!(dev->driver->flags & DRIVER_GEM))
		return (ENODEV);

	search.name = args->name;
	mtx_enter(&dev->obj_name_lock);
	obj = SPLAY_FIND(drm_name_tree, &dev->name_tree, &search);
	if (obj != NULL)
		drm_ref(&obj->uobj);
	mtx_leave(&dev->obj_name_lock);
	if (obj == NULL)
		return (ENOENT);

	/* this gives our reference to the handle */
	ret = drm_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_unref(&obj->uobj);
		return (ret);
	}

	args->handle = handle;
	args->size = obj->size;

	return (0);
}

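/*
 * Object sharing between processes via flink/open, in outline
 * (hypothetical userland sketch):
 *
 *	exporter:
 *		struct drm_gem_flink flink = { .handle = my_handle };
 *
 *		ioctl(drmfd, DRM_IOCTL_GEM_FLINK, &flink);
 *		...publish flink.name to the other process...
 *
 *	importer:
 *		struct drm_gem_open op = { .name = received_name };
 *
 *		ioctl(drmfd, DRM_IOCTL_GEM_OPEN, &op);
 *		...op.handle now refers to the shared object and op.size
 *		is its size...
 */
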
/*
 * grab a reference for a per-open handle.
 * The object contains a handlecount too because if all handles disappear we
 * need to also remove the global name (names initially are per open unless
 * the flink ioctl is called).
 */
void
drm_handle_ref(struct drm_obj *obj)
{
	/* we are given the reference from the caller, so just
	 * crank handlecount.
	 */
	obj->handlecount++;
}

/*
 * Remove the reference owned by a per-open handle. If we're the last one,
 * remove the reference from flink, too.
 */
void
drm_handle_unref(struct drm_obj *obj)
{
	/* do this first in case this is the last reference */
	if (--obj->handlecount == 0) {
		struct drm_device	*dev = obj->dev;

		mtx_enter(&dev->obj_name_lock);
		if (obj->name) {
			SPLAY_REMOVE(drm_name_tree, &dev->name_tree, obj);
			obj->name = 0;
			mtx_leave(&dev->obj_name_lock);
			/* name held a reference to object */
			drm_unref(&obj->uobj);
		} else {
			mtx_leave(&dev->obj_name_lock);
		}
	}
	drm_unref(&obj->uobj);
}

/*
 * Helper function to load a uvm anonymous object into a dmamap, to be used
 * for binding to a translation-table style sg mechanism (e.g. agp, or intel
 * gtt).
 *
 * For now we ignore maxsegsz.
 */
int
drm_gem_load_uao(bus_dma_tag_t dmat, bus_dmamap_t map, struct uvm_object *uao,
    bus_size_t size, int flags, bus_dma_segment_t **segp)
{
	bus_dma_segment_t	*segs;
	struct vm_page		*pg;
	struct pglist		 plist;
	u_long			 npages = size >> PAGE_SHIFT, i = 0;
	int			 ret;

	TAILQ_INIT(&plist);

	/*
	 * This is really quite ugly, but nothing else would need
	 * bus_dmamap_load_uao() yet.
	 */
	segs = malloc(npages * sizeof(*segs), M_DRM,
	    M_WAITOK | M_CANFAIL | M_ZERO);
	if (segs == NULL)
		return (ENOMEM);

	/* This may sleep, no choice in the matter */
	if (uvm_objwire(uao, 0, size, &plist) != 0) {
		ret = ENOMEM;
		goto free;
	}

	TAILQ_FOREACH(pg, &plist, pageq) {
		paddr_t pa = VM_PAGE_TO_PHYS(pg);

		if (i > 0 && pa == (segs[i - 1].ds_addr +
		    segs[i - 1].ds_len)) {
			/* contiguous, yay */
			segs[i - 1].ds_len += PAGE_SIZE;
			continue;
		}
		segs[i].ds_addr = pa;
		segs[i].ds_len = PAGE_SIZE;
		if (i++ > npages)
			break;
	}
	/* this should be impossible */
	if (pg != TAILQ_END(&plist)) {
		ret = EINVAL;
		goto unwire;
	}

	if ((ret = bus_dmamap_load_raw(dmat, map, segs, i, size, flags)) != 0)
		goto unwire;

	*segp = segs;

	return (0);

unwire:
	uvm_objunwire(uao, 0, size);
free:
	free(segs, M_DRM);
	return (ret);
}

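/*
 * Usage sketch for drm_gem_load_uao() (hypothetical driver code binding a
 * GEM object into a GTT-style aperture; assumes the caller created "map"
 * with bus_dmamap_create() beforehand):
 *
 *	bus_dma_segment_t	*segs;
 *	int			 ret;
 *
 *	ret = drm_gem_load_uao(dev->dmat, map, obj->uao, obj->size,
 *	    BUS_DMA_NOWAIT, &segs);
 *	if (ret != 0)
 *		return (ret);
 *	...program the translation table from the loaded map...
 *
 * The caller keeps the returned segs array and, when tearing the binding
 * down, unloads the map, unwires the object with uvm_objunwire(), and
 * frees segs with free(segs, M_DRM).
 */
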
int
drm_handle_cmp(struct drm_handle *a, struct drm_handle *b)
{
	return (a->handle < b->handle ? -1 : a->handle > b->handle);
}

int
drm_name_cmp(struct drm_obj *a, struct drm_obj *b)
{
	return (a->name < b->name ? -1 : a->name > b->name);
}

SPLAY_GENERATE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);

SPLAY_GENERATE(drm_name_tree, drm_obj, entry, drm_name_cmp);
1736