xref: /openbsd-src/sys/dev/pci/drm/drm_drv.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /* $OpenBSD: drm_drv.c,v 1.149 2016/09/15 02:00:17 dlg Exp $ */
2 /*-
3  * Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
4  * Copyright © 2008 Intel Corporation
5  * Copyright 2003 Eric Anholt
6  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
7  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8  * All Rights Reserved.
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a
11  * copy of this software and associated documentation files (the "Software"),
12  * to deal in the Software without restriction, including without limitation
13  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14  * and/or sell copies of the Software, and to permit persons to whom the
15  * Software is furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice (including the next
18  * paragraph) shall be included in all copies or substantial portions of the
19  * Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
24  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
25  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
26  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
27  * OTHER DEALINGS IN THE SOFTWARE.
28  *
29  * Authors:
30  *    Rickard E. (Rik) Faith <faith@valinux.com>
31  *    Daryll Strauss <daryll@valinux.com>
32  *    Gareth Hughes <gareth@valinux.com>
33  *    Eric Anholt <eric@anholt.net>
34  *    Owain Ainsworth <oga@openbsd.org>
35  *
36  */
37 
38 /** @file drm_drv.c
39  * The catch-all file for DRM device support, including module setup/teardown,
40  * open/close, and ioctl dispatch.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/fcntl.h>
45 #include <sys/filio.h>
46 #include <sys/limits.h>
47 #include <sys/pledge.h>
48 #include <sys/poll.h>
49 #include <sys/specdev.h>
50 #include <sys/systm.h>
#include <sys/ttycom.h> /* for TIOCSPGRP */
52 #include <sys/vnode.h>
53 
54 #include <uvm/uvm.h>
55 #include <uvm/uvm_device.h>
56 
57 #include "drmP.h"
58 #include "drm.h"
59 #include "drm_sarea.h"
60 
#ifdef DRMDEBUG
/* Non-zero enables DRM_DEBUG() output; only built under DRMDEBUG. */
int drm_debug_flag = 1;
#endif
64 
65 int	 drm_firstopen(struct drm_device *);
66 int	 drm_lastclose(struct drm_device *);
67 void	 drm_attach(struct device *, struct device *, void *);
68 int	 drm_probe(struct device *, void *, void *);
69 int	 drm_detach(struct device *, int);
70 void	 drm_quiesce(struct drm_device *);
71 void	 drm_wakeup(struct drm_device *);
72 int	 drm_activate(struct device *, int);
73 int	 drmprint(void *, const char *);
74 int	 drmsubmatch(struct device *, void *, void *);
75 int	 drm_do_ioctl(struct drm_device *, int, u_long, caddr_t);
76 int	 drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
77 	     struct drm_pending_event **);
78 
79 int	 drm_getunique(struct drm_device *, void *, struct drm_file *);
80 int	 drm_version(struct drm_device *, void *, struct drm_file *);
81 int	 drm_setversion(struct drm_device *, void *, struct drm_file *);
82 int	 drm_getmagic(struct drm_device *, void *, struct drm_file *);
83 int	 drm_authmagic(struct drm_device *, void *, struct drm_file *);
84 int	 drm_file_cmp(struct drm_file *, struct drm_file *);
85 SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
86 
87 int	 drm_setunique(struct drm_device *, void *, struct drm_file *);
88 int	 drm_noop(struct drm_device *, void *, struct drm_file *);
89 
90 int	 drm_getcap(struct drm_device *, void *, struct drm_file *);
91 int	 drm_setclientcap(struct drm_device *, void *, struct drm_file *);
92 
/*
 * Build one drm_ioctl_desc entry, indexed by the ioctl's ordinal so a
 * request number maps straight to its slot in the table below.
 */
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}

/**
 * Ioctl table: handler + permission flags for each core DRM ioctl.
 * Entries guarded by "#ifdef __linux__" are not compiled on OpenBSD;
 * many legacy calls are routed to drm_noop instead.
 */
static struct drm_ioctl_desc drm_ioctls[] = {
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
#endif
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
#else
	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_noop, DRM_AUTH),
#endif

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#else
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

	/* legacy DMA buffer management -- Linux only on most entries */
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
#else
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_noop, DRM_AUTH),
#endif
#ifdef __linux__
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

#if __OS_HAS_AGP
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif

	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),

	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	/* GEM object management */
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),

#ifdef notyet
	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
#endif

	/* KMS (modesetting) ioctls */
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

/* Number of slots in the core table (some may be empty/NULL func). */
#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
234 
235 int
236 pledge_ioctl_drm(struct proc *p, long com, dev_t device)
237 {
238 	struct drm_device *dev = drm_get_device_from_kdev(device);
239 	unsigned int nr = DRM_IOCTL_NR(com);
240 	const struct drm_ioctl_desc *ioctl;
241 
242 	if (dev == NULL)
243 		return EPERM;
244 
245 	if (nr < DRM_CORE_IOCTL_COUNT &&
246 	    ((nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)))
247 		ioctl = &drm_ioctls[nr];
248 	else if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END &&
249 	    nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)
250 		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
251 	else
252 		return EPERM;
253 
254 	if (ioctl->flags & DRM_RENDER_ALLOW)
255 		return 0;
256 
257 	/*
258 	 * These are dangerous, but we have to allow them until we
259 	 * have prime/dma-buf support.
260 	 */
261 	switch (com) {
262 	case DRM_IOCTL_GET_MAGIC:
263 	case DRM_IOCTL_GEM_OPEN:
264 		return 0;
265 	}
266 
267 	return EPERM;
268 }
269 
/*
 * DRM_IOCTL_SET_UNIQUE handler.
 *
 * Deprecated in DRM version 1.1: userland may no longer override the
 * bus id, so this always fails.  Returns a Linux-style negative errno
 * (drm_do_ioctl() negates it on the way out).
 */
int
drm_setunique(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	return (-EBUSY);
}
281 
/*
 * No-op ioctl handler: accepted for compatibility, does nothing,
 * always succeeds.
 */
int
drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return 0;
}
288 
289 /*
290  * attach drm to a pci-based driver.
291  *
292  * This function does all the pci-specific calculations for the
293  * drm_attach_args.
294  */
295 struct device *
296 drm_attach_pci(struct drm_driver_info *driver, struct pci_attach_args *pa,
297     int is_agp, int console, struct device *dev)
298 {
299 	struct drm_attach_args arg;
300 	pcireg_t subsys;
301 
302 	arg.driver = driver;
303 	arg.dmat = pa->pa_dmat;
304 	arg.bst = pa->pa_memt;
305 	arg.is_agp = is_agp;
306 	arg.console = console;
307 
308 	arg.pci_vendor = PCI_VENDOR(pa->pa_id);
309 	arg.pci_device = PCI_PRODUCT(pa->pa_id);
310 
311 	subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
312 	arg.pci_subvendor = PCI_VENDOR(subsys);
313 	arg.pci_subdevice = PCI_PRODUCT(subsys);
314 
315 	arg.pc = pa->pa_pc;
316 	arg.tag = pa->pa_tag;
317 	arg.bridgetag = pa->pa_bridgetag;
318 
319 	arg.busid_len = 20;
320 	arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT);
321 	if (arg.busid == NULL) {
322 		printf("%s: no memory for drm\n", dev->dv_xname);
323 		return (NULL);
324 	}
325 	snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
326 	    pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
327 
328 	return (config_found_sm(dev, &arg, drmprint, drmsubmatch));
329 }
330 
331 int
332 drmprint(void *aux, const char *pnp)
333 {
334 	if (pnp != NULL)
335 		printf("drm at %s", pnp);
336 	return (UNCONF);
337 }
338 
339 int
340 drmsubmatch(struct device *parent, void *match, void *aux)
341 {
342 	extern struct cfdriver drm_cd;
343 	struct cfdata *cf = match;
344 
345 	/* only allow drm to attach */
346 	if (cf->cf_driver == &drm_cd)
347 		return ((*cf->cf_attach->ca_match)(parent, match, aux));
348 	return (0);
349 }
350 
351 int
352 drm_pciprobe(struct pci_attach_args *pa, const struct drm_pcidev *idlist)
353 {
354 	const struct drm_pcidev *id_entry;
355 
356 	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
357 	    PCI_PRODUCT(pa->pa_id), idlist);
358 	if (id_entry != NULL)
359 		return 1;
360 
361 	return 0;
362 }
363 
364 int
365 drm_probe(struct device *parent, void *match, void *aux)
366 {
367 	struct cfdata *cf = match;
368 	struct drm_attach_args *da = aux;
369 
370 	if (cf->drmdevcf_console != DRMDEVCF_CONSOLE_UNK) {
371 		/*
372 		 * If console-ness of device specified, either match
373 		 * exactly (at high priority), or fail.
374 		 */
375 		if (cf->drmdevcf_console != 0 && da->console != 0)
376 			return (10);
377 		else
378 			return (0);
379 	}
380 
381 	/* If console-ness unspecified, it wins. */
382 	return (1);
383 }
384 
/*
 * autoconf attach hook: initialize a drm_device from the args built by
 * drm_attach_pci() -- locks, the per-open file tree, and (optionally)
 * AGP and GEM state.
 */
void
drm_attach(struct device *parent, struct device *self, void *aux)
{
	struct drm_device *dev = (struct drm_device *)self;
	struct drm_attach_args *da = aux;
	int bus, slot, func;
	int ret;

	/* dev_private also serves as the "successfully attached" flag */
	dev->dev_private = parent;
	dev->driver = da->driver;

	dev->dmat = da->dmat;
	dev->bst = da->bst;
	/* ownership of the malloc'd busid string passes to the device */
	dev->unique = da->busid;
	dev->unique_len = da->busid_len;
	/* fill in the embedded pci_dev used by Linux-derived drm code */
	dev->pdev = &dev->_pdev;
	dev->pci_vendor = dev->pdev->vendor = da->pci_vendor;
	dev->pci_device = dev->pdev->device = da->pci_device;
	dev->pdev->subsystem_vendor = da->pci_subvendor;
	dev->pdev->subsystem_device = da->pci_subdevice;

	pci_decompose_tag(da->pc, da->tag, &bus, &slot, &func);
	dev->pdev->bus = &dev->pdev->_bus;
	dev->pdev->bus->number = bus;
	dev->pdev->devfn = PCI_DEVFN(slot, func);

	dev->pc = da->pc;
	dev->pdev->pc = da->pc;
	dev->bridgetag = da->bridgetag;
	dev->pdev->tag = da->tag;
	dev->pdev->pci = (struct pci_softc *)parent->dv_parent;

	rw_init(&dev->struct_mutex, "drmdevlk");
	mtx_init(&dev->event_lock, IPL_TTY);
	mtx_init(&dev->quiesce_mtx, IPL_NONE);

	SPLAY_INIT(&dev->files);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
#if __OS_HAS_AGP
		if (da->is_agp)
			dev->agp = drm_agp_init();
#endif
		/* try to write-combine the AGP aperture via an MTRR */
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
		/* XXX unique name */
		pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE, 0,
		    "drmobjpl", NULL);
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto error;
		}
	}

	printf("\n");
	return;

error:
	/* NOTE(review): objpl and the busid string are not torn down on
	 * this path -- confirm whether a failed attach can leak them. */
	drm_lastclose(dev);
	dev->dev_private = NULL;
}
458 
459 int
460 drm_detach(struct device *self, int flags)
461 {
462 	struct drm_device *dev = (struct drm_device *)self;
463 
464 	drm_lastclose(dev);
465 
466 	if (dev->driver->driver_features & DRIVER_GEM)
467 		drm_gem_destroy(dev);
468 
469 	if (dev->driver->driver_features & DRIVER_GEM)
470 		pool_destroy(&dev->objpl);
471 
472 	drm_vblank_cleanup(dev);
473 
474 	if (dev->agp && dev->agp->mtrr) {
475 		int retcode;
476 
477 		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
478 		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
479 		DRM_DEBUG("mtrr_del = %d", retcode);
480 	}
481 
482 
483 	if (dev->agp != NULL) {
484 		drm_free(dev->agp);
485 		dev->agp = NULL;
486 	}
487 
488 	return 0;
489 }
490 
/*
 * Suspend path: stop new ioctls and wait for in-flight ones to drain.
 *
 * Raises dev->quiesce under quiesce_mtx (drmioctl() checks it and
 * sleeps before entering the driver), then sleeps until
 * dev->quiesce_count -- the number of ioctls currently inside the
 * driver -- drops to zero; drmioctl() wakes us on each decrement.
 */
void
drm_quiesce(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 1;
	while (dev->quiesce_count > 0) {
		msleep(&dev->quiesce_count, &dev->quiesce_mtx,
		    PZERO, "drmqui", 0);
	}
	mtx_leave(&dev->quiesce_mtx);
}
502 
/*
 * Resume path: clear the quiesce flag and wake any processes parked in
 * drmioctl() waiting on &dev->quiesce.
 */
void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
511 
512 int
513 drm_activate(struct device *self, int act)
514 {
515 	struct drm_device *dev = (struct drm_device *)self;
516 
517 	switch (act) {
518 	case DVACT_QUIESCE:
519 		drm_quiesce(dev);
520 		break;
521 	case DVACT_WAKEUP:
522 		drm_wakeup(dev);
523 		break;
524 	}
525 
526 	return (0);
527 }
528 
/* autoconf(9) glue: instance size plus probe/attach/detach/activate */
struct cfattach drm_ca = {
	sizeof(struct drm_device), drm_probe, drm_attach,
	drm_detach, drm_activate
};

/* autoconf(9) driver record; unit numbers index drm_cd.cd_devs */
struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
537 
538 const struct drm_pcidev *
539 drm_find_description(int vendor, int device, const struct drm_pcidev *idlist)
540 {
541 	int i = 0;
542 
543 	for (i = 0; idlist[i].vendor != 0; i++) {
544 		if ((idlist[i].vendor == vendor) &&
545 		    (idlist[i].device == device))
546 			return &idlist[i];
547 	}
548 	return NULL;
549 }
550 
551 int
552 drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
553 {
554 	return (f1->minor < f2->minor ? -1 : f1->minor > f2->minor);
555 }
556 
557 SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp);
558 
559 struct drm_file *
560 drm_find_file_by_minor(struct drm_device *dev, int minor)
561 {
562 	struct drm_file	key;
563 
564 	key.minor = minor;
565 	return (SPLAY_FIND(drm_file_tree, &dev->files, &key));
566 }
567 
568 struct drm_device *
569 drm_get_device_from_kdev(dev_t kdev)
570 {
571 	int unit = minor(kdev) & ((1 << CLONE_SHIFT) - 1);
572 
573 	if (unit < drm_cd.cd_ndevs)
574 		return drm_cd.cd_devs[unit];
575 
576 	return NULL;
577 }
578 
579 int
580 drm_firstopen(struct drm_device *dev)
581 {
582 	if (dev->driver->firstopen)
583 		dev->driver->firstopen(dev);
584 
585 	dev->magicid = 1;
586 
587 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
588 		dev->irq_enabled = 0;
589 	dev->if_version = 0;
590 
591 	dev->buf_pgid = 0;
592 
593 	DRM_DEBUG("\n");
594 
595 	return 0;
596 }
597 
/*
 * Runs when the last file handle closes (and from drm_detach()): give
 * the driver its lastclose hook, then drop irq and AGP state for
 * legacy (non-KMS) drivers.
 */
int
drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	/* KMS drivers manage their own interrupt lifetime */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

#if __OS_HAS_AGP
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_agp_takedown(dev);
#endif

	return 0;
}
616 
/*
 * Device open entry point for /dev/drm*: allocate and initialize the
 * per-open drm_file, run drm_firstopen() on the first concurrent open
 * and call the driver's open hook.  Returns a positive errno.
 */
int
drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device	*dev = NULL;
	struct drm_file		*file_priv;
	int			 ret = 0;

	/* dev_private is only set once drm_attach() succeeded */
	dev = drm_get_device_from_kdev(kdev);
	if (dev == NULL || dev->dev_private == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (flags & O_EXCL)
		return (EBUSY); /* No exclusive opens */

	mutex_lock(&dev->struct_mutex);
	if (dev->open_count++ == 0) {
		mutex_unlock(&dev->struct_mutex);
		if ((ret = drm_firstopen(dev)) != 0)
			goto err;
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	/* always allocate at least enough space for our data */
	file_priv = drm_calloc(1, max(dev->driver->file_priv_size,
	    sizeof(*file_priv)));
	if (file_priv == NULL) {
		ret = ENOMEM;
		goto err;
	}

	/*
	 * NOTE(review): this stores the address of a local variable,
	 * which is dangling after return; it appears to serve only as an
	 * opaque token, but confirm nothing dereferences filp later.
	 */
	file_priv->filp = (void *)&file_priv;
	file_priv->minor = minor(kdev);
	INIT_LIST_HEAD(&file_priv->fbs);
	INIT_LIST_HEAD(&file_priv->event_list);
	file_priv->event_space = 4096; /* 4k for event buffer */
	DRM_DEBUG("minor = %d\n", file_priv->minor);

	/* for compatibility root is always authenticated */
	file_priv->authenticated = DRM_SUSER(p);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_open(dev, file_priv);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file_priv);
		if (ret != 0) {
			goto free_priv;
		}
	}

	mutex_lock(&dev->struct_mutex);
	/* first opener automatically becomes master if root */
	if (SPLAY_EMPTY(&dev->files) && !DRM_SUSER(p)) {
		mutex_unlock(&dev->struct_mutex);
		ret = EPERM;
		goto free_priv;
	}

	file_priv->is_master = SPLAY_EMPTY(&dev->files);

	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv);
	mutex_unlock(&dev->struct_mutex);

	return (0);

free_priv:
	/* NOTE(review): drm_gem_open() state is not released on this
	 * path -- verify whether that can leak. */
	drm_free(file_priv);
err:
	/* undo the open_count increment taken above */
	mutex_lock(&dev->struct_mutex);
	--dev->open_count;
	mutex_unlock(&dev->struct_mutex);
	return (ret);
}
693 
/*
 * Final close of a drm file handle: flush pending vblank/page-flip
 * events, release fb and gem state, unlink the drm_file, and run
 * drm_lastclose() when the last open goes away.
 */
int
drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event *e, *et;
	struct drm_pending_vblank_event	*v, *vt;
	int				 retcode = 0;

	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		retcode = EINVAL;
		/* struct_mutex stays held; the done label expects that */
		goto done;
	}
	mutex_unlock(&dev->struct_mutex);

	if (dev->driver->close != NULL)
		dev->driver->close(dev, file_priv);
	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)&dev->device, dev->open_count);

	mtx_enter(&dev->event_lock);

	/* Remove pending flips owned by this file */
	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
		if (v->base.file_priv == file_priv) {
			list_del(&v->base.link);
			drm_vblank_put(dev, v->pipe);
			v->base.destroy(&v->base);
		}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		e->destroy(e);
	}

	mtx_leave(&dev->event_lock);

	if (dev->driver->driver_features & DRIVER_MODESET)
		drm_fb_release(dev, file_priv);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	mutex_lock(&dev->struct_mutex);

	dev->buf_pgid = 0;

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);

	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
	drm_free(file_priv);

done:
	/* reached with struct_mutex held on both paths */
	if (--dev->open_count == 0) {
		mutex_unlock(&dev->struct_mutex);
		retcode = drm_lastclose(dev);
	} else
		mutex_unlock(&dev->struct_mutex);

	return (retcode);
}
768 
/*
 * Resolve cmd to its descriptor (core table or driver-private range),
 * enforce the descriptor's permission flags, and dispatch to the
 * handler.  Returns a Linux-style negative errno (drmioctl() negates).
 */
int
drm_do_ioctl(struct drm_device *dev, int minor, u_long cmd, caddr_t data)
{
	struct drm_file *file_priv;
	const struct drm_ioctl_desc *ioctl;
	drm_ioctl_t *func;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	int retcode = -EINVAL;
	unsigned int usize, asize;

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor);
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return -EINVAL;
	}

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, (u_int)DRM_IOCTL_NR(cmd), (long)&dev->device,
	    file_priv->authenticated);

	/* generic file/tty ioctls handled inline */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case TIOCSPGRP:
		dev->buf_pgid = *(int *)data;
		return 0;

	case TIOCGPGRP:
		*(int *)data = dev->buf_pgid;
		return 0;
	}

	/* reject numbers outside both the core and the driver range */
	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
		return (-EINVAL);
	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		/* driver-private ioctl */
		uint32_t drv_size;
		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
		drv_size = IOCPARM_LEN(ioctl->cmd_drv);
		usize = asize = IOCPARM_LEN(cmd);
		if (drv_size > asize)
			asize = drv_size;
	} else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
		/* core ioctl; canonicalize cmd from the table entry */
		uint32_t drv_size;
		ioctl = &drm_ioctls[nr];

		drv_size = IOCPARM_LEN(ioctl->cmd_drv);
		usize = asize = IOCPARM_LEN(cmd);
		if (drv_size > asize)
			asize = drv_size;
		cmd = ioctl->cmd;
	} else
		return (-EINVAL);

	func = ioctl->func;
	if (!func) {
		DRM_DEBUG("no function\n");
		return (-EINVAL);
	}

	/* permission checks: root-only, authenticated, master */
	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(curproc)) ||
	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !file_priv->is_master))
		return (-EACCES);

	if (ioctl->flags & DRM_UNLOCKED)
		retcode = func(dev, data, file_priv);
	else {
		/* XXX lock */
		retcode = func(dev, data, file_priv);
		/* XXX unlock */
	}

	return (retcode);
}
849 
/* drmioctl is called whenever a process performs an ioctl on /dev/drm.
 *
 * Wraps drm_do_ioctl() in the quiesce protocol: while the device is
 * quiescing (suspend) new ioctls sleep, and each in-flight ioctl is
 * counted so drm_quiesce() can wait for the count to drain to zero.
 */
int
drmioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int error;

	if (dev == NULL)
		return ENODEV;

	mtx_enter(&dev->quiesce_mtx);
	while (dev->quiesce)
		msleep(&dev->quiesce, &dev->quiesce_mtx, PZERO, "drmioc", 0);
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* drm_do_ioctl() returns negative errnos; flip the sign here */
	error = -drm_do_ioctl(dev, minor(kdev), cmd, data);
	/* NOTE(review): after negation a normal errno is positive, so
	 * this only fires when drm_do_ioctl() returned a positive value
	 * -- looks like a sign-convention debug check; confirm. */
	if (error < 0 && error != ERESTART && error != EJUSTRETURN)
		printf("%s: cmd 0x%lx errno %d\n", __func__, cmd, error);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (error);
}
879 
/*
 * read(2) backend: deliver queued drm events (e.g. vblank/flip
 * completions) to userland.  Blocks until at least one event is queued
 * unless IO_NDELAY is set.
 */
int
drmread(dev_t kdev, struct uio *uio, int ioflag)
{
	struct drm_device		*dev = drm_get_device_from_kdev(kdev);
	struct drm_file			*file_priv;
	struct drm_pending_event	*ev;
	int		 		 error = 0;

	if (dev == NULL)
		return (ENXIO);

	mutex_lock(&dev->struct_mutex);
	file_priv = drm_find_file_by_minor(dev, minor(kdev));
	mutex_unlock(&dev->struct_mutex);
	if (file_priv == NULL)
		return (ENXIO);

	/*
	 * The semantics are a little weird here. We will wait until we
	 * have events to process, but as soon as we have events we will
	 * only deliver as many as we have.
	 * Note that events are atomic, if the read buffer will not fit in
	 * a whole event, we won't read any of it out.
	 */
	mtx_enter(&dev->event_lock);
	while (error == 0 && list_empty(&file_priv->event_list)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&dev->event_lock);
			return (EAGAIN);
		}
		error = msleep(&file_priv->event_list, &dev->event_lock,
		    PWAIT | PCATCH, "drmread", 0);
	}
	if (error) {
		mtx_leave(&dev->event_lock);
		return (error);
	}
	/* drm_dequeue_event() drops event_lock before returning */
	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
		MUTEX_ASSERT_UNLOCKED(&dev->event_lock);
		/* XXX we always destroy the event on error. */
		error = uiomove(ev->event, ev->event->length, uio);
		ev->destroy(ev);
		if (error)
			break;
		mtx_enter(&dev->event_lock);
	}
	MUTEX_ASSERT_UNLOCKED(&dev->event_lock);

	return (error);
}
930 
/*
 * Dequeue an event from the file priv in question, returning 1 if an
 * event was found. We take the resid from the read as a parameter because
 * we will only dequeue an event if the read buffer has space to fit the
 * entire thing.
 *
 * We are called locked, but we will *unlock* the queue on return so that
 * we may sleep to copyout the event.
 */
int
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
    size_t resid, struct drm_pending_event **out)
{
	struct drm_pending_event *e = NULL;
	int gotone = 0;

	MUTEX_ASSERT_LOCKED(&dev->event_lock);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
			     struct drm_pending_event, link);
	/* events are delivered whole or not at all */
	if (e->event->length > resid)
		goto out;

	/* return the event's space to this file's event budget */
	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	gotone = 1;

out:
	/* always leaves event_lock released; see comment above */
	mtx_leave(&dev->event_lock);

	return (gotone);
}
967 
968 /* XXX kqfilter ... */
969 int
970 drmpoll(dev_t kdev, int events, struct proc *p)
971 {
972 	struct drm_device	*dev = drm_get_device_from_kdev(kdev);
973 	struct drm_file		*file_priv;
974 	int		 	 revents = 0;
975 
976 	if (dev == NULL)
977 		return (POLLERR);
978 
979 	mutex_lock(&dev->struct_mutex);
980 	file_priv = drm_find_file_by_minor(dev, minor(kdev));
981 	mutex_unlock(&dev->struct_mutex);
982 	if (file_priv == NULL)
983 		return (POLLERR);
984 
985 	mtx_enter(&dev->event_lock);
986 	if (events & (POLLIN | POLLRDNORM)) {
987 		if (!list_empty(&file_priv->event_list))
988 			revents |=  events & (POLLIN | POLLRDNORM);
989 		else
990 			selrecord(p, &file_priv->rsel);
991 	}
992 	mtx_leave(&dev->event_lock);
993 
994 	return (revents);
995 }
996 
/*
 * mmap(2) of the drm character device itself is not supported; always
 * return -1, which the VM layer treats as an invalid offset.
 */
paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	return -1;
}
1002 
1003 /*
1004  * Beginning in revision 1.1 of the DRM interface, getunique will return
1005  * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
1006  * before setunique has been called.  The format for the bus-specific part of
1007  * the unique is not defined for any other bus.
1008  */
1009 int
1010 drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
1011 {
1012 	struct drm_unique	 *u = data;
1013 
1014 	if (u->unique_len >= dev->unique_len) {
1015 		if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
1016 			return -EFAULT;
1017 	}
1018 	u->unique_len = dev->unique_len;
1019 
1020 	return 0;
1021 }
1022 
/*
 * DRM_IOCTL_GET_CAP: report the value of one device/driver capability
 * in req->value.  Unknown capabilities fail with EINVAL.
 */
int
drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_get_cap *req = data;

	req->value = 0;
	switch (req->capability) {
	case DRM_CAP_DUMB_BUFFER:
		/* Dumb buffers are available iff the driver implements them. */
		if (dev->driver->dumb_create)
			req->value = 1;
		break;
	case DRM_CAP_VBLANK_HIGH_CRTC:
		/* Always advertised. */
		req->value = 1;
		break;
	case DRM_CAP_DUMB_PREFERRED_DEPTH:
		req->value = dev->mode_config.preferred_depth;
		break;
	case DRM_CAP_DUMB_PREFER_SHADOW:
		req->value = dev->mode_config.prefer_shadow;
		break;
#ifdef notyet
	case DRM_CAP_PRIME:
		/* PRIME fd import/export, disabled until implemented. */
		req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
		req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
		break;
#endif
	case DRM_CAP_TIMESTAMP_MONOTONIC:
		req->value = drm_timestamp_monotonic;
		break;
	case DRM_CAP_ASYNC_PAGE_FLIP:
		req->value = dev->mode_config.async_page_flip;
		break;
	case DRM_CAP_CURSOR_WIDTH:
		/* Fall back to 64 when the driver doesn't advertise a size. */
		if (dev->mode_config.cursor_width)
			req->value = dev->mode_config.cursor_width;
		else
			req->value = 64;
		break;
	case DRM_CAP_CURSOR_HEIGHT:
		/* Same 64-pixel fallback as for the width. */
		if (dev->mode_config.cursor_height)
			req->value = dev->mode_config.cursor_height;
		else
			req->value = 64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1072 
1073 /**
1074  * Set device/driver capabilities
1075  */
1076 int
1077 drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
1078 {
1079 	struct drm_set_client_cap *req = data;
1080 
1081 	switch (req->capability) {
1082 	case DRM_CLIENT_CAP_STEREO_3D:
1083 		if (req->value > 1)
1084 			return -EINVAL;
1085 		file_priv->stereo_allowed = req->value;
1086 		break;
1087 	default:
1088 		return -EINVAL;
1089 	}
1090 
1091 	return 0;
1092 }
1093 
1094 #define DRM_IF_MAJOR	1
1095 #define DRM_IF_MINOR	4
1096 
/*
 * DRM_IOCTL_VERSION: report the driver's version numbers and its
 * name/date/description strings to userland.
 */
int
drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version	*version = data;
	int			 len;

/*
 * Copy the NUL-terminated string `value' to the userland buffer described
 * by `name'/`name'_len.  At most the caller-supplied length is copied, but
 * `name'_len is always set to the full string length so userland can
 * detect truncation and retry with a bigger buffer.  Makes the enclosing
 * function return -EFAULT on copyout failure.
 */
#define DRM_COPY(name, value)						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( DRM_COPY_TO_USER( name, value, len ) )		\
			return -EFAULT;				\
	}

	/* Numeric driver version. */
	version->version_major = dev->driver->major;
	version->version_minor = dev->driver->minor;
	version->version_patchlevel = dev->driver->patchlevel;

	/* Driver identification strings. */
	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}
1122 
1123 int
1124 drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
1125 {
1126 	struct drm_set_version	ver, *sv = data;
1127 	int			if_version;
1128 
1129 	/* Save the incoming data, and set the response before continuing
1130 	 * any further.
1131 	 */
1132 	ver = *sv;
1133 	sv->drm_di_major = DRM_IF_MAJOR;
1134 	sv->drm_di_minor = DRM_IF_MINOR;
1135 	sv->drm_dd_major = dev->driver->major;
1136 	sv->drm_dd_minor = dev->driver->minor;
1137 
1138 	/*
1139 	 * We no longer support interface versions less than 1.1, so error
1140 	 * out if the xserver is too old. 1.1 always ties the drm to a
1141 	 * certain busid, this was done on attach
1142 	 */
1143 	if (ver.drm_di_major != -1) {
1144 		if (ver.drm_di_major != DRM_IF_MAJOR || ver.drm_di_minor < 1 ||
1145 		    ver.drm_di_minor > DRM_IF_MINOR) {
1146 			return -EINVAL;
1147 		}
1148 		if_version = DRM_IF_VERSION(ver.drm_di_major, ver.drm_dd_minor);
1149 		dev->if_version = imax(if_version, dev->if_version);
1150 	}
1151 
1152 	if (ver.drm_dd_major != -1) {
1153 		if (ver.drm_dd_major != dev->driver->major ||
1154 		    ver.drm_dd_minor < 0 ||
1155 		    ver.drm_dd_minor > dev->driver->minor)
1156 			return -EINVAL;
1157 	}
1158 
1159 	return 0;
1160 }
1161 
/*
 * Allocate `size' bytes of zeroed, DMA-safe memory along with the
 * bookkeeping to use it: create a dmamap, allocate and kva-map the
 * memory, then load it into the map.  Returns NULL on any failure with
 * all partially-acquired resources released.  Free with
 * drm_dmamem_free().  Assumes nsegments >= 1.
 */
struct drm_dmamem *
drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
    int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
{
	struct drm_dmamem	*mem;
	size_t			 strsize;
	/*
	 * segs is the last member of the struct since we modify the size
	 * to allow extra segments if more than one are allowed.
	 */
	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
	mem = malloc(strsize, M_DRM, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (NULL);

	mem->size = size;

	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map) != 0)
		goto strfree;

	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,
	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,
	    &mem->kva, BUS_DMA_NOWAIT | mapflags) != 0)
		goto free;

	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,
	    NULL, BUS_DMA_NOWAIT | loadflags) != 0)
		goto unmap;

	return (mem);

	/* Error unwind: release resources in reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(dmat, mem->kva, size);
free:
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
destroy:
	bus_dmamap_destroy(dmat, mem->map);
strfree:
	free(mem, M_DRM, 0);

	return (NULL);
}
1208 
/*
 * Release everything acquired by drm_dmamem_alloc(), in reverse order
 * of acquisition: unload the map, unmap and free the DMA memory,
 * destroy the map, then free the bookkeeping structure.  A NULL `mem'
 * is a no-op.
 */
void
drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
{
	if (mem == NULL)
		return;

	bus_dmamap_unload(dmat, mem->map);
	bus_dmamem_unmap(dmat, mem->kva, mem->size);
	bus_dmamem_free(dmat, mem->segs, mem->nsegs);
	bus_dmamap_destroy(dmat, mem->map);
	free(mem, M_DRM, 0);
}
1221 
1222 /**
1223  * Called by the client, this returns a unique magic number to be authorized
1224  * by the master.
1225  *
1226  * The master may use its own knowledge of the client (such as the X
1227  * connection that the magic is passed over) to determine if the magic number
1228  * should be authenticated.
1229  */
1230 int
1231 drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
1232 {
1233 	struct drm_auth		*auth = data;
1234 
1235 	if (dev->magicid == 0)
1236 		dev->magicid = 1;
1237 
1238 	/* Find unique magic */
1239 	if (file_priv->magic) {
1240 		auth->magic = file_priv->magic;
1241 	} else {
1242 		mutex_lock(&dev->struct_mutex);
1243 		file_priv->magic = auth->magic = dev->magicid++;
1244 		mutex_unlock(&dev->struct_mutex);
1245 		DRM_DEBUG("%d\n", auth->magic);
1246 	}
1247 
1248 	DRM_DEBUG("%u\n", auth->magic);
1249 	return 0;
1250 }
1251 
1252 /**
1253  * Marks the client associated with the given magic number as authenticated.
1254  */
1255 int
1256 drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
1257 {
1258 	struct drm_file	*p;
1259 	struct drm_auth	*auth = data;
1260 	int		 ret = -EINVAL;
1261 
1262 	DRM_DEBUG("%u\n", auth->magic);
1263 
1264 	if (auth->magic == 0)
1265 		return ret;
1266 
1267 	mutex_lock(&dev->struct_mutex);
1268 	SPLAY_FOREACH(p, drm_file_tree, &dev->files) {
1269 		if (p->magic == auth->magic) {
1270 			p->authenticated = 1;
1271 			p->magic = 0;
1272 			ret = 0;
1273 			break;
1274 		}
1275 	}
1276 	mutex_unlock(&dev->struct_mutex);
1277 
1278 	return ret;
1279 }
1280 
1281 /*
1282  * Compute order.  Can be made faster.
1283  */
int
drm_order(unsigned long size)
{
	int		order;
	unsigned long	tmp;

	/* order = floor(log2(size)); 0 when size is 0 or 1. */
	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	/*
	 * Round up when size is not a power of two.  The previous test,
	 * "size & ~(1 << order)", shifted an int and was undefined for
	 * sizes wider than int on LP64; the power-of-two test below is
	 * equivalent and safe for the full unsigned long range.
	 */
	if (size & (size - 1))
		++order;

	return order;
}
1298 
/*
 * Probe the PCIe link capabilities of the bridge above the device and
 * translate the supported link speeds into DRM_PCIE_SPEED_* flags in
 * *mask.  Returns -EINVAL when there is no PCIe bridge above us or the
 * bridge vendor is known to be problematic.
 */
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	pci_chipset_tag_t	pc = dev->pc;
	pcitag_t		tag;
	int			pos ;
	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
	pcireg_t		id;

	*mask = 0;

	/* No bridge tag means we are not behind a PCIe bridge. */
	if (dev->bridgetag == NULL)
		return -EINVAL;
	tag = *dev->bridgetag;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return -EINVAL;

	id = pci_conf_read(pc, tag, PCI_ID_REG);

	/* We've been informed that VIA and ServerWorks bridges don't make the cut. */
	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
		return -EINVAL;

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
	/* Link Capabilities 2 only exists for PCIe capability version >= 2. */
	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);

	/* Max link speed field (bits 3:0) of LCAP. */
	lnkcap &= 0x0f;
	/* Supported link speeds vector (bits 7:1) of LCAP2. */
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & 2)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & 4)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & 8)
			*mask |= DRM_PCIE_SPEED_80;
	} else {
		/* Pre-3.0 devices: derive speeds from LCAP alone. */
		if (lnkcap & 1)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & 2)
			*mask |= DRM_PCIE_SPEED_50;
	}

	DRM_INFO("probing gen 2 caps for device 0x%04x:0x%04x = %x/%x\n",
	    PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap, lnkcap2);
	return 0;
}
1350