xref: /openbsd-src/sys/dev/pci/drm/drm_irq.c (revision 850e275390052b330d93020bf619a739a3c277ac)
1 /*-
2  * Copyright 2003 Eric Anholt
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
20  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <anholt@FreeBSD.org>
25  *
26  */
27 
28 /** @file drm_irq.c
29  * Support code for handling setup/teardown of interrupt handlers and
30  * handing interrupt handlers off to the drivers.
31  */
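
/*
 * Illustrative sketch of the driver side: this file only calls back into
 * the driver, so a driver fills in the hooks referenced below before it
 * enables interrupts.  The field names are the ones used in this file;
 * the mydrv_* functions are hypothetical placeholders.
 *
 *	dev->driver.use_irq = 1;
 *	dev->driver.irq_preinstall = mydrv_irq_preinstall;
 *	dev->driver.irq_handler = mydrv_irq_handler;
 *	dev->driver.irq_postinstall = mydrv_irq_postinstall;
 *	dev->driver.irq_uninstall = mydrv_irq_uninstall;
 *	dev->driver.get_vblank_counter = mydrv_get_vblank_counter;
 *	dev->driver.enable_vblank = mydrv_enable_vblank;
 *	dev->driver.disable_vblank = mydrv_disable_vblank;
 *
 * drm_irq_install()/drm_irq_uninstall() call the irq_* hooks around
 * pci_intr_establish()/pci_intr_disestablish(); drm_vblank_init() is
 * expected to be called once with the CRTC count before the vblank
 * helpers are used.
 */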
32 
33 #include <sys/workq.h>
34 
35 #include "drmP.h"
36 #include "drm.h"
37 
38 irqreturn_t	drm_irq_handler_wrap(DRM_IRQ_ARGS);
39 void		drm_locked_task(void *, void *);
40 void		drm_update_vblank_count(struct drm_device *, int);
41 void		vblank_disable(void *);
42 
43 int
44 drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv)
45 {
46 	struct drm_irq_busid	*irq = data;
47 
48 	if ((irq->busnum >> 8) != dev->pci_domain ||
49 	    (irq->busnum & 0xff) != dev->pci_bus ||
50 	    irq->devnum != dev->pci_slot ||
51 	    irq->funcnum != dev->pci_func)
52 		return EINVAL;
53 
54 	irq->irq = dev->irq;
55 
56 	DRM_DEBUG("%d:%d:%d => IRQ %d\n", irq->busnum, irq->devnum,
57 	    irq->funcnum, irq->irq);
58 
59 	return 0;
60 }
61 
62 irqreturn_t
63 drm_irq_handler_wrap(DRM_IRQ_ARGS)
64 {
65 	irqreturn_t ret;
66 	struct drm_device *dev = (struct drm_device *)arg;
67 
68 	DRM_SPINLOCK(&dev->irq_lock);
69 	ret = dev->driver.irq_handler(arg);
70 	DRM_SPINUNLOCK(&dev->irq_lock);
71 
72 	return ret;
73 }
74 
75 int
76 drm_irq_install(struct drm_device *dev)
77 {
78 	int retcode;
79 	pci_intr_handle_t ih;
80 	const char *istr;
81 
82 	if (dev->irq == 0 || dev->dev_private == NULL)
83 		return (EINVAL);
84 
85 	DRM_DEBUG("irq=%d\n", dev->irq);
86 
87 	DRM_LOCK();
88 	if (dev->irq_enabled) {
89 		DRM_UNLOCK();
90 		return (EBUSY);
91 	}
92 	dev->irq_enabled = 1;
93 	DRM_UNLOCK();
94 
95 	mtx_init(&dev->irq_lock, IPL_BIO);
96 
97 	/* Before installing handler */
98 	dev->driver.irq_preinstall(dev);
99 
100 	/* Install handler */
101 	if (pci_intr_map(&dev->pa, &ih) != 0) {
102 		retcode = ENOENT;
103 		goto err;
104 	}
105 	istr = pci_intr_string(dev->pa.pa_pc, ih);
106 	dev->irqh = pci_intr_establish(dev->pa.pa_pc, ih, IPL_BIO,
107 	    drm_irq_handler_wrap, dev, dev->device.dv_xname);
108 	if (!dev->irqh) {
109 		retcode = ENOENT;
110 		goto err;
111 	}
112 	DRM_DEBUG("%s: interrupting at %s\n", dev->device.dv_xname, istr);
113 
114 	/* After installing handler */
115 	dev->driver.irq_postinstall(dev);
116 
117 	return 0;
118 err:
119 	DRM_LOCK();
120 	dev->irq_enabled = 0;
121 	DRM_SPINUNINIT(&dev->irq_lock);
122 	DRM_UNLOCK();
123 	return retcode;
124 }
125 
126 int
127 drm_irq_uninstall(struct drm_device *dev)
128 {
129 
130 	DRM_LOCK();
131 	if (!dev->irq_enabled) {
132 		DRM_UNLOCK();
133 		return (EINVAL);
134 	}
135 
136 	dev->irq_enabled = 0;
137 	DRM_UNLOCK();
138 
139 	DRM_DEBUG("irq=%d\n", dev->irq);
140 
141 	dev->driver.irq_uninstall(dev);
142 
143 	pci_intr_disestablish(dev->pa.pa_pc, dev->irqh);
144 
145 	drm_vblank_cleanup(dev);
146 	DRM_SPINUNINIT(&dev->irq_lock);
147 
148 	return 0;
149 }
150 
151 int
152 drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
153 {
154 	struct drm_control	*ctl = data;
155 
156 	/* Handle drivers that used to require IRQ setup but no longer do. */
157 	if (!dev->driver.use_irq)
158 		return (0);
159 
160 	switch (ctl->func) {
161 	case DRM_INST_HANDLER:
162 		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
163 		    ctl->irq != dev->irq)
164 			return (EINVAL);
165 		return (drm_irq_install(dev));
166 	case DRM_UNINST_HANDLER:
167 		return (drm_irq_uninstall(dev));
168 	default:
169 		return (EINVAL);
170 	}
171 }
172 
173 void
174 vblank_disable(void *arg)
175 {
176 	struct drm_device *dev = (struct drm_device *)arg;
177 	int i;
178 
179 	DRM_SPINLOCK(&dev->vbl_lock);
180 	if (!dev->vblank_disable_allowed)
181 		goto out;
182 
183 	for (i = 0; i < dev->num_crtcs; i++) {
184 		if (atomic_read(&dev->vblank[i].vbl_refcount) == 0 &&
185 		    dev->vblank[i].vbl_enabled) {
186 			dev->vblank[i].last_vblank =
187 			    dev->driver.get_vblank_counter(dev, i);
188 			dev->driver.disable_vblank(dev, i);
189 			dev->vblank[i].vbl_enabled = 0;
190 		}
191 	}
192 out:
193 	DRM_SPINUNLOCK(&dev->vbl_lock);
194 }
195 
196 void
197 drm_vblank_cleanup(struct drm_device *dev)
198 {
199 	if (dev->num_crtcs == 0)
200 		return; /* not initialised */
201 
202 	timeout_del(&dev->vblank_disable_timer);
203 
204 	vblank_disable(dev);
205 
206 	drm_free(dev->vblank, sizeof(*dev->vblank) *
207 	    dev->num_crtcs, M_DRM);
208 
209 	dev->vblank = NULL;
210 	dev->num_crtcs = 0;
211 	DRM_SPINUNINIT(&dev->vbl_lock);
212 }
213 
214 int
215 drm_vblank_init(struct drm_device *dev, int num_crtcs)
216 {
217 	timeout_set(&dev->vblank_disable_timer, vblank_disable, dev);
218 	mtx_init(&dev->vbl_lock, IPL_BIO);
219 	dev->num_crtcs = num_crtcs;
220 
221 	dev->vblank = drm_calloc(num_crtcs, sizeof(*dev->vblank), M_DRM);
222 	if (dev->vblank == NULL)
223 		goto err;
224 
225 	dev->vblank_disable_allowed = 0;
226 
227 	return (0);
228 
229 err:
230 	drm_vblank_cleanup(dev);
231 	return ENOMEM;
232 }
233 
234 u_int32_t
235 drm_vblank_count(struct drm_device *dev, int crtc)
236 {
237 	return atomic_read(&dev->vblank[crtc].vbl_count);
238 }
239 
240 void
241 drm_update_vblank_count(struct drm_device *dev, int crtc)
242 {
243 	u_int32_t cur_vblank, diff;
244 
245 	/*
246 	 * Interrupts were disabled prior to this call, so deal with counter wrap;
247 	 * note that we may have lost a full dev->max_vblank_count worth of events
248 	 * if the register is small or the interrupts were off for a long time.
249 	 */
250 	cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
251 	diff = cur_vblank - dev->vblank[crtc].last_vblank;
252 	if (cur_vblank < dev->vblank[crtc].last_vblank)
253 		diff += dev->max_vblank_count;
254 
255 	atomic_add(diff, &dev->vblank[crtc].vbl_count);
256 }
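
/*
 * Worked example of the wrap handling above, assuming a 24-bit hardware
 * counter (dev->max_vblank_count == 0x1000000):
 *
 *	last_vblank = 0xfffff0 and cur_vblank = 0x000010 after the wrap;
 *	diff = 0x000010 - 0xfffff0 = 0xff000020 in unsigned 32-bit arithmetic;
 *	since cur_vblank < last_vblank, diff += 0x1000000 gives 0x20,
 *	the 32 vblanks that actually elapsed.
 */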
257 
258 int
259 drm_vblank_get(struct drm_device *dev, int crtc)
260 {
261 	int ret = 0;
262 
263 	DRM_SPINLOCK(&dev->vbl_lock);
264 
265 	atomic_add(1, &dev->vblank[crtc].vbl_refcount);
266 	if (dev->vblank[crtc].vbl_refcount == 1 &&
267 	    dev->vblank[crtc].vbl_enabled == 0) {
268 		ret = dev->driver.enable_vblank(dev, crtc);
269 		if (ret) {
270 			atomic_dec(&dev->vblank[crtc].vbl_refcount);
271 		} else {
272 			dev->vblank[crtc].vbl_enabled = 1;
273 			drm_update_vblank_count(dev, crtc);
274 		}
275 	}
276 	DRM_SPINUNLOCK(&dev->vbl_lock);
277 
278 	return (ret);
279 }
280 
281 void
282 drm_vblank_put(struct drm_device *dev, int crtc)
283 {
284 	DRM_SPINLOCK(&dev->vbl_lock);
285 	/* Last user schedules interrupt disable */
286 	atomic_dec(&dev->vblank[crtc].vbl_refcount);
287 	if (dev->vblank[crtc].vbl_refcount == 0)
288 		timeout_add_sec(&dev->vblank_disable_timer, 5);
289 	DRM_SPINUNLOCK(&dev->vbl_lock);
290 }
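
/*
 * Illustrative usage of the reference counting above (a sketch mirroring
 * what drm_wait_vblank() below does; "error" is just a local here):
 *
 *	if ((error = drm_vblank_get(dev, crtc)) != 0)
 *		return (error);
 *	... sleep on or sample the counter via drm_vblank_count() ...
 *	drm_vblank_put(dev, crtc);
 *
 * The last drm_vblank_put() arms vblank_disable_timer, so the interrupt is
 * only switched off after the counter has gone unused for five seconds.
 */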
291 
292 int
293 drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
294 {
295 	struct drm_modeset_ctl *modeset = data;
296 	int crtc, ret = 0;
297 
298 	/* not initialised yet, just noop */
299 	if (dev->num_crtcs == 0)
300 		goto out;
301 
302 	crtc = modeset->crtc;
303 	if (crtc >= dev->num_crtcs) {
304 		ret = EINVAL;
305 		goto out;
306 	}
307 
308 	/*
309 	 * If interrupts are enabled/disabled between calls to this ioctl then
310 	 * it can get nasty. So just grab a reference so that the interrupts
311 	 * keep going through the modeset (example follows this function).
312 	 */
313 	switch (modeset->cmd) {
314 	case _DRM_PRE_MODESET:
315 		if (dev->vblank[crtc].vbl_inmodeset == 0) {
316 			DRM_SPINLOCK(&dev->vbl_lock);
317 			dev->vblank[crtc].vbl_inmodeset = 1;
318 			DRM_SPINUNLOCK(&dev->vbl_lock);
319 			drm_vblank_get(dev, crtc);
320 		}
321 		break;
322 	case _DRM_POST_MODESET:
323 		if (dev->vblank[crtc].vbl_inmodeset) {
324 			DRM_SPINLOCK(&dev->vbl_lock);
325 			dev->vblank_disable_allowed = 1;
326 			dev->vblank[crtc].vbl_inmodeset = 0;
327 			DRM_SPINUNLOCK(&dev->vbl_lock);
328 			drm_vblank_put(dev, crtc);
329 		}
330 		break;
331 	default:
332 		ret = EINVAL;
333 		break;
334 	}
335 
336 out:
337 	return (ret);
338 }
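
/*
 * Illustrative sketch of the userland side of the ioctl above, assuming
 * the DRM_IOCTL_MODESET_CTL request from the shared DRM headers (this is
 * a sketch, not code from this tree):
 *
 *	struct drm_modeset_ctl ctl;
 *
 *	ctl.crtc = crtc;
 *	ctl.cmd = _DRM_PRE_MODESET;
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 *	... program the new mode ...
 *	ctl.cmd = _DRM_POST_MODESET;
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 *
 * Bracketing the mode switch this way holds a vblank reference so the
 * counter keeps ticking while the mode is changed.
 */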
339 
340 int
341 drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
342 {
343 	union drm_wait_vblank	*vblwait = data;
344 	int			 ret, flags, crtc, seq;
345 
346 	if (!dev->irq_enabled)
347 		return EINVAL;
348 
349 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
350 	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
351 
352 	if (crtc >= dev->num_crtcs)
353 		return EINVAL;
354 
355 	ret = drm_vblank_get(dev, crtc);
356 	if (ret)
357 		return (ret);
358 	seq = drm_vblank_count(dev, crtc);
359 
360 	if (vblwait->request.type & _DRM_VBLANK_RELATIVE) {
361 		vblwait->request.sequence += seq;
362 		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
363 	}
364 
365 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
366 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
367 	    (seq - vblwait->request.sequence) <= (1<<23)) {
368 		vblwait->request.sequence = seq + 1;
369 	}
370 
371 	if (flags & _DRM_VBLANK_SIGNAL) {
372 		ret = EINVAL;
373 	} else {
374 		DRM_SPINLOCK(&dev->vbl_lock);
375 		while (ret == 0) {
376 			if ((drm_vblank_count(dev, crtc)
377 			    - vblwait->request.sequence) <= (1 << 23)) {
378 				/* vbl_lock is released once, below the loop */
379 				break;
380 			}
381 			ret = msleep(&dev->vblank[crtc],
382 			    &dev->vbl_lock, PZERO | PCATCH,
383 			    "drmvblq", 3 * DRM_HZ);
384 		}
385 		DRM_SPINUNLOCK(&dev->vbl_lock);
386 
387 		if (ret != EINTR) {
388 			struct timeval now;
389 
390 			microtime(&now);
391 			vblwait->reply.tval_sec = now.tv_sec;
392 			vblwait->reply.tval_usec = now.tv_usec;
393 			vblwait->reply.sequence = drm_vblank_count(dev, crtc);
394 		}
395 	}
396 
397 	drm_vblank_put(dev, crtc);
398 	return (ret);
399 }
400 
401 void
402 drm_handle_vblank(struct drm_device *dev, int crtc)
403 {
404 	atomic_inc(&dev->vblank[crtc].vbl_count);
405 	wakeup(&dev->vblank[crtc]);
406 }
407 
408 void
409 drm_locked_task(void *context, void *pending)
410 {
411 	struct drm_device *dev = context;
412 	void		  (*func)(struct drm_device *);
413 
414 	DRM_SPINLOCK(&dev->tsk_lock);
415 	mtx_enter(&dev->lock.spinlock);
416 	func = dev->locked_task_call;
417 	if (func == NULL ||
418 	    drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT) == 0) {
419 		mtx_leave(&dev->lock.spinlock);
420 		DRM_SPINUNLOCK(&dev->tsk_lock);
421 		return;
422 	}
423 
424 	dev->lock.file_priv = NULL; /* kernel owned */
425 	dev->lock.lock_time = jiffies;
426 	mtx_leave(&dev->lock.spinlock);
427 	dev->locked_task_call = NULL;
428 	DRM_SPINUNLOCK(&dev->tsk_lock);
429 
430 	(*func)(dev);
431 
432 	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
433 }
434 
435 void
436 drm_locked_tasklet(struct drm_device *dev, void (*tasklet)(struct drm_device *))
437 {
438 	DRM_SPINLOCK(&dev->tsk_lock);
439 	if (dev->locked_task_call != NULL) {
440 		DRM_SPINUNLOCK(&dev->tsk_lock);
441 		return;
442 	}
443 
444 	dev->locked_task_call = tasklet;
445 	DRM_SPINUNLOCK(&dev->tsk_lock);
446 
447 	if (workq_add_task(NULL, 0, drm_locked_task, dev, NULL) == ENOMEM)
448 		DRM_ERROR("error adding task to workq\n");
449 }
450