/*	$NetBSD: dmover_io.c,v 1.28 2007/09/25 14:13:34 ad Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * dmover_io.c: Support for user-space access to the dmover API.
 *
 * This interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user issues a DMIO_SETFUNC ioctl to select the data
 *	    movement function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to its dmover handle.
 *
 *	4.  The user reads request responses from its dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 *
 * A sketch of a minimal user-space client following these steps is
 * given below.
 */
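
/*
 * Illustrative sketch only (not part of the original source): a minimal
 * user-space client of this interface.  It assumes that a "zero"
 * data-movement function (no inputs, zero-fills the output) is
 * registered with dmover, and that the dmio_setfunc, dmio_usrreq and
 * dmio_usrresp structures come from <dev/dmover/dmover_io.h>.  Error
 * handling is elided for brevity.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/uio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	#include <dev/dmover/dmover_io.h>
 *
 *	int
 *	dmio_zero_buffer(void *buf, size_t len)
 *	{
 *		struct dmio_setfunc dsf;
 *		struct dmio_usrreq req;
 *		struct dmio_usrresp resp;
 *		struct iovec iov;
 *		int fd;
 *
 *		fd = open("/dev/dmover", O_RDWR);		   (step 1)
 *
 *		memset(&dsf, 0, sizeof(dsf));
 *		strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *		ioctl(fd, DMIO_SETFUNC, &dsf);			   (step 2)
 *
 *		iov.iov_base = buf;
 *		iov.iov_len = len;
 *		memset(&req, 0, sizeof(req));
 *		req.req_id = 1;
 *		req.req_outbuf.dmbuf_iov = &iov;
 *		req.req_outbuf.dmbuf_iovcnt = 1;
 *		write(fd, &req, sizeof(req));			   (step 3)
 *
 *		read(fd, &resp, sizeof(resp));			   (step 4)
 *
 *		close(fd);					   (step 5)
 *		return resp.resp_error;
 *	}
 */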

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.28 2007/09/25 14:13:34 ad Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/once.h>

#include <uvm/uvm_extern.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>

struct dmio_usrreq_state {
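	/*
	 * A request sits on either ds_pending/ds_complete (linked
	 * through dus_q) or, once it is being torn down, on the
	 * cleaner workqueue (through dus_work); never both at once,
	 * so the two links can share storage.
	 */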
	union {
		struct work u_work;
		TAILQ_ENTRY(dmio_usrreq_state) u_q;
	} dus_u;
#define	dus_q		dus_u.u_q
#define	dus_work	dus_u.u_work
	struct uio dus_uio_out;
	struct uio *dus_uio_in;
	struct dmover_request *dus_req;
	uint32_t dus_id;
	struct vmspace *dus_vmspace;
};

struct dmio_state {
	struct dmover_session *ds_session;
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;
	struct selinfo ds_selq;
	volatile int ds_flags;
	u_int ds_nreqs;
	struct simplelock ds_slock;
};

static ONCE_DECL(dmio_cleaner_control);
static struct workqueue *dmio_cleaner;
static int dmio_cleaner_init(void);
static void dmio_usrreq_fini1(struct work *wk, void *);

#define	DMIO_STATE_SEL		0x0001
#define	DMIO_STATE_DEAD		0x0002
#define	DMIO_STATE_LARVAL	0x0004
#define	DMIO_STATE_READ_WAIT	0x0008
#define	DMIO_STATE_WRITE_WAIT	0x0010

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

void	dmoverioattach(int);

dev_type_open(dmoverioopen);

const struct cdevsw dmoverio_cdevsw = {
	dmoverioopen, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
	D_OTHER
};

/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL, IPL_SOFTCLOCK);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL, IPL_SOFTCLOCK);
}

/*
 * dmio_cleaner_init:
 *
 *	Create cleaner thread.
 */
static int
dmio_cleaner_init(void)
{

	return workqueue_create(&dmio_cleaner, "dmioclean", dmio_usrreq_fini1,
	    NULL, PWAIT, IPL_SOFTCLOCK, 0);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	error = RUN_ONCE(&dmio_cleaner_control, dmio_cleaner_init);
	if (error) {
		return error;
	}

	error = proc_vmspace_getref(curproc, &dus->dus_vmspace);
	if (error) {
		return error;
	}
	if (req->req_outbuf.dmbuf_iovcnt != 0) {
		if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX) {
			uvmspace_free(dus->dus_vmspace);
			return (EINVAL);
		}
		len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);
		error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov,
		    len);
		if (error) {
			free(uio_out->uio_iov, M_TEMP);
			uvmspace_free(dus->dus_vmspace);
			return (error);
		}

		/* Total the transfer size, checking for overflow. */
		for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
			len += uio_out->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_out->uio_iov, M_TEMP);
				uvmspace_free(dus->dus_vmspace);
				return (EINVAL);
			}
		}

		uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_resid = len;
		uio_out->uio_rw = UIO_READ;
		uio_out->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
		dreq->dreq_outbuf.dmbuf_uio = uio_out;
	} else {
		uio_out->uio_iov = NULL;
		uio_out = NULL;
		dreq->dreq_outbuf_type = DMOVER_BUF_NONE;
	}

	memcpy(dreq->dreq_immediate, req->req_immediate,
	    sizeof(dreq->dreq_immediate));

	if (dses->dses_ninputs == 0) {
		/* No inputs; all done. */
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

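	/*
	 * Build one uio per session input.  Each input must describe
	 * the same number of bytes as the output buffer, when one
	 * exists.
	 */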
	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (uio_out != NULL && len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	if (uio_out != NULL)
		free(uio_out->uio_iov, M_TEMP);
	uvmspace_free(dus->dus_vmspace);
	return (error);
}

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  Must be called at splsoftclock().
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	if (uio_out->uio_iov != NULL)
		free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs) {
		for (i = 0; i < dses->dses_ninputs; i++) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
		free(dus->dus_uio_in, M_TEMP);
	}

	workqueue_enqueue(dmio_cleaner, &dus->dus_work, NULL);
}

static void
dmio_usrreq_fini1(struct work *wk, void *dummy)
{
	struct dmio_usrreq_state *dus = (void *)wk;
	int s;

	KASSERT(wk == &dus->dus_work);

	uvmspace_free(dus->dus_vmspace);
	s = splsoftclock();
	pool_put(&dmio_usrreq_state_pool, dus);
	splx(s);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selwakeup(&ds->ds_selq);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			seldestroy(&ds->ds_selq);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selwakeup(&ds->ds_selq);
		}
	}
	simple_unlock(&ds->ds_slock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;	/* release the slot we reserved */
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

		dmover_process(dreq);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data, struct lwp *l)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events, struct lwp *l)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int s, revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);
	/*
	 * We can write if fewer than DMIO_NREQS_MAX requests are
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(l, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (revents);
}

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp, struct lwp *l)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If any requests are still pending, the completion callback
	 * frees the dmio_state when the last one finishes; don't
	 * free it here.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		seldestroy(&ds->ds_selq);
		pool_put(&dmio_state_pool, ds);
	} else {
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static const struct fileops dmio_fileops = {
	dmio_read,
	dmio_write,
	dmio_ioctl,
	fnullop_fcntl,
	dmio_poll,
	fbadop_stat,
	dmio_close,
	fnullop_kqfilter
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;

	/* falloc() will use the descriptor for us. */
	if ((error = falloc(l, &fp, &fd)) != 0)
		return (error);

	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	memset(ds, 0, sizeof(*ds));
	simple_lock_init(&ds->ds_slock);
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);
	selinit(&ds->ds_selq);

	return fdclone(l, fp, fd, flag, &dmio_fileops, ds);
}