/*	$NetBSD: dmover_io.c,v 1.9 2003/05/30 11:37:47 scw Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * dmover_io.c: Support for user-space access to the dmover API.
 *
 * This interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user issues a DMIO_SETFUNC ioctl to select the data
 *	    movement function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to its dmover handle.
 *
 *	4.  The user reads request responses from its dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 */
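
/*
 * A minimal user-space sketch of the flow above (illustrative only;
 * error handling is omitted, and the function name "zero" is an
 * assumption: it must match a function registered with dmover(9)).
 * The structures come from <dev/dmover/dmover_io.h>:
 *
 *	int fd = open("/dev/dmover", O_RDWR);
 *
 *	struct dmio_setfunc dsf;
 *	strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *	ioctl(fd, DMIO_SETFUNC, &dsf);
 *
 *	struct dmio_usrreq req;
 *	memset(&req, 0, sizeof(req));
 *	req.req_id = 1;
 *	(fill in req.req_outbuf, and req.req_inbuf for functions with
 *	inputs, as sketched after dmio_usrreq_init() below)
 *	write(fd, &req, sizeof(req));
 *
 *	struct dmio_usrresp resp;
 *	read(fd, &resp, sizeof(resp));
 *	(resp.resp_id echoes req_id; resp.resp_error is 0 on success)
 *
 *	close(fd);
 */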

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.9 2003/05/30 11:37:47 scw Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>

struct dmio_usrreq_state {
	TAILQ_ENTRY(dmio_usrreq_state) dus_q;
	struct uio dus_uio_out;
	struct uio *dus_uio_in;
	struct dmover_request *dus_req;
	uint32_t dus_id;
};

struct dmio_state {
	struct dmover_session *ds_session;
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;
	struct selinfo ds_selq;
	__volatile int ds_flags;
	u_int ds_nreqs;
	struct simplelock ds_slock;
};

#define	DMIO_STATE_SEL		0x0001
#define	DMIO_STATE_DEAD		0x0002
#define	DMIO_STATE_LARVAL	0x0004
#define	DMIO_STATE_READ_WAIT	0x0008
#define	DMIO_STATE_WRITE_WAIT	0x0010

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

void	dmoverioattach(int);

dev_type_open(dmoverioopen);

const struct cdevsw dmoverio_cdevsw = {
	dmoverioopen, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX)
		return (EINVAL);
	len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
	if (len == 0)
		return (EINVAL);
	uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);

	error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov, len);
	if (error) {
		free(uio_out->uio_iov, M_TEMP);
		return (error);
	}

	for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
		len += uio_out->uio_iov[j].iov_len;
		if (len > SSIZE_MAX) {
			free(uio_out->uio_iov, M_TEMP);
			return (EINVAL);
		}
	}

	uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
	uio_out->uio_resid = len;
	uio_out->uio_rw = UIO_READ;
	uio_out->uio_segflg = UIO_USERSPACE;
	uio_out->uio_procp = curproc;

	dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
	dreq->dreq_outbuf.dmbuf_uio = uio_out;

	if (dses->dses_ninputs == 0) {
		/* No inputs; copy the immediate. */
		memcpy(dreq->dreq_immediate, req->req_immediate,
		    sizeof(dreq->dreq_immediate));
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		/* Validate against this input's own iovec count. */
		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_segflg = UIO_USERSPACE;
		uio_in->uio_procp = curproc;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	free(uio_out->uio_iov, M_TEMP);
	return (error);
}
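
/*
 * For reference, a hedged sketch of the user-side layout that
 * dmio_usrreq_init() expects: req_outbuf (and each entry of the
 * req_inbuf array) carries an iovec array of 1..IOV_MAX entries,
 * and every input must total exactly as many bytes as the output.
 * req_inbuf is taken to be a user-space pointer to an array of
 * dmio_buffer, as the copyin() in the loop above implies; variable
 * names here are illustrative only:
 *
 *	char out[4096], in0[4096], in1[4096];
 *	struct iovec out_iov = { out, sizeof(out) };
 *	struct iovec in0_iov = { in0, sizeof(in0) };
 *	struct iovec in1_iov = { in1, sizeof(in1) };
 *	dmio_buffer inbufs[2];
 *
 *	req.req_outbuf.dmbuf_iov = &out_iov;
 *	req.req_outbuf.dmbuf_iovcnt = 1;
 *	inbufs[0].dmbuf_iov = &in0_iov;
 *	inbufs[0].dmbuf_iovcnt = 1;
 *	inbufs[1].dmbuf_iov = &in1_iov;
 *	inbufs[1].dmbuf_iovcnt = 1;
 *	req.req_inbuf = inbufs;
 *
 * The session's function determines how many inputs are consumed
 * (dses_ninputs), e.g. two for a two-way XOR.
 */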

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  Must be called at splsoftclock().
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs == 0) {
		pool_put(&dmio_usrreq_state_pool, dus);
		return;
	}

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];
		free(uio_in->uio_iov, M_TEMP);
	}

	free(dus->dus_uio_in, M_TEMP);

	pool_put(&dmio_usrreq_state_pool, dus);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    struct ucred *cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selwakeup(&ds->ds_selq);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		resp.resp_error = (dreq->dreq_flags & DMOVER_REQ_ERROR) ?
		    dreq->dreq_error : 0;

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}
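
/*
 * Note that with FNONBLOCK set, a single read(2) drains as many
 * completed responses as are queued (a blocking read, by contrast,
 * sleeps until the caller's whole buffer can be filled).  A hedged
 * user-space sketch:
 *
 *	struct dmio_usrresp resps[8];
 *	ssize_t n = read(fd, resps, sizeof(resps));
 *	(on success, n / sizeof(resps[0]) responses were returned)
 */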

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
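		/*
		 * XXX If this was the last pending request, the
		 * dmio_state is reclaimed below, but nothing on this
		 * path (or in dmio_close()) ever destroys the dmover
		 * session.
		 */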
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selwakeup(&ds->ds_selq);
		}
	}
	simple_unlock(&ds->ds_slock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    struct ucred *cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			/* Release the request slot reserved above. */
			ds->ds_nreqs--;
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

		dmover_process(dreq);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data, struct proc *p)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_fcntl:
 *
 *	Fcntl file op.
 */
static int
dmio_fcntl(struct file *fp, u_int cmd, void *data, struct proc *p)
{

	if (cmd == FNONBLOCK || cmd == FASYNC)
		return (0);

	return (EOPNOTSUPP);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events, struct proc *p)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int s, revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if fewer than DMIO_NREQS_MAX requests are
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(p, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (revents);
}
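
/*
 * A hedged user-space sketch of driving the queue with poll(2):
 * write while POLLOUT is set (fewer than DMIO_NREQS_MAX requests
 * pending) and drain responses whenever POLLIN is set:
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLIN | POLLOUT;
 *	for (;;) {
 *		poll(&pfd, 1, INFTIM);
 *		if (pfd.revents & POLLOUT)
 *			write(fd, &req, sizeof(req));
 *		if (pfd.revents & POLLIN)
 *			read(fd, &resp, sizeof(resp));
 *	}
 */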

/*
 * dmio_stat:
 *
 *	Stat file op.
 */
static int
dmio_stat(struct file *fp, struct stat *sb, struct proc *p)
{

	return (EOPNOTSUPP);
}

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp, struct proc *p)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If any requests are still pending, the last completion
	 * callback will free the dmio_state (see dmio_usrreq_done()),
	 * so don't free it here.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		pool_put(&dmio_state_pool, ds);
	} else {
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static struct fileops dmio_fileops = {
	dmio_read,
	dmio_write,
	dmio_ioctl,
	dmio_fcntl,
	dmio_poll,
	dmio_stat,
	dmio_close,
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;

	/* falloc() will use the descriptor for us. */
	if ((error = falloc(p, &fp, &fd)) != 0)
		return (error);

	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	memset(ds, 0, sizeof(*ds));
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);

	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_MISC;
	fp->f_ops = &dmio_fileops;
	fp->f_data = (caddr_t) ds;

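	/*
	 * Returning ENXIO with p_dupfd set tells the open path to hand
	 * the caller the descriptor falloc() allocated above (via
	 * dupfdopen()'s "move" case) rather than a vnode-backed file:
	 * the traditional cloning-device idiom.
	 */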
	p->p_dupfd = fd;
	FILE_SET_MATURE(fp);
	FILE_UNUSE(fp, p);

	return (ENXIO);
}