/*	$NetBSD: dmover_io.c,v 1.19 2005/12/24 20:27:29 perry Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * dmover_io.c: Support for user-space access to dmover-api
 *
 * This interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user does a DMIO_SETFUNC to select the data movement
 *	    function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to its dmover handle.
 *
 *	4.  The user reads request responses from its dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 */
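
/*
 * For illustration, a minimal user-space sketch of the sequence above.
 * This is hypothetical example code, not part of this driver: it assumes
 * a dmover function named "zero" is configured (any function known to
 * dmover(9) would do) and omits error handling.  The request and
 * response structures are those defined in <dev/dmover/dmover_io.h>.
 *
 *	char buf[4096];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct dmio_setfunc dsf;
 *	struct dmio_usrreq req;
 *	struct dmio_usrresp resp;
 *
 *	int fd = open("/dev/dmover", O_RDWR);		// 1. open
 *
 *	memset(&dsf, 0, sizeof(dsf));
 *	strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *	ioctl(fd, DMIO_SETFUNC, &dsf);			// 2. create session
 *
 *	memset(&req, 0, sizeof(req));
 *	req.req_id = 1;
 *	req.req_outbuf.dmbuf_iov = &iov;
 *	req.req_outbuf.dmbuf_iovcnt = 1;
 *	write(fd, &req, sizeof(req));			// 3. submit request
 *
 *	read(fd, &resp, sizeof(resp));			// 4. reap response
 *	close(fd);					// 5. tear down
 */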

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.19 2005/12/24 20:27:29 perry Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>

struct dmio_usrreq_state {
	TAILQ_ENTRY(dmio_usrreq_state) dus_q;	/* on pending/complete queue */
	struct uio dus_uio_out;			/* uio for the output buffer */
	struct uio *dus_uio_in;			/* array of uios, one per input */
	struct dmover_request *dus_req;		/* underlying dmover request */
	uint32_t dus_id;			/* user-assigned request ID */
};

struct dmio_state {
	struct dmover_session *ds_session;	/* set by DMIO_SETFUNC */
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;	/* submitted requests */
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;	/* finished requests */
	struct selinfo ds_selq;			/* select/poll bookkeeping */
	volatile int ds_flags;			/* DMIO_STATE_* flags below */
	u_int ds_nreqs;				/* pending + completed requests */
	struct simplelock ds_slock;		/* protects all of the above */
};

#define	DMIO_STATE_SEL		0x0001	/* select/poll waiter recorded */
#define	DMIO_STATE_DEAD		0x0002	/* file has been closed */
#define	DMIO_STATE_LARVAL	0x0004	/* DMIO_SETFUNC in progress */
#define	DMIO_STATE_READ_WAIT	0x0008	/* reader asleep on ds_complete */
#define	DMIO_STATE_WRITE_WAIT	0x0010	/* writer asleep on ds_nreqs */

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

void	dmoverioattach(int);

dev_type_open(dmoverioopen);

const struct cdevsw dmoverio_cdevsw = {
	dmoverioopen, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	if (req->req_outbuf.dmbuf_iovcnt != 0) {
		if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX)
			return (EINVAL);
		len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);
		error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov,
		    len);
		if (error) {
			free(uio_out->uio_iov, M_TEMP);
			return (error);
		}

		for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
			len += uio_out->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_out->uio_iov, M_TEMP);
				return (EINVAL);
			}
		}

		uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_resid = len;
		uio_out->uio_rw = UIO_READ;
		uio_out->uio_segflg = UIO_USERSPACE;
		uio_out->uio_lwp = curlwp;
		dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
		dreq->dreq_outbuf.dmbuf_uio = uio_out;
	} else {
		uio_out->uio_iov = NULL;
		uio_out = NULL;
		dreq->dreq_outbuf_type = DMOVER_BUF_NONE;
	}

	memcpy(dreq->dreq_immediate, req->req_immediate,
	    sizeof(dreq->dreq_immediate));

	if (dses->dses_ninputs == 0) {
		/* No inputs; all done. */
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (uio_out != NULL && len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_segflg = UIO_USERSPACE;
		uio_in->uio_lwp = curlwp;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	if (uio_out != NULL)
		free(uio_out->uio_iov, M_TEMP);
	return (error);
}

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  Must be called at splsoftclock().
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	if (uio_out->uio_iov != NULL)
		free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs == 0) {
		pool_put(&dmio_usrreq_state_pool, dus);
		return;
	}

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];
		free(uio_in->uio_iov, M_TEMP);
	}

	free(dus->dus_uio_in, M_TEMP);

	pool_put(&dmio_usrreq_state_pool, dus);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    struct ucred *cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selwakeup(&ds->ds_selq);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selwakeup(&ds->ds_selq);
		}
	}
	simple_unlock(&ds->ds_slock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    struct ucred *cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			/* Release the slot reserved by ds_nreqs++ above. */
			ds->ds_nreqs--;
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

		dmover_process(dreq);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data, struct lwp *l)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events, struct lwp *l)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int s, revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if fewer than DMIO_NREQS_MAX requests are
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(l, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (revents);
}
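
/*
 * For illustration, a hypothetical non-blocking client loop built on
 * these poll semantics (POLLOUT while fewer than DMIO_NREQS_MAX requests
 * are queued, POLLIN once a response is ready).  This is example code,
 * not part of the driver, and error handling is omitted:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	for (;;) {
 *		poll(&pfd, 1, INFTIM);
 *		if (pfd.revents & POLLOUT)
 *			write(fd, &req, sizeof(req));	// queue a request
 *		if (pfd.revents & POLLIN)
 *			read(fd, &resp, sizeof(resp));	// reap a response
 *	}
 */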

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp, struct lwp *l)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If there are any requests pending, we have to wait for
	 * them.  Don't free the dmio_state in this case.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		pool_put(&dmio_state_pool, ds);
	} else {
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static const struct fileops dmio_fileops = {
	dmio_read,
	dmio_write,
	dmio_ioctl,
	fnullop_fcntl,
	dmio_poll,
	fbadop_stat,
	dmio_close,
	fnullop_kqfilter
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;
	struct proc *p = l->l_proc;

	/* falloc() will use the descriptor for us. */
	if ((error = falloc(p, &fp, &fd)) != 0)
		return (error);

	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	memset(ds, 0, sizeof(*ds));
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);

	return fdclone(l, fp, fd, flag, &dmio_fileops, ds);
}
718