/*	$NetBSD: dmover_io.c,v 1.39 2010/11/13 14:08:20 uebayasi Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * dmover_io.c: Support for user-space access to the dmover API.
 *
 * This interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user does a DMIO_SETFUNC ioctl to select the data movement
 *	    function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to its dmover handle.
 *
 *	4.  The user reads request responses from its dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 *
 * An illustrative user-space sketch of this sequence follows.
 */
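
/*
 * Minimal user-space sketch of the sequence above (illustrative only,
 * error handling omitted).  It relies on struct dmio_setfunc,
 * struct dmio_usrreq and struct dmio_usrresp from
 * <dev/dmover/dmover_io.h>; "zero" is an example function name and
 * "iov" is a caller-prepared struct iovec describing the output buffer:
 *
 *	int fd = open("/dev/dmover", O_RDWR);
 *
 *	struct dmio_setfunc dsf;
 *	strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *	ioctl(fd, DMIO_SETFUNC, &dsf);		// create the session
 *
 *	struct dmio_usrreq req;
 *	memset(&req, 0, sizeof(req));
 *	req.req_id = 1;				// caller-chosen request tag
 *	req.req_outbuf.dmbuf_iov = &iov;
 *	req.req_outbuf.dmbuf_iovcnt = 1;
 *	write(fd, &req, sizeof(req));		// enqueue the request
 *
 *	struct dmio_usrresp resp;
 *	read(fd, &resp, sizeof(resp));		// blocks until completion
 *	if (resp.resp_error != 0)
 *		warnx("dmover: request %u failed: %d",
 *		    resp.resp_id, resp.resp_error);
 *
 *	close(fd);				// tear down the session
 */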

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.39 2010/11/13 14:08:20 uebayasi Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/simplelock.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/once.h>
#include <sys/stat.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>

struct dmio_usrreq_state {
	union {
		struct work u_work;
		TAILQ_ENTRY(dmio_usrreq_state) u_q;
	} dus_u;
#define	dus_q		dus_u.u_q
#define	dus_work	dus_u.u_work
	struct uio dus_uio_out;
	struct uio *dus_uio_in;
	struct dmover_request *dus_req;
	uint32_t dus_id;
	struct vmspace *dus_vmspace;
};

struct dmio_state {
	struct dmover_session *ds_session;
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;
	struct selinfo ds_selq;
	volatile int ds_flags;
	u_int ds_nreqs;
	struct simplelock ds_slock;
	struct timespec ds_atime;
	struct timespec ds_mtime;
	struct timespec ds_btime;
};
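
/*
 * The ds_slock simple lock protects ds_pending, ds_complete, ds_flags
 * and ds_nreqs.  It is always taken at splsoftclock(), since completion
 * callbacks run in soft interrupt context (see dmio_usrreq_done()).
 */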

static ONCE_DECL(dmio_cleaner_control);
static struct workqueue *dmio_cleaner;
static int dmio_cleaner_init(void);
static void dmio_usrreq_fini1(struct work *wk, void *);

#define	DMIO_STATE_SEL		0x0001
#define	DMIO_STATE_DEAD		0x0002
#define	DMIO_STATE_LARVAL	0x0004
#define	DMIO_STATE_READ_WAIT	0x0008
#define	DMIO_STATE_WRITE_WAIT	0x0010

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

void	dmoverioattach(int);

dev_type_open(dmoverioopen);

const struct cdevsw dmoverio_cdevsw = {
	dmoverioopen, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
	D_OTHER
};
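
/*
 * Only the open entry is real: dmoverioopen() immediately clones the
 * device into a per-session file handle with its own fileops
 * (dmio_fileops, below), so the remaining cdevsw entries are unused.
 */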

/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL, IPL_SOFTCLOCK);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL, IPL_SOFTCLOCK);
}

/*
 * dmio_cleaner_init:
 *
 *	Create cleaner thread.
 */
static int
dmio_cleaner_init(void)
{

	return workqueue_create(&dmio_cleaner, "dmioclean", dmio_usrreq_fini1,
	    NULL, PWAIT, IPL_SOFTCLOCK, 0);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	error = RUN_ONCE(&dmio_cleaner_control, dmio_cleaner_init);
	if (error) {
		return error;
	}

	error = proc_vmspace_getref(curproc, &dus->dus_vmspace);
	if (error) {
		return error;
	}

	if (req->req_outbuf.dmbuf_iovcnt != 0) {
		if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX) {
			/* Drop the vmspace reference taken above. */
			uvmspace_free(dus->dus_vmspace);
			return (EINVAL);
		}
		len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);
		error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov,
		    len);
		if (error) {
			free(uio_out->uio_iov, M_TEMP);
			uvmspace_free(dus->dus_vmspace);
			return (error);
		}

		for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
			len += uio_out->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_out->uio_iov, M_TEMP);
				uvmspace_free(dus->dus_vmspace);
				return (EINVAL);
			}
		}

		uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_resid = len;
		uio_out->uio_rw = UIO_READ;
		uio_out->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
		dreq->dreq_outbuf.dmbuf_uio = uio_out;
	} else {
		uio_out->uio_iov = NULL;
		uio_out = NULL;
		dreq->dreq_outbuf_type = DMOVER_BUF_NONE;
	}

	memcpy(dreq->dreq_immediate, req->req_immediate,
	    sizeof(dreq->dreq_immediate));

	if (dses->dses_ninputs == 0) {
		/* No inputs; all done. */
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (uio_out != NULL && len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	if (uio_out != NULL)
		free(uio_out->uio_iov, M_TEMP);
	uvmspace_free(dus->dus_vmspace);
	return (error);
}

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  Must be called at splsoftclock().
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	if (uio_out->uio_iov != NULL)
		free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs) {
		for (i = 0; i < dses->dses_ninputs; i++) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
		free(dus->dus_uio_in, M_TEMP);
	}

	workqueue_enqueue(dmio_cleaner, &dus->dus_work, NULL);
}

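/*
 * dmio_usrreq_fini1:
 *
 *	Second half of request teardown, run from the dmio_cleaner
 *	workqueue thread: uvmspace_free() may sleep, so the final
 *	cleanup is deferred out of splsoftclock() context.
 */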
static void
dmio_usrreq_fini1(struct work *wk, void *dummy)
{
	struct dmio_usrreq_state *dus = (void *)wk;
	int s;

	KASSERT(wk == &dus->dus_work);

	uvmspace_free(dus->dus_vmspace);
	s = splsoftclock();
	pool_put(&dmio_usrreq_state_pool, dus);
	splx(s);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	getnanotime(&ds->ds_atime);
	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				/*
				 * A request slot has been freed, which
				 * makes the handle writable, not readable.
				 */
				selnotify(&ds->ds_selq, POLLOUT | POLLWRNORM,
				    0);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			seldestroy(&ds->ds_selq);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			/*
			 * A completed request is now readable; write
			 * availability is unchanged here.
			 */
			selnotify(&ds->ds_selq, POLLIN | POLLRDNORM, 0);
		}
	}
	simple_unlock(&ds->ds_slock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	getnanotime(&ds->ds_mtime);
	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			/* Release the request slot reserved above. */
			ds->ds_nreqs--;
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

		dmover_process(dreq);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

static int
dmio_stat(struct file *fp, struct stat *st)
{
	struct dmio_state *ds = fp->f_data;

	(void)memset(st, 0, sizeof(*st));
	KERNEL_LOCK(1, NULL);
	st->st_dev = makedev(cdevsw_lookup_major(&dmoverio_cdevsw), 0);
	st->st_atimespec = ds->ds_atime;
	st->st_mtimespec = ds->ds_mtime;
	st->st_ctimespec = st->st_birthtimespec = ds->ds_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	KERNEL_UNLOCK_ONE(NULL);
	return 0;
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int s, revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if there are fewer than DMIO_NREQS_MAX requests
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(curlwp, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (revents);
}

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If there are any requests pending, we have to wait for
	 * them.  Don't free the dmio_state in this case.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		seldestroy(&ds->ds_selq);
		pool_put(&dmio_state_pool, ds);
	} else {
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static const struct fileops dmio_fileops = {
	.fo_read = dmio_read,
	.fo_write = dmio_write,
	.fo_ioctl = dmio_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = dmio_poll,
	.fo_stat = dmio_stat,
	.fo_close = dmio_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;

	/* fd_allocfile() will use the descriptor for us. */
	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);

	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	/* Zero the state before setting the timestamps, not after. */
	memset(ds, 0, sizeof(*ds));
	getnanotime(&ds->ds_btime);
	ds->ds_atime = ds->ds_mtime = ds->ds_btime;

	simple_lock_init(&ds->ds_slock);
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);
	selinit(&ds->ds_selq);

	return fd_clone(fp, fd, flag, &dmio_fileops, ds);
}
797