/*	$NetBSD: dmover_io.c,v 1.22 2006/05/14 21:42:27 elad Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * dmover_io.c: Support for user-space access to the dmover API.
 *
 * This interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user does a DMIO_SETFUNC to select the data movement
 *	    function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to its dmover handle.
 *
 *	4.  The user reads request responses from its dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 */
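
/*
 * Example (sketch): a minimal user-space client following the five
 * steps above.  The function name "zero" is an assumption -- any
 * function registered with dmover will do -- and error checking is
 * omitted for brevity.
 *
 *	int fd = open("/dev/dmover", O_RDWR);
 *
 *	struct dmio_setfunc dsf;
 *	memset(&dsf, 0, sizeof(dsf));
 *	strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *	ioctl(fd, DMIO_SETFUNC, &dsf);
 *
 *	static char buf[512];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct dmio_usrreq req;
 *	memset(&req, 0, sizeof(req));
 *	req.req_id = 1;
 *	req.req_outbuf.dmbuf_iov = &iov;
 *	req.req_outbuf.dmbuf_iovcnt = 1;
 *	write(fd, &req, sizeof(req));
 *
 *	struct dmio_usrresp resp;
 *	read(fd, &resp, sizeof(resp));
 *	(on success, resp.resp_id == req.req_id and resp.resp_error == 0)
 */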

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.22 2006/05/14 21:42:27 elad Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/once.h>

#include <uvm/uvm_extern.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>

struct dmio_usrreq_state {
	/*
	 * The work entry and the queue entry are never in use at the
	 * same time: a request sits on ds_pending and then ds_complete
	 * via dus_q, and is handed to the cleaner workqueue via
	 * dus_work only after it has been removed from those queues.
	 */
	union {
		struct work u_work;
		TAILQ_ENTRY(dmio_usrreq_state) u_q;
	} dus_u;
#define	dus_q		dus_u.u_q
#define	dus_work	dus_u.u_work
	struct uio dus_uio_out;
	struct uio *dus_uio_in;
	struct dmover_request *dus_req;
	uint32_t dus_id;
	struct vmspace *dus_vmspace;
};

struct dmio_state {
	struct dmover_session *ds_session;
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;	/* submitted to dmover */
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;	/* awaiting read(2) */
	struct selinfo ds_selq;
	volatile int ds_flags;
	u_int ds_nreqs;		/* outstanding requests */
	struct simplelock ds_slock;
};

static ONCE_DECL(dmio_cleaner_control);
static struct workqueue *dmio_cleaner;
static int dmio_cleaner_init(void);
static void dmio_usrreq_fini1(struct work *wk, void *);

#define	DMIO_STATE_SEL		0x0001	/* select/poll wants a wakeup */
#define	DMIO_STATE_DEAD		0x0002	/* session is being torn down */
#define	DMIO_STATE_LARVAL	0x0004	/* DMIO_SETFUNC in progress */
#define	DMIO_STATE_READ_WAIT	0x0008	/* reader waiting for a response */
#define	DMIO_STATE_WRITE_WAIT	0x0010	/* writer waiting for a free slot */

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

void	dmoverioattach(int);

dev_type_open(dmoverioopen);

const struct cdevsw dmoverio_cdevsw = {
	dmoverioopen, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL);
}

/*
 * dmio_cleaner_init:
 *
 *	Create cleaner thread.
 */
static int
dmio_cleaner_init(void)
{

	return workqueue_create(&dmio_cleaner, "dmioclean", dmio_usrreq_fini1,
	    NULL, PWAIT, 0 /* IPL_SOFTCLOCK */, 0);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	error = RUN_ONCE(&dmio_cleaner_control, dmio_cleaner_init);
	if (error) {
		return error;
	}

	error = proc_vmspace_getref(curproc, &dus->dus_vmspace);
	if (error) {
		return error;
	}

	if (req->req_outbuf.dmbuf_iovcnt != 0) {
		if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX) {
			/* Drop the vmspace reference taken above. */
			uvmspace_free(dus->dus_vmspace);
			return (EINVAL);
		}
		len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);
		error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov,
		    len);
		if (error) {
			free(uio_out->uio_iov, M_TEMP);
			uvmspace_free(dus->dus_vmspace);
			return (error);
		}

		for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
			len += uio_out->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_out->uio_iov, M_TEMP);
				uvmspace_free(dus->dus_vmspace);
				return (EINVAL);
			}
		}

		uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_resid = len;
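		/*
		 * UIO_READ: as with read(2), the data mover stores
		 * its result into the user's address space.
		 */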
		uio_out->uio_rw = UIO_READ;
		uio_out->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
		dreq->dreq_outbuf.dmbuf_uio = uio_out;
	} else {
		uio_out->uio_iov = NULL;
		uio_out = NULL;
		dreq->dreq_outbuf_type = DMOVER_BUF_NONE;
	}

	memcpy(dreq->dreq_immediate, req->req_immediate,
	    sizeof(dreq->dreq_immediate));

	if (dses->dses_ninputs == 0) {
		/* No inputs; all done. */
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (uio_out != NULL && len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
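		/*
		 * UIO_WRITE: as with write(2), the data mover fetches
		 * input data from the user's address space.
		 */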
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	if (uio_out != NULL)
		free(uio_out->uio_iov, M_TEMP);
	uvmspace_free(dus->dus_vmspace);
	return (error);
}

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  Must be called at splsoftclock().
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	if (uio_out->uio_iov != NULL)
		free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs) {
		for (i = 0; i < dses->dses_ninputs; i++) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
		free(dus->dus_uio_in, M_TEMP);
	}

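	/*
	 * Finish teardown on the cleaner workqueue: uvmspace_free()
	 * is not safe to call here, since we may be running at
	 * splsoftclock() from the dmover completion callback.
	 */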
	workqueue_enqueue(dmio_cleaner, &dus->dus_work);
}

static void
dmio_usrreq_fini1(struct work *wk, void *dummy)
{
	struct dmio_usrreq_state *dus = (void *)wk;
	int s;

	KASSERT(wk == &dus->dus_work);

	uvmspace_free(dus->dus_vmspace);
	s = splsoftclock();
	pool_put(&dmio_usrreq_state_pool, dus);
	splx(s);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selwakeup(&ds->ds_selq);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selwakeup(&ds->ds_selq);
		}
	}
	simple_unlock(&ds->ds_slock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;	/* release the slot taken above */
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

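		/*
		 * Hand the request to the back-end; dmio_usrreq_done()
		 * runs at splsoftclock() when it completes.
		 */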
		dmover_process(dreq);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data, struct lwp *l)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events, struct lwp *l)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int s, revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if fewer than DMIO_NREQS_MAX requests are
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(l, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (revents);
}

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp, struct lwp *l)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If there are any requests pending, we have to wait for
	 * them.  Don't free the dmio_state in this case.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		pool_put(&dmio_state_pool, ds);
	} else {
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static const struct fileops dmio_fileops = {
	dmio_read,
	dmio_write,
	dmio_ioctl,
	fnullop_fcntl,
	dmio_poll,
	fbadop_stat,
	dmio_close,
	fnullop_kqfilter
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;
	struct proc *p = l->l_proc;

	/* falloc() will use the descriptor for us. */
	if ((error = falloc(p, &fp, &fd)) != 0)
		return (error);

	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	memset(ds, 0, sizeof(*ds));
	simple_lock_init(&ds->ds_slock);
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);

	return fdclone(l, fp, fd, flag, &dmio_fileops, ds);
}
767