/*	$NetBSD: dmover_io.c,v 1.46 2019/02/10 17:13:33 christos Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * dmover_io.c: Support for user-space access to the dmover API.
 *
 * This interface is quite simple:
 *
 *	1.  The user opens /dev/dmover, which is a cloning device.  This
 *	    allocates internal state for the session.
 *
 *	2.  The user does a DMIO_SETFUNC to select the data movement
 *	    function.  This actually creates the dmover session.
 *
 *	3.  The user writes request messages to its dmover handle.
 *
 *	4.  The user reads request responses from its dmover handle.
 *
 *	5.  The user closes the file descriptor and the session is
 *	    torn down.
 */
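
/*
 * A minimal user-space sketch of steps 1-5 above.  This is illustrative
 * only: the "zero" function name and the buffer size are assumptions,
 * and all error checking is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/uio.h>
 *	#include <dev/dmover/dmover_io.h>
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct dmio_setfunc dsf;
 *	struct dmio_usrreq req;
 *	struct dmio_usrresp resp;
 *	struct pollfd pfd;
 *
 *	int fd = open("/dev/dmover", O_RDWR);		// step 1
 *
 *	memset(&dsf, 0, sizeof(dsf));
 *	strlcpy(dsf.dsf_name, "zero", sizeof(dsf.dsf_name));
 *	ioctl(fd, DMIO_SETFUNC, &dsf);			// step 2
 *
 *	memset(&req, 0, sizeof(req));
 *	req.req_id = 1;
 *	req.req_outbuf.dmbuf_iov = &iov;
 *	req.req_outbuf.dmbuf_iovcnt = 1;
 *	write(fd, &req, sizeof(req));			// step 3
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLIN;
 *	poll(&pfd, 1, -1);				// wait for completion
 *	read(fd, &resp, sizeof(resp));			// step 4
 *	// resp.resp_id == 1; resp.resp_error is 0 on success
 *
 *	close(fd);					// step 5
 */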

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dmover_io.c,v 1.46 2019/02/10 17:13:33 christos Exp $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/once.h>
#include <sys/stat.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_extern.h>

#include <dev/dmover/dmovervar.h>
#include <dev/dmover/dmover_io.h>

#include "ioconf.h"

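/* Per-request state for a user-submitted dmover request. */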
struct dmio_usrreq_state {
	union {
		struct work u_work;
		TAILQ_ENTRY(dmio_usrreq_state) u_q;
	} dus_u;
#define	dus_q		dus_u.u_q
#define	dus_work	dus_u.u_work
	struct uio dus_uio_out;
	struct uio *dus_uio_in;
	struct dmover_request *dus_req;
	uint32_t dus_id;
	struct vmspace *dus_vmspace;
};

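/* Per-open-descriptor (session) state. */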
struct dmio_state {
	struct dmover_session *ds_session;
	TAILQ_HEAD(, dmio_usrreq_state) ds_pending;
	TAILQ_HEAD(, dmio_usrreq_state) ds_complete;
	struct selinfo ds_selq;
	volatile int ds_flags;
	u_int ds_nreqs;
	kmutex_t ds_lock;
	kcondvar_t ds_complete_cv;
	kcondvar_t ds_nreqs_cv;
	struct timespec ds_atime;
	struct timespec ds_mtime;
	struct timespec ds_btime;
};

static ONCE_DECL(dmio_cleaner_control);
static struct workqueue *dmio_cleaner;
static int dmio_cleaner_init(void);
static struct dmio_state *dmio_state_get(void);
static void dmio_state_put(struct dmio_state *);
static void dmio_usrreq_fini1(struct work *wk, void *);

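/*
 * ds_flags bits: SEL means a poller is recorded in ds_selq; DEAD marks
 * a closed descriptor that still has requests outstanding; LARVAL marks
 * a DMIO_SETFUNC in progress; READ_WAIT and WRITE_WAIT mark sleepers on
 * ds_complete_cv and ds_nreqs_cv, respectively.
 */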
#define	DMIO_STATE_SEL		0x0001
#define	DMIO_STATE_DEAD		0x0002
#define	DMIO_STATE_LARVAL	0x0004
#define	DMIO_STATE_READ_WAIT	0x0008
#define	DMIO_STATE_WRITE_WAIT	0x0010

#define	DMIO_NREQS_MAX		64	/* XXX pulled out of a hat */

struct pool dmio_state_pool;
struct pool dmio_usrreq_state_pool;

dev_type_open(dmoverioopen);

const struct cdevsw dmoverio_cdevsw = {
	.d_open = dmoverioopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL, IPL_SOFTCLOCK);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL, IPL_SOFTCLOCK);
}

/*
 * dmio_cleaner_init:
 *
 *	Create the request cleanup workqueue.
 */
static int
dmio_cleaner_init(void)
{

	return workqueue_create(&dmio_cleaner, "dmioclean", dmio_usrreq_fini1,
	    NULL, PWAIT, IPL_SOFTCLOCK, 0);
}

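/*
 * dmio_state_get:
 *
 *	Allocate and initialize a new session state structure.
 */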
static struct dmio_state *
dmio_state_get(void)
{
	struct dmio_state *ds;

	ds = pool_get(&dmio_state_pool, PR_WAITOK | PR_ZERO);

	getnanotime(&ds->ds_btime);
	ds->ds_atime = ds->ds_mtime = ds->ds_btime;

	mutex_init(&ds->ds_lock, MUTEX_DEFAULT, IPL_SOFTCLOCK);
	cv_init(&ds->ds_complete_cv, "dmvrrd");
	cv_init(&ds->ds_nreqs_cv, "dmiowr");
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);
	selinit(&ds->ds_selq);

	return ds;
}

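/*
 * dmio_state_put:
 *
 *	Tear down and free a session state structure.
 */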
static void
dmio_state_put(struct dmio_state *ds)
{

	seldestroy(&ds->ds_selq);
	cv_destroy(&ds->ds_nreqs_cv);
	cv_destroy(&ds->ds_complete_cv);
	mutex_destroy(&ds->ds_lock);

	pool_put(&dmio_state_pool, ds);
}

/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	error = RUN_ONCE(&dmio_cleaner_control, dmio_cleaner_init);
	if (error) {
		return error;
	}

	error = proc_vmspace_getref(curproc, &dus->dus_vmspace);
	if (error) {
		return error;
	}

	if (req->req_outbuf.dmbuf_iovcnt != 0) {
		if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX)
			return (EINVAL);
		len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);
		error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov,
		    len);
		if (error) {
			free(uio_out->uio_iov, M_TEMP);
			return (error);
		}

		for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
			len += uio_out->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_out->uio_iov, M_TEMP);
				return (EINVAL);
			}
		}

		uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_resid = len;
		uio_out->uio_rw = UIO_READ;
		uio_out->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
		dreq->dreq_outbuf.dmbuf_uio = uio_out;
	} else {
		uio_out->uio_iov = NULL;
		uio_out = NULL;
		dreq->dreq_outbuf_type = DMOVER_BUF_NONE;
	}

	memcpy(dreq->dreq_immediate, req->req_immediate,
	    sizeof(dreq->dreq_immediate));

	if (dses->dses_ninputs == 0) {
		/* No inputs; all done. */
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (uio_out != NULL && len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	if (uio_out != NULL)
		free(uio_out->uio_iov, M_TEMP);
	uvmspace_free(dus->dus_vmspace);
	return (error);
}

/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  The final cleanup (releasing the vmspace
 *	and returning the state to its pool) is deferred to the
 *	dmio_cleaner workqueue.
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	if (uio_out->uio_iov != NULL)
		free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs) {
		for (i = 0; i < dses->dses_ninputs; i++) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
		free(dus->dus_uio_in, M_TEMP);
	}

	workqueue_enqueue(dmio_cleaner, &dus->dus_work, NULL);
}

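/*
 * dmio_usrreq_fini1:
 *
 *	Final request teardown, run from the dmio_cleaner workqueue:
 *	releases the request's vmspace reference and frees its state.
 */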
static void
dmio_usrreq_fini1(struct work *wk, void *dummy)
{
	struct dmio_usrreq_state *dus = (void *)wk;

	KASSERT(wk == &dus->dus_work);

	uvmspace_free(dus->dus_vmspace);
	pool_put(&dmio_usrreq_state_pool, dus);
}

/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	getnanotime(&ds->ds_atime);
	mutex_enter(&ds->ds_lock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = cv_wait_sig(&ds->ds_complete_cv, &ds->ds_lock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				cv_broadcast(&ds->ds_nreqs_cv);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selnotify(&ds->ds_selq, POLLOUT | POLLWRNORM, 0);
			}
			break;
		}

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		mutex_exit(&ds->ds_lock);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		mutex_enter(&ds->ds_lock);
	}

 out:
	mutex_exit(&ds->ds_lock);

	return (error);
}

/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	mutex_enter(&ds->ds_lock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		int nreqs = --ds->ds_nreqs;
		mutex_exit(&ds->ds_lock);
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (nreqs == 0) {
			dmio_state_put(ds);
		}
		return;
	}

	TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
		ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
		cv_broadcast(&ds->ds_complete_cv);
	}
	if (ds->ds_flags & DMIO_STATE_SEL) {
		ds->ds_flags &= ~DMIO_STATE_SEL;
		selnotify(&ds->ds_selq, POLLIN | POLLRDNORM, 0);
	}
	mutex_exit(&ds->ds_lock);
}

/*
 * dmio_write:
 *
 *	Write file op.
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	getnanotime(&ds->ds_mtime);
	mutex_enter(&ds->ds_lock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = cv_wait_sig(&ds->ds_nreqs_cv, &ds->ds_lock);
			if (error)
				break;
			continue;
		}

		ds->ds_nreqs++;

		mutex_exit(&ds->ds_lock);

		progress = 1;

		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			mutex_enter(&ds->ds_lock);
			ds->ds_nreqs--;
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			ds->ds_nreqs--;
			error = ENOMEM;
			return error;
		}
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			pool_put(&dmio_usrreq_state_pool, dus);
			return error;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		mutex_enter(&ds->ds_lock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		mutex_exit(&ds->ds_lock);

		dmover_process(dreq);

		mutex_enter(&ds->ds_lock);
	}

	mutex_exit(&ds->ds_lock);

	return (error);
}

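/*
 * dmio_stat:
 *
 *	Stat file op.
 */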
static int
dmio_stat(struct file *fp, struct stat *st)
{
	struct dmio_state *ds = fp->f_data;

	(void)memset(st, 0, sizeof(*st));
	KERNEL_LOCK(1, NULL);
	st->st_dev = makedev(cdevsw_lookup_major(&dmoverio_cdevsw), 0);
	st->st_atimespec = ds->ds_atime;
	st->st_mtimespec = ds->ds_mtime;
	st->st_ctimespec = st->st_birthtimespec = ds->ds_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	KERNEL_UNLOCK_ONE(NULL);
	return 0;
}

/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		mutex_enter(&ds->ds_lock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			mutex_exit(&ds->ds_lock);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		mutex_exit(&ds->ds_lock);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		mutex_enter(&ds->ds_lock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		mutex_exit(&ds->ds_lock);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}

/*
 * dmio_poll:
 *
 *	Poll file op.
 */
static int
dmio_poll(struct file *fp, int events)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int revents = 0;

	if ((events & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) == 0)
		return (revents);

	mutex_enter(&ds->ds_lock);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* EOF */
		revents |= events & (POLLIN | POLLRDNORM |
		    POLLOUT | POLLWRNORM);
		goto out;
	}

	/* We can read if there are completed requests. */
	if (events & (POLLIN | POLLRDNORM))
		if (TAILQ_EMPTY(&ds->ds_complete) == 0)
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * We can write if there are fewer than DMIO_NREQS_MAX requests
	 * already in the queue.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (ds->ds_nreqs < DMIO_NREQS_MAX)
			revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		selrecord(curlwp, &ds->ds_selq);
		ds->ds_flags |= DMIO_STATE_SEL;
	}

 out:
	mutex_exit(&ds->ds_lock);

	return (revents);
}

/*
 * dmio_close:
 *
 *	Close file op.
 */
static int
dmio_close(struct file *fp)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;

	mutex_enter(&ds->ds_lock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		mutex_exit(&ds->ds_lock);
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
		mutex_enter(&ds->ds_lock);
	}

	/*
	 * If there are any requests pending, we have to wait for
	 * them.  Don't free the dmio_state in this case.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		mutex_exit(&ds->ds_lock);
		dmio_state_put(ds);
	} else {
		dses = NULL;
		mutex_exit(&ds->ds_lock);
	}

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}

static const struct fileops dmio_fileops = {
	.fo_name = "dmio",
	.fo_read = dmio_read,
	.fo_write = dmio_write,
	.fo_ioctl = dmio_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = dmio_poll,
	.fo_stat = dmio_stat,
	.fo_close = dmio_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
};

/*
 * dmoverioopen:
 *
 *	Device switch open routine.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);

	ds = dmio_state_get();

	return fd_clone(fp, fd, flag, &dmio_fileops, ds);
}