xref: /openbsd-src/sys/dev/vscsi.c (revision 25c4e8bd056e974b28f4a0ffd39d76c190a56013)
1 /*	$OpenBSD: vscsi.c,v 1.61 2022/07/02 08:50:41 visa Exp $ */
2 
3 /*
4  * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/kernel.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/conf.h>
25 #include <sys/queue.h>
26 #include <sys/rwlock.h>
27 #include <sys/pool.h>
28 #include <sys/task.h>
29 #include <sys/ioctl.h>
30 #include <sys/selinfo.h>
31 
32 #include <scsi/scsi_all.h>
33 #include <scsi/scsiconf.h>
34 
35 #include <dev/vscsivar.h>
36 
37 int		vscsi_match(struct device *, void *, void *);
38 void		vscsi_attach(struct device *, struct device *, void *);
39 void		vscsi_shutdown(void *);
40 
/*
 * A ccb tracks one scsi_xfer on its round trip through userland.
 */
struct vscsi_ccb {
	TAILQ_ENTRY(vscsi_ccb)	ccb_entry;	/* on sc_ccb_i2t or sc_ccb_t2i */
	int			ccb_tag;	/* id userland uses to name this ccb */
	struct scsi_xfer	*ccb_xs;	/* the xfer being serviced */
	size_t			ccb_datalen;	/* bytes of xs->data moved so far */
};
47 
TAILQ_HEAD(vscsi_ccb_list, vscsi_ccb);

/*
 * Device life cycle: CLOSED until the control device is opened,
 * CONFIG while open/close (re)configure the ccb pool, RUNNING while
 * commands may flow to and from userland.
 */
enum vscsi_state {
	VSCSI_S_CLOSED,
	VSCSI_S_CONFIG,
	VSCSI_S_RUNNING
};
55 
struct vscsi_softc {
	struct device		sc_dev;
	struct scsibus_softc	*sc_scsibus;	/* child bus attached below us */

	struct mutex		sc_state_mtx;	/* protects sc_state, sc_ref_count,
						   and the i2t queue */
	enum vscsi_state	sc_state;
	u_int			sc_ref_count;	/* live scsi_links (probe/free) */
	struct pool		sc_ccb_pool;	/* only valid while device is open */

	struct scsi_iopool	sc_iopool;

	/* xfers queued for userland to pick up (initiator to target) */
	struct vscsi_ccb_list	sc_ccb_i2t;
	/* xfers handed to userland, awaiting completion (target to initiator) */
	struct vscsi_ccb_list	sc_ccb_t2i;
	int			sc_ccb_tag;	/* next tag to hand out */
	struct mutex		sc_poll_mtx;	/* sleep point for SCSI_POLL xfers */
	struct rwlock		sc_ioc_lock;	/* serialises ioctl handlers */

	struct selinfo		sc_sel;		/* poll/kevent listeners */
	struct mutex		sc_sel_mtx;	/* protects sc_sel's klist */
};
76 
77 #define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
78 #define DEV2SC(_d) ((struct vscsi_softc *)device_lookup(&vscsi_cd, minor(_d)))
79 
/* autoconf glue */
const struct cfattach vscsi_ca = {
	sizeof(struct vscsi_softc),
	vscsi_match,
	vscsi_attach
};

struct cfdriver vscsi_cd = {
	NULL,
	"vscsi",
	DV_DULL
};
91 
void		vscsi_cmd(struct scsi_xfer *);
int		vscsi_probe(struct scsi_link *);
void		vscsi_free(struct scsi_link *);

/* adapter entry points handed to scsibus via saa_adapter */
const struct scsi_adapter vscsi_switch = {
	vscsi_cmd, NULL, vscsi_probe, vscsi_free, NULL
};
99 
100 int		vscsi_i2t(struct vscsi_softc *, struct vscsi_ioc_i2t *);
101 int		vscsi_data(struct vscsi_softc *, struct vscsi_ioc_data *, int);
102 int		vscsi_t2i(struct vscsi_softc *, struct vscsi_ioc_t2i *);
103 int		vscsi_devevent(struct vscsi_softc *, u_long,
104 		    struct vscsi_ioc_devevent *);
105 void		vscsi_devevent_task(void *);
106 void		vscsi_done(struct vscsi_softc *, struct vscsi_ccb *);
107 
108 void *		vscsi_ccb_get(void *);
109 void		vscsi_ccb_put(void *, void *);
110 
111 void		filt_vscsidetach(struct knote *);
112 int		filt_vscsiread(struct knote *, long);
113 
/* EVFILT_READ: "readable" when an xfer is waiting on the i2t queue */
const struct filterops vscsi_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_vscsidetach,
	.f_event	= filt_vscsiread,
};
120 
121 
/* vscsi has no probeable hardware; always match. */
int
vscsi_match(struct device *parent, void *match, void *aux)
{
	return (1);
}
127 
/*
 * Attach: initialise locks, queues and the iopool, then attach a
 * scsibus below us.  The ccb pool itself is created lazily by
 * vscsiopen() and torn down by vscsiclose().
 */
void
vscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct vscsi_softc		*sc = (struct vscsi_softc *)self;
	struct scsibus_attach_args	saa;

	printf("\n");

	mtx_init(&sc->sc_state_mtx, IPL_BIO);
	sc->sc_state = VSCSI_S_CLOSED;

	TAILQ_INIT(&sc->sc_ccb_i2t);
	TAILQ_INIT(&sc->sc_ccb_t2i);
	mtx_init(&sc->sc_poll_mtx, IPL_BIO);
	mtx_init(&sc->sc_sel_mtx, IPL_BIO);
	rw_init(&sc->sc_ioc_lock, "vscsiioc");
	scsi_iopool_init(&sc->sc_iopool, sc, vscsi_ccb_get, vscsi_ccb_put);

	saa.saa_adapter = &vscsi_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = 256;
	saa.saa_luns = 8;
	saa.saa_openings = 16;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}
159 
160 void
161 vscsi_cmd(struct scsi_xfer *xs)
162 {
163 	struct scsi_link		*link = xs->sc_link;
164 	struct vscsi_softc		*sc = link->bus->sb_adapter_softc;
165 	struct vscsi_ccb		*ccb = xs->io;
166 	int				polled = ISSET(xs->flags, SCSI_POLL);
167 	int				running = 0;
168 
169 	if (ISSET(xs->flags, SCSI_POLL) && ISSET(xs->flags, SCSI_NOSLEEP)) {
170 		printf("%s: POLL && NOSLEEP for 0x%02x\n", DEVNAME(sc),
171 		    xs->cmd.opcode);
172 		xs->error = XS_DRIVER_STUFFUP;
173 		scsi_done(xs);
174 		return;
175 	}
176 
177 	ccb->ccb_xs = xs;
178 
179 	mtx_enter(&sc->sc_state_mtx);
180 	if (sc->sc_state == VSCSI_S_RUNNING) {
181 		running = 1;
182 		TAILQ_INSERT_TAIL(&sc->sc_ccb_i2t, ccb, ccb_entry);
183 	}
184 	mtx_leave(&sc->sc_state_mtx);
185 
186 	if (!running) {
187 		xs->error = XS_DRIVER_STUFFUP;
188 		scsi_done(xs);
189 		return;
190 	}
191 
192 	selwakeup(&sc->sc_sel);
193 
194 	if (polled) {
195 		mtx_enter(&sc->sc_poll_mtx);
196 		while (ccb->ccb_xs != NULL)
197 			msleep_nsec(ccb, &sc->sc_poll_mtx, PRIBIO, "vscsipoll",
198 			    INFSLP);
199 		mtx_leave(&sc->sc_poll_mtx);
200 		scsi_done(xs);
201 	}
202 }
203 
204 void
205 vscsi_done(struct vscsi_softc *sc, struct vscsi_ccb *ccb)
206 {
207 	struct scsi_xfer		*xs = ccb->ccb_xs;
208 
209 	if (ISSET(xs->flags, SCSI_POLL)) {
210 		mtx_enter(&sc->sc_poll_mtx);
211 		ccb->ccb_xs = NULL;
212 		wakeup(ccb);
213 		mtx_leave(&sc->sc_poll_mtx);
214 	} else
215 		scsi_done(xs);
216 }
217 
218 int
219 vscsi_probe(struct scsi_link *link)
220 {
221 	struct vscsi_softc		*sc = link->bus->sb_adapter_softc;
222 	int				rv = 0;
223 
224 	mtx_enter(&sc->sc_state_mtx);
225 	if (sc->sc_state == VSCSI_S_RUNNING)
226 		sc->sc_ref_count++;
227 	else
228 		rv = ENXIO;
229 	mtx_leave(&sc->sc_state_mtx);
230 
231 	return (rv);
232 }
233 
234 void
235 vscsi_free(struct scsi_link *link)
236 {
237 	struct vscsi_softc		*sc = link->bus->sb_adapter_softc;
238 
239 	mtx_enter(&sc->sc_state_mtx);
240 	sc->sc_ref_count--;
241 	if (sc->sc_state != VSCSI_S_RUNNING && sc->sc_ref_count == 0)
242 		wakeup(&sc->sc_ref_count);
243 	mtx_leave(&sc->sc_state_mtx);
244 }
245 
/*
 * Open the control device: transition CLOSED -> CONFIG, create and
 * prime the ccb pool, then commit to RUNNING (or fall back to CLOSED
 * if priming failed).  Only one opener is allowed at a time.
 */
int
vscsiopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	enum vscsi_state		state = VSCSI_S_RUNNING;
	int				rv = 0;

	if (sc == NULL)
		return (ENXIO);

	/* claim the device; CONFIG keeps other openers and xfers out */
	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state != VSCSI_S_CLOSED)
		rv = EBUSY;
	else
		sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	if (rv != 0) {
		device_unref(&sc->sc_dev);
		return (rv);
	}

	pool_init(&sc->sc_ccb_pool, sizeof(struct vscsi_ccb), 0, IPL_BIO, 0,
	    "vscsiccb", NULL);

	/* we need to guarantee some ccbs will be available for the iopool */
	rv = pool_prime(&sc->sc_ccb_pool, 8);
	if (rv != 0) {
		pool_destroy(&sc->sc_ccb_pool);
		state = VSCSI_S_CLOSED;
	}

	/* commit changes */
	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = state;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (rv);
}
286 
/*
 * ioctl entry point for the userland target emulator.  All handlers
 * are serialised by sc_ioc_lock.  DEV2SC() takes a device reference
 * which is released before returning.
 */
int
vscsiioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				read = 0;
	int				err = 0;

	if (sc == NULL)
		return (ENXIO);

	rw_enter_write(&sc->sc_ioc_lock);

	switch (cmd) {
	case VSCSI_I2T:
		err = vscsi_i2t(sc, (struct vscsi_ioc_i2t *)addr);
		break;

	case VSCSI_DATA_READ:
		read = 1;
		/* FALLTHROUGH */
	case VSCSI_DATA_WRITE:
		err = vscsi_data(sc, (struct vscsi_ioc_data *)addr, read);
		break;

	case VSCSI_T2I:
		err = vscsi_t2i(sc, (struct vscsi_ioc_t2i *)addr);
		break;

	case VSCSI_REQPROBE:
	case VSCSI_REQDETACH:
		err = vscsi_devevent(sc, cmd,
		    (struct vscsi_ioc_devevent *)addr);
		break;

	default:
		err = ENOTTY;
		break;
	}

	rw_exit_write(&sc->sc_ioc_lock);

	device_unref(&sc->sc_dev);
	return (err);
}
330 
/*
 * VSCSI_I2T: hand the oldest queued xfer to userland.  The ccb moves
 * from the i2t queue to the t2i queue, where it waits for the data
 * (VSCSI_DATA_*) and completion (VSCSI_T2I) ioctls.
 * Called with sc_ioc_lock held (via vscsiioctl()).
 */
int
vscsi_i2t(struct vscsi_softc *sc, struct vscsi_ioc_i2t *i2t)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;

	/* dequeue under the same lock vscsi_cmd() enqueues with */
	mtx_enter(&sc->sc_state_mtx);
	ccb = TAILQ_FIRST(&sc->sc_ccb_i2t);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
	mtx_leave(&sc->sc_state_mtx);

	if (ccb == NULL)
		return (EAGAIN);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	i2t->tag = ccb->ccb_tag;
	i2t->target = link->target;
	i2t->lun = link->lun;
	memcpy(&i2t->cmd, &xs->cmd, xs->cmdlen);
	i2t->cmdlen = xs->cmdlen;
	i2t->datalen = xs->datalen;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		i2t->direction = VSCSI_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		i2t->direction = VSCSI_DIR_WRITE;
		break;
	default:
		i2t->direction = VSCSI_DIR_NONE;
		break;
	}

	/*
	 * NOTE(review): t2i list is walked without sc_state_mtx here and
	 * in vscsi_data()/vscsi_t2i(); it appears serialised by
	 * sc_ioc_lock (plus the close path) — confirm before changing.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_ccb_t2i, ccb, ccb_entry);

	return (0);
}
373 
/*
 * VSCSI_DATA_READ/WRITE: move a chunk of xfer data between userland
 * and the kernel buffer.  ccb_datalen tracks progress so a transfer
 * may be split across multiple ioctls.  "read" is from the
 * initiator's point of view: read == copyin into the xfer buffer.
 * Called with sc_ioc_lock held (via vscsiioctl()).
 */
int
vscsi_data(struct vscsi_softc *sc, struct vscsi_ioc_data *data, int read)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				xsread;
	u_int8_t			*buf;
	int				rv = EINVAL;

	/* look up the ccb by the tag handed out in vscsi_i2t() */
	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == data->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	xs = ccb->ccb_xs;

	/* userland may not transfer more than the xfer has room for */
	if (data->datalen > xs->datalen - ccb->ccb_datalen)
		return (ENOMEM);

	/* derive the xfer's direction to validate the ioctl used */
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xsread = 1;
		break;
	case SCSI_DATA_OUT:
		xsread = 0;
		break;
	default:
		return (EINVAL);
	}

	if (read != xsread)
		return (EINVAL);

	/* resume where the previous VSCSI_DATA_* ioctl left off */
	buf = xs->data;
	buf += ccb->ccb_datalen;

	if (read)
		rv = copyin(data->data, buf, data->datalen);
	else
		rv = copyout(buf, data->data, data->datalen);

	if (rv == 0)
		ccb->ccb_datalen += data->datalen;

	return (rv);
}
422 
423 int
424 vscsi_t2i(struct vscsi_softc *sc, struct vscsi_ioc_t2i *t2i)
425 {
426 	struct vscsi_ccb		*ccb;
427 	struct scsi_xfer		*xs;
428 	int				rv = 0;
429 
430 	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
431 		if (ccb->ccb_tag == t2i->tag)
432 			break;
433 	}
434 	if (ccb == NULL)
435 		return (EFAULT);
436 
437 	TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
438 
439 	xs = ccb->ccb_xs;
440 
441 	xs->resid = xs->datalen - ccb->ccb_datalen;
442 	xs->status = SCSI_OK;
443 
444 	switch (t2i->status) {
445 	case VSCSI_STAT_DONE:
446 		xs->error = XS_NOERROR;
447 		break;
448 	case VSCSI_STAT_SENSE:
449 		xs->error = XS_SENSE;
450 		memcpy(&xs->sense, &t2i->sense, sizeof(xs->sense));
451 		break;
452 	case VSCSI_STAT_RESET:
453 		xs->error = XS_RESET;
454 		break;
455 	case VSCSI_STAT_ERR:
456 	default:
457 		xs->error = XS_DRIVER_STUFFUP;
458 		break;
459 	}
460 
461 	vscsi_done(sc, ccb);
462 
463 	return (rv);
464 }
465 
/*
 * Deferred probe/detach request: a copy of the ioctl arguments plus
 * the task that carries them to the system taskq.
 */
struct vscsi_devevent_task {
	struct vscsi_softc *sc;
	struct task t;
	struct vscsi_ioc_devevent de;	/* copied; ioctl buffer is transient */
	u_long cmd;			/* VSCSI_REQPROBE or VSCSI_REQDETACH */
};
472 
/*
 * VSCSI_REQPROBE/REQDETACH: request (de)attachment of a target/lun.
 * The actual scsi_probe()/scsi_detach() may sleep, so it runs on the
 * system taskq; the ioctl returns as soon as the task is queued.
 */
int
vscsi_devevent(struct vscsi_softc *sc, u_long cmd,
    struct vscsi_ioc_devevent *de)
{
	struct vscsi_devevent_task *dt;

	dt = malloc(sizeof(*dt), M_TEMP, M_WAITOK | M_CANFAIL);
	if (dt == NULL)
		return (ENOMEM);

	task_set(&dt->t, vscsi_devevent_task, dt);
	dt->sc = sc;
	dt->de = *de;	/* copy; the caller's buffer dies with the ioctl */
	dt->cmd = cmd;

	/* reference is released by vscsi_devevent_task() */
	device_ref(&sc->sc_dev);
	task_add(systq, &dt->t);

	return (0);
}
493 
/*
 * Deferred half of vscsi_devevent(): perform the probe/detach unless
 * the device stopped running in the meantime.  Always frees the task
 * and drops the device reference taken when it was queued.
 */
void
vscsi_devevent_task(void *xdt)
{
	struct vscsi_devevent_task *dt = xdt;
	struct vscsi_softc *sc = dt->sc;
	int state;

	mtx_enter(&sc->sc_state_mtx);
	state = sc->sc_state;
	mtx_leave(&sc->sc_state_mtx);

	if (state != VSCSI_S_RUNNING)
		goto gone;

	switch (dt->cmd) {
	case VSCSI_REQPROBE:
		scsi_probe(sc->sc_scsibus, dt->de.target, dt->de.lun);
		break;
	case VSCSI_REQDETACH:
		scsi_detach(sc->sc_scsibus, dt->de.target, dt->de.lun,
		    DETACH_FORCE);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("unexpected vscsi_devevent cmd");
		/* NOTREACHED */
#endif
	}

gone:
	device_unref(&sc->sc_dev);

	free(dt, M_TEMP, sizeof(*dt));
}
528 
/*
 * kevent attach: register an EVFILT_READ knote that fires when there
 * are xfers on the i2t queue.  The device reference from DEV2SC() is
 * kept for the knote's lifetime and dropped in filt_vscsidetach().
 */
int
vscsikqfilter(dev_t dev, struct knote *kn)
{
	struct vscsi_softc *sc = DEV2SC(dev);
	struct klist *klist;

	if (sc == NULL)
		return (ENXIO);

	klist = &sc->sc_sel.si_note;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vscsi_filtops;
		break;
	default:
		device_unref(&sc->sc_dev);
		return (EINVAL);
	}

	kn->kn_hook = sc;

	mtx_enter(&sc->sc_sel_mtx);
	klist_insert_locked(klist, kn);
	mtx_leave(&sc->sc_sel_mtx);

	/* device ref is given to the knote in the klist */

	return (0);
}
559 
560 void
561 filt_vscsidetach(struct knote *kn)
562 {
563 	struct vscsi_softc *sc = kn->kn_hook;
564 	struct klist *klist = &sc->sc_sel.si_note;
565 
566 	mtx_enter(&sc->sc_sel_mtx);
567 	klist_remove_locked(klist, kn);
568 	mtx_leave(&sc->sc_sel_mtx);
569 
570 	device_unref(&sc->sc_dev);
571 }
572 
573 int
574 filt_vscsiread(struct knote *kn, long hint)
575 {
576 	struct vscsi_softc *sc = kn->kn_hook;
577 	int event = 0;
578 
579 	mtx_enter(&sc->sc_state_mtx);
580 	if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
581 		event = 1;
582 	mtx_leave(&sc->sc_state_mtx);
583 
584 	return (event);
585 }
586 
/*
 * Close the control device.  Ordering matters:
 *  - drop to CONFIG so vscsi_cmd() stops queueing new xfers,
 *  - deactivate the bus and fail all outstanding ccbs with XS_RESET,
 *  - detach the child devices and wait for their links to be freed,
 *  - destroy the ccb pool and return to CLOSED.
 */
int
vscsiclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct vscsi_ccb		*ccb;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	KASSERT(sc->sc_state == VSCSI_S_RUNNING);
	sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	scsi_activate(sc->sc_scsibus, -1, -1, DVACT_DEACTIVATE);

	/* fail ccbs userland had picked up but not completed */
	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_t2i)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	/* fail ccbs still waiting to be picked up */
	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_i2t)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	scsi_req_detach(sc->sc_scsibus, -1, -1, DETACH_FORCE);

	/* wait for vscsi_free() to drop the last scsi_link reference */
	mtx_enter(&sc->sc_state_mtx);
	while (sc->sc_ref_count > 0) {
		msleep_nsec(&sc->sc_ref_count, &sc->sc_state_mtx,
		    PRIBIO, "vscsiref", INFSLP);
	}
	mtx_leave(&sc->sc_state_mtx);

	pool_destroy(&sc->sc_ccb_pool);

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = VSCSI_S_CLOSED;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}
633 
634 void *
635 vscsi_ccb_get(void *cookie)
636 {
637 	struct vscsi_softc		*sc = cookie;
638 	struct vscsi_ccb		*ccb = NULL;
639 
640 	ccb = pool_get(&sc->sc_ccb_pool, PR_NOWAIT);
641 	if (ccb != NULL) {
642 		ccb->ccb_tag = sc->sc_ccb_tag++;
643 		ccb->ccb_datalen = 0;
644 	}
645 
646 	return (ccb);
647 }
648 
/* iopool backend: return a ccb to the pool. */
void
vscsi_ccb_put(void *cookie, void *io)
{
	struct vscsi_softc		*sc = cookie;
	struct vscsi_ccb		*ccb = io;

	pool_put(&sc->sc_ccb_pool, ccb);
}
657