xref: /openbsd-src/sys/dev/vscsi.c (revision a0747c9f67a4ae71ccb71e62a28d1ea19e06a63c)
1 /*	$OpenBSD: vscsi.c,v 1.58 2020/12/25 12:59:52 visa Exp $ */
2 
3 /*
4  * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/kernel.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/conf.h>
25 #include <sys/queue.h>
26 #include <sys/rwlock.h>
27 #include <sys/pool.h>
28 #include <sys/task.h>
29 #include <sys/ioctl.h>
30 #include <sys/poll.h>
31 #include <sys/selinfo.h>
32 
33 #include <scsi/scsi_all.h>
34 #include <scsi/scsiconf.h>
35 
36 #include <dev/vscsivar.h>
37 
38 int		vscsi_match(struct device *, void *, void *);
39 void		vscsi_attach(struct device *, struct device *, void *);
40 void		vscsi_shutdown(void *);
41 
/*
 * Per-command state.  One ccb tracks a single scsi_xfer while it
 * travels from the initiator (kernel) to the target (userland) and
 * back; ccbs live on either sc_ccb_i2t or sc_ccb_t2i while in flight.
 */
struct vscsi_ccb {
	TAILQ_ENTRY(vscsi_ccb)	ccb_entry;	/* linkage on i2t/t2i list */
	int			ccb_tag;	/* id userland uses to refer to us */
	struct scsi_xfer	*ccb_xs;	/* the command being serviced */
	size_t			ccb_datalen;	/* bytes transferred so far */
};

TAILQ_HEAD(vscsi_ccb_list, vscsi_ccb);
50 
/*
 * Device lifecycle: CLOSED until /dev/vscsi is opened, CONFIG while
 * open/close is setting up or tearing down, RUNNING while commands
 * may be queued and serviced.
 */
enum vscsi_state {
	VSCSI_S_CLOSED,
	VSCSI_S_CONFIG,
	VSCSI_S_RUNNING
};
56 
struct vscsi_softc {
	struct device		sc_dev;
	struct scsibus_softc	*sc_scsibus;	/* child bus from config_found() */

	/* sc_state_mtx also protects the two ccb lists and sc_ref_count */
	struct mutex		sc_state_mtx;
	enum vscsi_state	sc_state;
	u_int			sc_ref_count;	/* open scsi_links (probe/free) */
	struct pool		sc_ccb_pool;	/* created/destroyed on open/close */

	struct scsi_iopool	sc_iopool;

	struct vscsi_ccb_list	sc_ccb_i2t;	/* awaiting pickup by userland */
	struct vscsi_ccb_list	sc_ccb_t2i;	/* handed out, awaiting completion */
	int			sc_ccb_tag;	/* next tag to assign */
	struct mutex		sc_poll_mtx;	/* sleep/wakeup for SCSI_POLL cmds */
	struct rwlock		sc_ioc_lock;	/* serializes ioctl handlers */

	struct selinfo		sc_sel;		/* poll/kevent readers */
	struct mutex		sc_sel_mtx;	/* protects sc_sel's klist */
};

#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
/* device_lookup() takes a ref; callers must device_unref() when done */
#define DEV2SC(_d) ((struct vscsi_softc *)device_lookup(&vscsi_cd, minor(_d)))
80 
struct cfattach vscsi_ca = {
	sizeof(struct vscsi_softc),
	vscsi_match,
	vscsi_attach
};

struct cfdriver vscsi_cd = {
	NULL,
	"vscsi",
	DV_DULL
};

void		vscsi_cmd(struct scsi_xfer *);
int		vscsi_probe(struct scsi_link *);
void		vscsi_free(struct scsi_link *);

/* adapter entry points handed to the SCSI midlayer */
struct scsi_adapter vscsi_switch = {
	vscsi_cmd, NULL, vscsi_probe, vscsi_free, NULL
};

int		vscsi_i2t(struct vscsi_softc *, struct vscsi_ioc_i2t *);
int		vscsi_data(struct vscsi_softc *, struct vscsi_ioc_data *, int);
int		vscsi_t2i(struct vscsi_softc *, struct vscsi_ioc_t2i *);
int		vscsi_devevent(struct vscsi_softc *, u_long,
		    struct vscsi_ioc_devevent *);
void		vscsi_devevent_task(void *);
void		vscsi_done(struct vscsi_softc *, struct vscsi_ccb *);

void *		vscsi_ccb_get(void *);
void		vscsi_ccb_put(void *, void *);

void		filt_vscsidetach(struct knote *);
int		filt_vscsiread(struct knote *, long);

const struct filterops vscsi_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_vscsidetach,
	.f_event	= filt_vscsiread,
};
121 
122 
/* Always match: vscsi is a pseudo-device with no real hardware to probe. */
int
vscsi_match(struct device *parent, void *match, void *aux)
{
	return (1);
}
128 
/*
 * Attach: initialize all locks, lists and the iopool, then attach a
 * scsibus below us.  The ccb pool itself is only created when the
 * device is opened (see vscsiopen()).
 */
void
vscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct vscsi_softc		*sc = (struct vscsi_softc *)self;
	struct scsibus_attach_args	saa;

	printf("\n");

	mtx_init(&sc->sc_state_mtx, IPL_BIO);
	sc->sc_state = VSCSI_S_CLOSED;

	TAILQ_INIT(&sc->sc_ccb_i2t);
	TAILQ_INIT(&sc->sc_ccb_t2i);
	mtx_init(&sc->sc_poll_mtx, IPL_BIO);
	mtx_init(&sc->sc_sel_mtx, IPL_BIO);
	rw_init(&sc->sc_ioc_lock, "vscsiioc");
	scsi_iopool_init(&sc->sc_iopool, sc, vscsi_ccb_get, vscsi_ccb_put);

	saa.saa_adapter = &vscsi_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = 256;
	saa.saa_luns = 8;
	saa.saa_openings = 16;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}
160 
/*
 * Adapter command entry point.  Queue the xfer on the i2t list for
 * userland to collect via the VSCSI_I2T ioctl and wake any poll/kevent
 * waiters.  SCSI_POLL commands sleep here until vscsi_done() clears
 * ccb_xs, which is why POLL combined with NOSLEEP cannot be honoured.
 */
void
vscsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct vscsi_softc		*sc = link->bus->sb_adapter_softc;
	struct vscsi_ccb		*ccb = xs->io;
	int				polled = ISSET(xs->flags, SCSI_POLL);
	int				running = 0;

	if (ISSET(xs->flags, SCSI_POLL) && ISSET(xs->flags, SCSI_NOSLEEP)) {
		printf("%s: POLL && NOSLEEP for 0x%02x\n", DEVNAME(sc),
		    xs->cmd.opcode);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	/* only accept the command if userland is there to service it */
	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING) {
		running = 1;
		TAILQ_INSERT_TAIL(&sc->sc_ccb_i2t, ccb, ccb_entry);
	}
	mtx_leave(&sc->sc_state_mtx);

	if (!running) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	selwakeup(&sc->sc_sel);

	if (polled) {
		/* wait for vscsi_done() to null ccb_xs and wake us */
		mtx_enter(&sc->sc_poll_mtx);
		while (ccb->ccb_xs != NULL)
			msleep_nsec(ccb, &sc->sc_poll_mtx, PRIBIO, "vscsipoll",
			    INFSLP);
		mtx_leave(&sc->sc_poll_mtx);
		scsi_done(xs);
	}
}
204 
205 void
206 vscsi_done(struct vscsi_softc *sc, struct vscsi_ccb *ccb)
207 {
208 	struct scsi_xfer		*xs = ccb->ccb_xs;
209 
210 	if (ISSET(xs->flags, SCSI_POLL)) {
211 		mtx_enter(&sc->sc_poll_mtx);
212 		ccb->ccb_xs = NULL;
213 		wakeup(ccb);
214 		mtx_leave(&sc->sc_poll_mtx);
215 	} else
216 		scsi_done(xs);
217 }
218 
219 int
220 vscsi_probe(struct scsi_link *link)
221 {
222 	struct vscsi_softc		*sc = link->bus->sb_adapter_softc;
223 	int				rv = 0;
224 
225 	mtx_enter(&sc->sc_state_mtx);
226 	if (sc->sc_state == VSCSI_S_RUNNING)
227 		sc->sc_ref_count++;
228 	else
229 		rv = ENXIO;
230 	mtx_leave(&sc->sc_state_mtx);
231 
232 	return (rv);
233 }
234 
235 void
236 vscsi_free(struct scsi_link *link)
237 {
238 	struct vscsi_softc		*sc = link->bus->sb_adapter_softc;
239 
240 	mtx_enter(&sc->sc_state_mtx);
241 	sc->sc_ref_count--;
242 	if (sc->sc_state != VSCSI_S_RUNNING && sc->sc_ref_count == 0)
243 		wakeup(&sc->sc_ref_count);
244 	mtx_leave(&sc->sc_state_mtx);
245 }
246 
/*
 * Open /dev/vscsi.  Only one opener is allowed: the state machine goes
 * CLOSED -> CONFIG (claiming the device), the ccb pool is set up, and
 * then CONFIG -> RUNNING is committed.  On pool_prime() failure we back
 * out to CLOSED and return the error.
 */
int
vscsiopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	enum vscsi_state		state = VSCSI_S_RUNNING;
	int				rv = 0;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state != VSCSI_S_CLOSED)
		rv = EBUSY;
	else
		sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	if (rv != 0) {
		device_unref(&sc->sc_dev);
		return (rv);
	}

	pool_init(&sc->sc_ccb_pool, sizeof(struct vscsi_ccb), 0, IPL_BIO, 0,
	    "vscsiccb", NULL);

	/* we need to guarantee some ccbs will be available for the iopool */
	rv = pool_prime(&sc->sc_ccb_pool, 8);
	if (rv != 0) {
		pool_destroy(&sc->sc_ccb_pool);
		state = VSCSI_S_CLOSED;
	}

	/* commit changes */
	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = state;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (rv);
}
287 
/*
 * ioctl dispatch for the userland target daemon.  sc_ioc_lock
 * serializes all handlers so the t2i list can be walked without the
 * state mutex.
 */
int
vscsiioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				read = 0;
	int				err = 0;

	if (sc == NULL)
		return (ENXIO);

	rw_enter_write(&sc->sc_ioc_lock);

	switch (cmd) {
	case VSCSI_I2T:
		err = vscsi_i2t(sc, (struct vscsi_ioc_i2t *)addr);
		break;

	case VSCSI_DATA_READ:
		read = 1;
		/* FALLTHROUGH */
	case VSCSI_DATA_WRITE:
		err = vscsi_data(sc, (struct vscsi_ioc_data *)addr, read);
		break;

	case VSCSI_T2I:
		err = vscsi_t2i(sc, (struct vscsi_ioc_t2i *)addr);
		break;

	case VSCSI_REQPROBE:
	case VSCSI_REQDETACH:
		err = vscsi_devevent(sc, cmd,
		    (struct vscsi_ioc_devevent *)addr);
		break;

	default:
		err = ENOTTY;
		break;
	}

	rw_exit_write(&sc->sc_ioc_lock);

	device_unref(&sc->sc_dev);
	return (err);
}
331 
/*
 * VSCSI_I2T: hand the oldest pending command to userland.  The ccb is
 * moved from the i2t list (protected by sc_state_mtx) to the t2i list
 * (protected by the caller holding sc_ioc_lock), where it waits for
 * VSCSI_DATA_* transfers and the final VSCSI_T2I completion.
 */
int
vscsi_i2t(struct vscsi_softc *sc, struct vscsi_ioc_i2t *i2t)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;

	mtx_enter(&sc->sc_state_mtx);
	ccb = TAILQ_FIRST(&sc->sc_ccb_i2t);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
	mtx_leave(&sc->sc_state_mtx);

	if (ccb == NULL)
		return (EAGAIN);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	i2t->tag = ccb->ccb_tag;
	i2t->target = link->target;
	i2t->lun = link->lun;
	memcpy(&i2t->cmd, &xs->cmd, xs->cmdlen);
	i2t->cmdlen = xs->cmdlen;
	i2t->datalen = xs->datalen;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		i2t->direction = VSCSI_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		i2t->direction = VSCSI_DIR_WRITE;
		break;
	default:
		i2t->direction = VSCSI_DIR_NONE;
		break;
	}

	TAILQ_INSERT_TAIL(&sc->sc_ccb_t2i, ccb, ccb_entry);

	return (0);
}
374 
/*
 * VSCSI_DATA_READ/WRITE: move a chunk of command data between userland
 * and the xfer's kernel buffer.  "read" is from the target's point of
 * view (read == data going into the xfer) and must match the xfer's
 * SCSI_DATA_IN/OUT direction.  Transfers may be split; ccb_datalen
 * tracks progress and bounds the total against xs->datalen.
 */
int
vscsi_data(struct vscsi_softc *sc, struct vscsi_ioc_data *data, int read)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				xsread;
	u_int8_t			*buf;
	int				rv = EINVAL;

	/* t2i list is stable here: caller holds sc_ioc_lock */
	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == data->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	xs = ccb->ccb_xs;

	/* refuse to run past the end of the xfer's buffer */
	if (data->datalen > xs->datalen - ccb->ccb_datalen)
		return (ENOMEM);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xsread = 1;
		break;
	case SCSI_DATA_OUT:
		xsread = 0;
		break;
	default:
		return (EINVAL);
	}

	if (read != xsread)
		return (EINVAL);

	buf = xs->data;
	buf += ccb->ccb_datalen;

	if (read)
		rv = copyin(data->data, buf, data->datalen);
	else
		rv = copyout(buf, data->data, data->datalen);

	if (rv == 0)
		ccb->ccb_datalen += data->datalen;

	return (rv);
}
423 
424 int
425 vscsi_t2i(struct vscsi_softc *sc, struct vscsi_ioc_t2i *t2i)
426 {
427 	struct vscsi_ccb		*ccb;
428 	struct scsi_xfer		*xs;
429 	int				rv = 0;
430 
431 	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
432 		if (ccb->ccb_tag == t2i->tag)
433 			break;
434 	}
435 	if (ccb == NULL)
436 		return (EFAULT);
437 
438 	TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
439 
440 	xs = ccb->ccb_xs;
441 
442 	xs->resid = xs->datalen - ccb->ccb_datalen;
443 	xs->status = SCSI_OK;
444 
445 	switch (t2i->status) {
446 	case VSCSI_STAT_DONE:
447 		xs->error = XS_NOERROR;
448 		break;
449 	case VSCSI_STAT_SENSE:
450 		xs->error = XS_SENSE;
451 		memcpy(&xs->sense, &t2i->sense, sizeof(xs->sense));
452 		break;
453 	case VSCSI_STAT_RESET:
454 		xs->error = XS_RESET;
455 		break;
456 	case VSCSI_STAT_ERR:
457 	default:
458 		xs->error = XS_DRIVER_STUFFUP;
459 		break;
460 	}
461 
462 	vscsi_done(sc, ccb);
463 
464 	return (rv);
465 }
466 
/* Deferred probe/detach request, run from the system taskq. */
struct vscsi_devevent_task {
	struct vscsi_softc *sc;
	struct task t;
	struct vscsi_ioc_devevent de;	/* copied from the ioctl argument */
	u_long cmd;			/* VSCSI_REQPROBE or VSCSI_REQDETACH */
};
473 
/*
 * VSCSI_REQPROBE/REQDETACH: defer scsi_probe()/scsi_detach() to the
 * system taskq since they may sleep.  The device reference taken here
 * is released by vscsi_devevent_task().
 */
int
vscsi_devevent(struct vscsi_softc *sc, u_long cmd,
    struct vscsi_ioc_devevent *de)
{
	struct vscsi_devevent_task *dt;

	dt = malloc(sizeof(*dt), M_TEMP, M_WAITOK | M_CANFAIL);
	if (dt == NULL)
		return (ENOMEM);

	task_set(&dt->t, vscsi_devevent_task, dt);
	dt->sc = sc;
	dt->de = *de;
	dt->cmd = cmd;

	device_ref(&sc->sc_dev);
	task_add(systq, &dt->t);

	return (0);
}
494 
/*
 * Taskq half of vscsi_devevent(): perform the probe/detach unless the
 * device stopped running in the meantime, then drop the reference taken
 * when the task was queued and free the request.
 */
void
vscsi_devevent_task(void *xdt)
{
	struct vscsi_devevent_task *dt = xdt;
	struct vscsi_softc *sc = dt->sc;
	int state;

	mtx_enter(&sc->sc_state_mtx);
	state = sc->sc_state;
	mtx_leave(&sc->sc_state_mtx);

	if (state != VSCSI_S_RUNNING)
		goto gone;

	switch (dt->cmd) {
	case VSCSI_REQPROBE:
		scsi_probe(sc->sc_scsibus, dt->de.target, dt->de.lun);
		break;
	case VSCSI_REQDETACH:
		scsi_detach(sc->sc_scsibus, dt->de.target, dt->de.lun,
		    DETACH_FORCE);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("unexpected vscsi_devevent cmd");
		/* NOTREACHED */
#endif
	}

gone:
	device_unref(&sc->sc_dev);

	free(dt, M_TEMP, sizeof(*dt));
}
529 
530 int
531 vscsipoll(dev_t dev, int events, struct proc *p)
532 {
533 	struct vscsi_softc		*sc = DEV2SC(dev);
534 	int				revents = 0;
535 
536 	if (sc == NULL)
537 		return (POLLERR);
538 
539 	if (events & (POLLIN | POLLRDNORM)) {
540 		mtx_enter(&sc->sc_state_mtx);
541 		if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
542 			revents |= events & (POLLIN | POLLRDNORM);
543 		mtx_leave(&sc->sc_state_mtx);
544 	}
545 
546 	if (revents == 0) {
547 		if (events & (POLLIN | POLLRDNORM))
548 			selrecord(p, &sc->sc_sel);
549 	}
550 
551 	device_unref(&sc->sc_dev);
552 	return (revents);
553 }
554 
/*
 * kqueue(2) backend: attach a read filter.  On success the device
 * reference obtained via DEV2SC is deliberately NOT dropped here; it is
 * owned by the knote and released in filt_vscsidetach().
 */
int
vscsikqfilter(dev_t dev, struct knote *kn)
{
	struct vscsi_softc *sc = DEV2SC(dev);
	struct klist *klist;

	if (sc == NULL)
		return (ENXIO);

	klist = &sc->sc_sel.si_note;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vscsi_filtops;
		break;
	default:
		device_unref(&sc->sc_dev);
		return (EINVAL);
	}

	kn->kn_hook = sc;

	mtx_enter(&sc->sc_sel_mtx);
	klist_insert_locked(klist, kn);
	mtx_leave(&sc->sc_sel_mtx);

	/* device ref is given to the knote in the klist */

	return (0);
}
585 
586 void
587 filt_vscsidetach(struct knote *kn)
588 {
589 	struct vscsi_softc *sc = kn->kn_hook;
590 	struct klist *klist = &sc->sc_sel.si_note;
591 
592 	mtx_enter(&sc->sc_sel_mtx);
593 	klist_remove_locked(klist, kn);
594 	mtx_leave(&sc->sc_sel_mtx);
595 
596 	device_unref(&sc->sc_dev);
597 }
598 
599 int
600 filt_vscsiread(struct knote *kn, long hint)
601 {
602 	struct vscsi_softc *sc = kn->kn_hook;
603 	int event = 0;
604 
605 	mtx_enter(&sc->sc_state_mtx);
606 	if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
607 		event = 1;
608 	mtx_leave(&sc->sc_state_mtx);
609 
610 	return (event);
611 }
612 
/*
 * Close /dev/vscsi: flip RUNNING -> CONFIG so no new commands are
 * queued, fail every in-flight command with XS_RESET, force-detach all
 * luns, wait for the last scsi_link reference to go away, then tear
 * down the ccb pool and return to CLOSED.
 */
int
vscsiclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct vscsi_ccb		*ccb;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	KASSERT(sc->sc_state == VSCSI_S_RUNNING);
	sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	scsi_activate(sc->sc_scsibus, -1, -1, DVACT_DEACTIVATE);

	/* abort commands userland had already picked up */
	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_t2i)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	/* and those still waiting to be picked up */
	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_i2t)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	scsi_req_detach(sc->sc_scsibus, -1, -1, DETACH_FORCE);

	/* wait for vscsi_free() to drop the last link reference */
	mtx_enter(&sc->sc_state_mtx);
	while (sc->sc_ref_count > 0) {
		msleep_nsec(&sc->sc_ref_count, &sc->sc_state_mtx,
		    PRIBIO, "vscsiref", INFSLP);
	}
	mtx_leave(&sc->sc_state_mtx);

	pool_destroy(&sc->sc_ccb_pool);

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = VSCSI_S_CLOSED;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}
659 
660 void *
661 vscsi_ccb_get(void *cookie)
662 {
663 	struct vscsi_softc		*sc = cookie;
664 	struct vscsi_ccb		*ccb = NULL;
665 
666 	ccb = pool_get(&sc->sc_ccb_pool, PR_NOWAIT);
667 	if (ccb != NULL) {
668 		ccb->ccb_tag = sc->sc_ccb_tag++;
669 		ccb->ccb_datalen = 0;
670 	}
671 
672 	return (ccb);
673 }
674 
675 void
676 vscsi_ccb_put(void *cookie, void *io)
677 {
678 	struct vscsi_softc		*sc = cookie;
679 	struct vscsi_ccb		*ccb = io;
680 
681 	pool_put(&sc->sc_ccb_pool, ccb);
682 }
683