xref: /openbsd-src/sys/dev/vscsi.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: vscsi.c,v 1.48 2020/02/20 16:56:52 visa Exp $ */
2 
3 /*
4  * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/kernel.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/conf.h>
25 #include <sys/queue.h>
26 #include <sys/rwlock.h>
27 #include <sys/pool.h>
28 #include <sys/task.h>
29 #include <sys/ioctl.h>
30 #include <sys/poll.h>
31 #include <sys/selinfo.h>
32 
33 #include <scsi/scsi_all.h>
34 #include <scsi/scsiconf.h>
35 
36 #include <dev/vscsivar.h>
37 
38 int		vscsi_match(struct device *, void *, void *);
39 void		vscsi_attach(struct device *, struct device *, void *);
40 void		vscsi_shutdown(void *);
41 
/*
 * Per-command context. A ccb is allocated from sc_ccb_pool via the
 * iopool, tagged, and shuttled between the initiator-to-target and
 * target-to-initiator queues as userland services the command.
 */
struct vscsi_ccb {
	TAILQ_ENTRY(vscsi_ccb)	ccb_entry;	/* on sc_ccb_i2t or sc_ccb_t2i */
	int			ccb_tag;	/* id handed to userland to match replies */
	struct scsi_xfer	*ccb_xs;	/* xfer being serviced; NULLed on poll completion */
	size_t			ccb_datalen;	/* bytes of xs->data transferred so far */
};
48 
TAILQ_HEAD(vscsi_ccb_list, vscsi_ccb);

/*
 * Device lifecycle: CLOSED until /dev/vscsi is opened, CONFIG while
 * open/close is setting up or tearing down, RUNNING while userland is
 * actively servicing commands.
 */
enum vscsi_state {
	VSCSI_S_CLOSED,
	VSCSI_S_CONFIG,
	VSCSI_S_RUNNING
};
56 
struct vscsi_softc {
	struct device		sc_dev;
	struct scsi_link	sc_link;
	struct scsibus_softc	*sc_scsibus;

	struct mutex		sc_state_mtx;	/* protects state, refs, ccb queues */
	enum vscsi_state	sc_state;
	u_int			sc_ref_count;	/* scsi_link probe refs; close waits on 0 */
	struct pool		sc_ccb_pool;	/* created on open, destroyed on close */

	struct scsi_iopool	sc_iopool;

	struct vscsi_ccb_list	sc_ccb_i2t;	/* cmds waiting for userland pickup */
	struct vscsi_ccb_list	sc_ccb_t2i;	/* cmds userland is working on */
	int			sc_ccb_tag;	/* monotonically increasing tag source */
	struct mutex		sc_poll_mtx;	/* sleep/wakeup channel for SCSI_POLL */
	struct rwlock		sc_ioc_lock;	/* serializes ioctl handlers */

	struct selinfo		sc_sel;		/* poll/kevent readers */
	struct mutex		sc_sel_mtx;	/* protects sc_sel knote list */
};
78 
#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
/* device_lookup returns a referenced device; callers must device_unref */
#define DEV2SC(_d) ((struct vscsi_softc *)device_lookup(&vscsi_cd, minor(_d)))
81 
struct cfattach vscsi_ca = {
	sizeof(struct vscsi_softc),
	vscsi_match,
	vscsi_attach
};

struct cfdriver vscsi_cd = {
	NULL,
	"vscsi",
	DV_DULL
};

void		vscsi_cmd(struct scsi_xfer *);
int		vscsi_probe(struct scsi_link *);
void		vscsi_free(struct scsi_link *);

/* adapter entry points handed to the midlayer via sc_link */
struct scsi_adapter vscsi_switch = {
	vscsi_cmd, NULL, vscsi_probe, vscsi_free, NULL
};

int		vscsi_i2t(struct vscsi_softc *, struct vscsi_ioc_i2t *);
int		vscsi_data(struct vscsi_softc *, struct vscsi_ioc_data *, int);
int		vscsi_t2i(struct vscsi_softc *, struct vscsi_ioc_t2i *);
int		vscsi_devevent(struct vscsi_softc *, u_long,
		    struct vscsi_ioc_devevent *);
void		vscsi_devevent_task(void *);
void		vscsi_done(struct vscsi_softc *, struct vscsi_ccb *);

void *		vscsi_ccb_get(void *);
void		vscsi_ccb_put(void *, void *);

void		filt_vscsidetach(struct knote *);
int		filt_vscsiread(struct knote *, long);

const struct filterops vscsi_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_vscsidetach,
	.f_event	= filt_vscsiread,
};
122 
123 
/* Pseudo-device: always attaches. */
int
vscsi_match(struct device *parent, void *cfdata, void *aux)
{
	return (1);
}
129 
/*
 * Autoconf attach: initialize all locks, queues, and the iopool, then
 * attach a 256-target scsibus beneath us.  The ccb pool itself is not
 * created here; it is set up when the device node is opened.
 */
void
vscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct vscsi_softc		*sc = (struct vscsi_softc *)self;
	struct scsibus_attach_args	saa;

	printf("\n");

	mtx_init(&sc->sc_state_mtx, IPL_BIO);
	sc->sc_state = VSCSI_S_CLOSED;

	TAILQ_INIT(&sc->sc_ccb_i2t);
	TAILQ_INIT(&sc->sc_ccb_t2i);
	mtx_init(&sc->sc_poll_mtx, IPL_BIO);
	mtx_init(&sc->sc_sel_mtx, IPL_BIO);
	rw_init(&sc->sc_ioc_lock, "vscsiioc");
	scsi_iopool_init(&sc->sc_iopool, sc, vscsi_ccb_get, vscsi_ccb_put);

	sc->sc_link.adapter = &vscsi_switch;
	sc->sc_link.adapter_softc = sc;
	/* adapter_target == buswidth: no real initiator id on the bus */
	sc->sc_link.adapter_target = 256;
	sc->sc_link.adapter_buswidth = 256;
	sc->sc_link.openings = 16;
	sc->sc_link.pool = &sc->sc_iopool;

	memset(&saa, 0, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}
161 
/*
 * Midlayer command entry point.  Queue the xfer on the i2t list for
 * userland to pick up via VSCSI_I2T, wake any poll/kevent waiters, and
 * for SCSI_POLL commands sleep until vscsi_done() clears ccb_xs.
 */
void
vscsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct vscsi_softc		*sc = link->adapter_softc;
	struct vscsi_ccb		*ccb = xs->io;
	int				polled = ISSET(xs->flags, SCSI_POLL);
	int				running = 0;

	/*
	 * POLL requires sleeping on completion below, so it cannot be
	 * combined with NOSLEEP; fail such requests outright.
	 */
	if (ISSET(xs->flags, SCSI_POLL) && ISSET(xs->flags, SCSI_NOSLEEP)) {
		printf("%s: POLL && NOSLEEP for 0x%02x\n", DEVNAME(sc),
		    xs->cmd->opcode);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	/* only accept commands while userland is servicing the device */
	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING) {
		running = 1;
		TAILQ_INSERT_TAIL(&sc->sc_ccb_i2t, ccb, ccb_entry);
	}
	mtx_leave(&sc->sc_state_mtx);

	if (!running) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	/* notify userland readers that a command is available */
	selwakeup(&sc->sc_sel);

	if (polled) {
		/* vscsi_done() clears ccb_xs and wakes us under sc_poll_mtx */
		mtx_enter(&sc->sc_poll_mtx);
		while (ccb->ccb_xs != NULL)
			msleep_nsec(ccb, &sc->sc_poll_mtx, PRIBIO, "vscsipoll",
			    INFSLP);
		mtx_leave(&sc->sc_poll_mtx);
		scsi_done(xs);
	}
}
205 
206 void
207 vscsi_done(struct vscsi_softc *sc, struct vscsi_ccb *ccb)
208 {
209 	struct scsi_xfer		*xs = ccb->ccb_xs;
210 
211 	if (ISSET(xs->flags, SCSI_POLL)) {
212 		mtx_enter(&sc->sc_poll_mtx);
213 		ccb->ccb_xs = NULL;
214 		wakeup(ccb);
215 		mtx_leave(&sc->sc_poll_mtx);
216 	} else
217 		scsi_done(xs);
218 }
219 
220 int
221 vscsi_probe(struct scsi_link *link)
222 {
223 	struct vscsi_softc		*sc = link->adapter_softc;
224 	int				rv = 0;
225 
226 	mtx_enter(&sc->sc_state_mtx);
227 	if (sc->sc_state == VSCSI_S_RUNNING)
228 		sc->sc_ref_count++;
229 	else
230 		rv = ENXIO;
231 	mtx_leave(&sc->sc_state_mtx);
232 
233 	return (rv);
234 }
235 
236 void
237 vscsi_free(struct scsi_link *link)
238 {
239 	struct vscsi_softc		*sc = link->adapter_softc;
240 
241 	mtx_enter(&sc->sc_state_mtx);
242 	sc->sc_ref_count--;
243 	if (sc->sc_state != VSCSI_S_RUNNING && sc->sc_ref_count == 0)
244 		wakeup(&sc->sc_ref_count);
245 	mtx_leave(&sc->sc_state_mtx);
246 }
247 
/*
 * Open the device node.  Moves CLOSED -> CONFIG under the state mutex
 * (failing with EBUSY if already open), creates and primes the ccb
 * pool outside the mutex, then commits to RUNNING -- or back to CLOSED
 * if priming failed.
 */
int
vscsiopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	enum vscsi_state		state = VSCSI_S_RUNNING;
	int				rv = 0;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state != VSCSI_S_CLOSED)
		rv = EBUSY;
	else
		sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	if (rv != 0) {
		device_unref(&sc->sc_dev);
		return (rv);
	}

	pool_init(&sc->sc_ccb_pool, sizeof(struct vscsi_ccb), 0, IPL_BIO, 0,
	    "vscsiccb", NULL);

	/* we need to guarantee some ccbs will be available for the iopool */
	rv = pool_prime(&sc->sc_ccb_pool, 8);
	if (rv != 0) {
		pool_destroy(&sc->sc_ccb_pool);
		state = VSCSI_S_CLOSED;
	}

	/* commit changes */
	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = state;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (rv);
}
288 
/*
 * ioctl dispatch for the userland daemon.  All handlers run under
 * sc_ioc_lock so only one ioctl manipulates the ccb queues at a time.
 */
int
vscsiioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				read = 0;
	int				err = 0;

	if (sc == NULL)
		return (ENXIO);

	rw_enter_write(&sc->sc_ioc_lock);

	switch (cmd) {
	case VSCSI_I2T:
		err = vscsi_i2t(sc, (struct vscsi_ioc_i2t *)addr);
		break;

	case VSCSI_DATA_READ:
		read = 1;
		/* FALLTHROUGH */
	case VSCSI_DATA_WRITE:
		err = vscsi_data(sc, (struct vscsi_ioc_data *)addr, read);
		break;

	case VSCSI_T2I:
		err = vscsi_t2i(sc, (struct vscsi_ioc_t2i *)addr);
		break;

	case VSCSI_REQPROBE:
	case VSCSI_REQDETACH:
		err = vscsi_devevent(sc, cmd,
		    (struct vscsi_ioc_devevent *)addr);
		break;

	default:
		err = ENOTTY;
		break;
	}

	rw_exit_write(&sc->sc_ioc_lock);

	device_unref(&sc->sc_dev);
	return (err);
}
332 
/*
 * VSCSI_I2T: hand the oldest queued command to userland.  Dequeues a
 * ccb from the i2t list, copies the CDB/addressing/direction into the
 * ioctl argument, and parks the ccb on the t2i list until userland
 * completes it with VSCSI_T2I.  Returns EAGAIN when nothing is queued.
 */
int
vscsi_i2t(struct vscsi_softc *sc, struct vscsi_ioc_i2t *i2t)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;

	mtx_enter(&sc->sc_state_mtx);
	ccb = TAILQ_FIRST(&sc->sc_ccb_i2t);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
	mtx_leave(&sc->sc_state_mtx);

	if (ccb == NULL)
		return (EAGAIN);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	i2t->tag = ccb->ccb_tag;
	i2t->target = link->target;
	i2t->lun = link->lun;
	memcpy(&i2t->cmd, xs->cmd, xs->cmdlen);
	i2t->cmdlen = xs->cmdlen;
	i2t->datalen = xs->datalen;

	/* translate midlayer data direction flags for userland */
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		i2t->direction = VSCSI_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		i2t->direction = VSCSI_DIR_WRITE;
		break;
	default:
		i2t->direction = VSCSI_DIR_NONE;
		break;
	}

	/* await completion (and data transfer) on the t2i list */
	TAILQ_INSERT_TAIL(&sc->sc_ccb_t2i, ccb, ccb_entry);

	return (0);
}
375 
/*
 * VSCSI_DATA_READ/WRITE: move a chunk of xfer data between userland
 * and the xs data buffer for a command on the t2i list.  `read` is 1
 * for device-to-initiator data (userland supplies it via copyin).
 * Transfers accumulate in ccb_datalen; multiple calls per command are
 * allowed up to xs->datalen.
 */
int
vscsi_data(struct vscsi_softc *sc, struct vscsi_ioc_data *data, int read)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				xsread;
	u_int8_t			*buf;
	int				rv = EINVAL;

	/* look the command up by the tag handed out in vscsi_i2t() */
	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == data->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	xs = ccb->ccb_xs;

	/* refuse to transfer past the end of the xfer buffer */
	if (data->datalen > xs->datalen - ccb->ccb_datalen)
		return (ENOMEM);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xsread = 1;
		break;
	case SCSI_DATA_OUT:
		xsread = 0;
		break;
	default:
		return (EINVAL);
	}

	/* the ioctl direction must match the command's direction */
	if (read != xsread)
		return (EINVAL);

	buf = xs->data;
	buf += ccb->ccb_datalen;

	if (read)
		rv = copyin(data->data, buf, data->datalen);
	else
		rv = copyout(buf, data->data, data->datalen);

	if (rv == 0)
		ccb->ccb_datalen += data->datalen;

	return (rv);
}
424 
/*
 * VSCSI_T2I: userland reports completion of a command.  Find the ccb
 * by tag on the t2i list, translate the userland status into midlayer
 * xs->error (copying sense data if present), and complete the xfer.
 */
int
vscsi_t2i(struct vscsi_softc *sc, struct vscsi_ioc_t2i *t2i)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				rv = 0;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == t2i->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);

	xs = ccb->ccb_xs;

	/* resid reflects how much data userland actually transferred */
	xs->resid = xs->datalen - ccb->ccb_datalen;
	xs->status = SCSI_OK;

	switch (t2i->status) {
	case VSCSI_STAT_DONE:
		xs->error = XS_NOERROR;
		break;
	case VSCSI_STAT_SENSE:
		xs->error = XS_SENSE;
		memcpy(&xs->sense, &t2i->sense, sizeof(xs->sense));
		break;
	case VSCSI_STAT_RESET:
		xs->error = XS_RESET;
		break;
	case VSCSI_STAT_ERR:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	vscsi_done(sc, ccb);

	return (rv);
}
467 
/*
 * Deferred probe/detach request, run from the system taskq so the
 * (possibly sleeping) scsi_probe/scsi_detach happen outside the ioctl
 * path.  Holds a device ref; freed by vscsi_devevent_task().
 */
struct vscsi_devevent_task {
	struct vscsi_softc *sc;
	struct task t;
	struct vscsi_ioc_devevent de;	/* copied by value from the ioctl arg */
	u_long cmd;			/* VSCSI_REQPROBE or VSCSI_REQDETACH */
};
474 
/*
 * VSCSI_REQPROBE/REQDETACH: schedule a probe or detach of a target/lun
 * on the system taskq.  Takes a device reference that the task releases.
 */
int
vscsi_devevent(struct vscsi_softc *sc, u_long cmd,
    struct vscsi_ioc_devevent *de)
{
	struct vscsi_devevent_task *dt;

	dt = malloc(sizeof(*dt), M_TEMP, M_WAITOK | M_CANFAIL);
	if (dt == NULL)
		return (ENOMEM);

	task_set(&dt->t, vscsi_devevent_task, dt);
	dt->sc = sc;
	dt->de = *de;
	dt->cmd = cmd;

	/* keep sc alive until the task runs; released in the task */
	device_ref(&sc->sc_dev);
	task_add(systq, &dt->t);

	return (0);
}
495 
/*
 * Taskq half of vscsi_devevent(): perform the requested scsi_probe()
 * or scsi_detach() if the device is still running, then drop the
 * device ref and free the request.
 */
void
vscsi_devevent_task(void *xdt)
{
	struct vscsi_devevent_task *dt = xdt;
	struct vscsi_softc *sc = dt->sc;
	int state;

	/*
	 * Snapshot the state under the mutex; NOTE(review): the state
	 * may change again before scsi_probe/scsi_detach run below --
	 * presumably the midlayer tolerates that; confirm.
	 */
	mtx_enter(&sc->sc_state_mtx);
	state = sc->sc_state;
	mtx_leave(&sc->sc_state_mtx);

	if (state != VSCSI_S_RUNNING)
		goto gone;

	switch (dt->cmd) {
	case VSCSI_REQPROBE:
		scsi_probe(sc->sc_scsibus, dt->de.target, dt->de.lun);
		break;
	case VSCSI_REQDETACH:
		scsi_detach(sc->sc_scsibus, dt->de.target, dt->de.lun,
		    DETACH_FORCE);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("unexpected vscsi_devevent cmd");
		/* NOTREACHED */
#endif
	}

gone:
	device_unref(&sc->sc_dev);

	free(dt, M_TEMP, sizeof(*dt));
}
530 
531 int
532 vscsipoll(dev_t dev, int events, struct proc *p)
533 {
534 	struct vscsi_softc		*sc = DEV2SC(dev);
535 	int				revents = 0;
536 
537 	if (sc == NULL)
538 		return (POLLERR);
539 
540 	if (events & (POLLIN | POLLRDNORM)) {
541 		mtx_enter(&sc->sc_state_mtx);
542 		if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
543 			revents |= events & (POLLIN | POLLRDNORM);
544 		mtx_leave(&sc->sc_state_mtx);
545 	}
546 
547 	if (revents == 0) {
548 		if (events & (POLLIN | POLLRDNORM))
549 			selrecord(p, &sc->sc_sel);
550 	}
551 
552 	device_unref(&sc->sc_dev);
553 	return (revents);
554 }
555 
/*
 * kqueue attach: only EVFILT_READ is supported.  On success the device
 * reference taken by DEV2SC is kept by the knote and released in
 * filt_vscsidetach().
 */
int
vscsikqfilter(dev_t dev, struct knote *kn)
{
	struct vscsi_softc *sc = DEV2SC(dev);
	struct klist *klist;

	if (sc == NULL)
		return (ENXIO);

	klist = &sc->sc_sel.si_note;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vscsi_filtops;
		break;
	default:
		device_unref(&sc->sc_dev);
		return (EINVAL);
	}

	kn->kn_hook = sc;

	mtx_enter(&sc->sc_sel_mtx);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mtx_leave(&sc->sc_sel_mtx);

	/* device ref is given to the knote in the klist */

	return (0);
}
586 
587 void
588 filt_vscsidetach(struct knote *kn)
589 {
590 	struct vscsi_softc *sc = kn->kn_hook;
591 	struct klist *klist = &sc->sc_sel.si_note;
592 
593 	mtx_enter(&sc->sc_sel_mtx);
594 	SLIST_REMOVE(klist, kn, knote, kn_selnext);
595 	mtx_leave(&sc->sc_sel_mtx);
596 
597 	device_unref(&sc->sc_dev);
598 }
599 
600 int
601 filt_vscsiread(struct knote *kn, long hint)
602 {
603 	struct vscsi_softc *sc = kn->kn_hook;
604 	int event = 0;
605 
606 	mtx_enter(&sc->sc_state_mtx);
607 	if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
608 		event = 1;
609 	mtx_leave(&sc->sc_state_mtx);
610 
611 	return (event);
612 }
613 
/*
 * Close the device node.  Moves RUNNING -> CONFIG so no new commands
 * are queued, deactivates and aborts everything outstanding with
 * XS_RESET, force-detaches all targets, waits for probe references to
 * drain, then destroys the ccb pool and returns to CLOSED.
 */
int
vscsiclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct vscsi_ccb		*ccb;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	KASSERT(sc->sc_state == VSCSI_S_RUNNING);
	sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	scsi_activate(sc->sc_scsibus, -1, -1, DVACT_DEACTIVATE);

	/* fail commands userland was servicing */
	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_t2i)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	/* fail commands userland never picked up */
	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_i2t)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	scsi_req_detach(sc->sc_scsibus, -1, -1, DETACH_FORCE);

	/* wait for vscsi_probe() references to go away; see vscsi_free() */
	mtx_enter(&sc->sc_state_mtx);
	while (sc->sc_ref_count > 0) {
		msleep_nsec(&sc->sc_ref_count, &sc->sc_state_mtx,
		    PRIBIO, "vscsiref", INFSLP);
	}
	mtx_leave(&sc->sc_state_mtx);

	pool_destroy(&sc->sc_ccb_pool);

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = VSCSI_S_CLOSED;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}
660 
661 void *
662 vscsi_ccb_get(void *cookie)
663 {
664 	struct vscsi_softc		*sc = cookie;
665 	struct vscsi_ccb		*ccb = NULL;
666 
667 	ccb = pool_get(&sc->sc_ccb_pool, PR_NOWAIT);
668 	if (ccb != NULL) {
669 		ccb->ccb_tag = sc->sc_ccb_tag++;
670 		ccb->ccb_datalen = 0;
671 	}
672 
673 	return (ccb);
674 }
675 
676 void
677 vscsi_ccb_put(void *cookie, void *io)
678 {
679 	struct vscsi_softc		*sc = cookie;
680 	struct vscsi_ccb		*ccb = io;
681 
682 	pool_put(&sc->sc_ccb_pool, ccb);
683 }
684