/*	$OpenBSD: vscsi.c,v 1.39 2015/08/27 18:54:02 deraadt Exp $ */

/*
 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

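/*
 * vscsi is a SCSI host adapter whose commands are serviced by a userland
 * process instead of hardware.  Each scsi_xfer issued against the attached
 * scsibus is queued on the "i2t" (initiator to target) list and handed out
 * through ioctls on the character device; userland moves the data and
 * completes the command via the "t2i" path.  The device node name
 * (commonly /dev/vscsi0) is established outside this file and is mentioned
 * here only for orientation.
 */
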
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/pool.h>
#include <sys/task.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/vscsivar.h>

int		vscsi_match(struct device *, void *, void *);
void		vscsi_attach(struct device *, struct device *, void *);
void		vscsi_shutdown(void *);

struct vscsi_ccb {
	TAILQ_ENTRY(vscsi_ccb)	ccb_entry;
	int			ccb_tag;
	struct scsi_xfer	*ccb_xs;
	size_t			ccb_datalen;
};

TAILQ_HEAD(vscsi_ccb_list, vscsi_ccb);

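/*
 * Softc state machine: CLOSED means no userland process has the device
 * open, CONFIG is the transient state used while vscsiopen()/vscsiclose()
 * set up or tear down resources, and RUNNING means a userland process is
 * servicing commands.
 */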
enum vscsi_state {
	VSCSI_S_CLOSED,
	VSCSI_S_CONFIG,
	VSCSI_S_RUNNING
};

struct vscsi_softc {
	struct device		sc_dev;
	struct scsi_link	sc_link;
	struct scsibus_softc	*sc_scsibus;

	struct mutex		sc_state_mtx;
	enum vscsi_state	sc_state;
	u_int			sc_ref_count;
	struct pool		sc_ccb_pool;

	struct scsi_iopool	sc_iopool;

	struct vscsi_ccb_list	sc_ccb_i2t;
	struct vscsi_ccb_list	sc_ccb_t2i;
	int			sc_ccb_tag;
	struct mutex		sc_poll_mtx;
	struct rwlock		sc_ioc_lock;

	struct selinfo		sc_sel;
	struct mutex		sc_sel_mtx;
};

#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
#define DEV2SC(_d) ((struct vscsi_softc *)device_lookup(&vscsi_cd, minor(_d)))

struct cfattach vscsi_ca = {
	sizeof(struct vscsi_softc),
	vscsi_match,
	vscsi_attach
};

struct cfdriver vscsi_cd = {
	NULL,
	"vscsi",
	DV_DULL
};

void		vscsi_cmd(struct scsi_xfer *);
int		vscsi_probe(struct scsi_link *);
void		vscsi_free(struct scsi_link *);

struct scsi_adapter vscsi_switch = {
	vscsi_cmd,
	scsi_minphys,
	vscsi_probe,
	vscsi_free
};

int		vscsi_i2t(struct vscsi_softc *, struct vscsi_ioc_i2t *);
int		vscsi_data(struct vscsi_softc *, struct vscsi_ioc_data *, int);
int		vscsi_t2i(struct vscsi_softc *, struct vscsi_ioc_t2i *);
int		vscsi_devevent(struct vscsi_softc *, u_long,
		    struct vscsi_ioc_devevent *);
void		vscsi_devevent_task(void *);
void		vscsi_done(struct vscsi_softc *, struct vscsi_ccb *);

void *		vscsi_ccb_get(void *);
void		vscsi_ccb_put(void *, void *);

void		filt_vscsidetach(struct knote *);
int		filt_vscsiread(struct knote *, long);

struct filterops vscsi_filtops = {
	1,
	NULL,
	filt_vscsidetach,
	filt_vscsiread
};


int
vscsi_match(struct device *parent, void *match, void *aux)
{
	return (1);
}

void
vscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct vscsi_softc		*sc = (struct vscsi_softc *)self;
	struct scsibus_attach_args	saa;

	printf("\n");

	mtx_init(&sc->sc_state_mtx, IPL_BIO);
	sc->sc_state = VSCSI_S_CLOSED;

	TAILQ_INIT(&sc->sc_ccb_i2t);
	TAILQ_INIT(&sc->sc_ccb_t2i);
	mtx_init(&sc->sc_poll_mtx, IPL_BIO);
	mtx_init(&sc->sc_sel_mtx, IPL_BIO);
	rw_init(&sc->sc_ioc_lock, "vscsiioc");
	scsi_iopool_init(&sc->sc_iopool, sc, vscsi_ccb_get, vscsi_ccb_put);

	sc->sc_link.adapter = &vscsi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = 256;
	sc->sc_link.adapter_buswidth = 256;
	sc->sc_link.openings = 16;
	sc->sc_link.pool = &sc->sc_iopool;

	memset(&saa, 0, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}

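/*
 * vscsi_cmd() is the adapter entry point for scsi_xfers.  If a userland
 * process is attached (VSCSI_S_RUNNING) the ccb is queued on the i2t list
 * and poll/kevent waiters are woken; otherwise the xfer fails with
 * XS_DRIVER_STUFFUP.  SCSI_POLL xfers sleep here until userland completes
 * them, which is why POLL combined with NOSLEEP cannot be honoured.
 */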
void
vscsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct vscsi_softc		*sc = link->adapter_softc;
	struct vscsi_ccb		*ccb = xs->io;
	int				polled = ISSET(xs->flags, SCSI_POLL);
	int				running = 0;

	if (ISSET(xs->flags, SCSI_POLL) && ISSET(xs->flags, SCSI_NOSLEEP)) {
		printf("%s: POLL && NOSLEEP for 0x%02x\n", DEVNAME(sc),
		    xs->cmd->opcode);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING) {
		running = 1;
		TAILQ_INSERT_TAIL(&sc->sc_ccb_i2t, ccb, ccb_entry);
	}
	mtx_leave(&sc->sc_state_mtx);

	if (!running) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	selwakeup(&sc->sc_sel);

	if (polled) {
		mtx_enter(&sc->sc_poll_mtx);
		while (ccb->ccb_xs != NULL)
			msleep(ccb, &sc->sc_poll_mtx, PRIBIO, "vscsipoll", 0);
		mtx_leave(&sc->sc_poll_mtx);
		scsi_done(xs);
	}
}

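/*
 * vscsi_done() completes a ccb: polled xfers are woken up back in
 * vscsi_cmd(), everything else goes straight to scsi_done().
 */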
void
vscsi_done(struct vscsi_softc *sc, struct vscsi_ccb *ccb)
{
	struct scsi_xfer		*xs = ccb->ccb_xs;

	if (ISSET(xs->flags, SCSI_POLL)) {
		mtx_enter(&sc->sc_poll_mtx);
		ccb->ccb_xs = NULL;
		wakeup(ccb);
		mtx_leave(&sc->sc_poll_mtx);
	} else
		scsi_done(xs);
}

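/*
 * vscsi_probe()/vscsi_free() track how many scsi_links reference the
 * adapter so vscsiclose() can wait for the last one to go away before
 * tearing down the ccb pool.
 */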
int
vscsi_probe(struct scsi_link *link)
{
	struct vscsi_softc		*sc = link->adapter_softc;
	int				rv = 0;

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING)
		sc->sc_ref_count++;
	else
		rv = ENXIO;
	mtx_leave(&sc->sc_state_mtx);

	return (rv);
}

void
vscsi_free(struct scsi_link *link)
{
	struct vscsi_softc		*sc = link->adapter_softc;

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_ref_count--;
	if (sc->sc_state != VSCSI_S_RUNNING && sc->sc_ref_count == 0)
		wakeup(&sc->sc_ref_count);
	mtx_leave(&sc->sc_state_mtx);
}

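/*
 * Opening the device claims the adapter for a single userland process:
 * the state moves CLOSED -> CONFIG while the ccb pool is created and
 * primed, then to RUNNING on success.
 */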
int
vscsiopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	enum vscsi_state		state = VSCSI_S_RUNNING;
	int				rv = 0;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state != VSCSI_S_CLOSED)
		rv = EBUSY;
	else
		sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	if (rv != 0) {
		device_unref(&sc->sc_dev);
		return (rv);
	}

	pool_init(&sc->sc_ccb_pool, sizeof(struct vscsi_ccb), 0, 0, 0,
	    "vscsiccb", NULL);
	pool_setipl(&sc->sc_ccb_pool, IPL_BIO);

	/* we need to guarantee some ccbs will be available for the iopool */
	rv = pool_prime(&sc->sc_ccb_pool, 8);
	if (rv != 0) {
		pool_destroy(&sc->sc_ccb_pool);
		state = VSCSI_S_CLOSED;
	}

	/* commit changes */
	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = state;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (rv);
}

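/*
 * ioctl protocol, serialized by sc_ioc_lock: VSCSI_I2T hands the oldest
 * queued command to userland, VSCSI_DATA_READ/VSCSI_DATA_WRITE move the
 * data for it in chunks, VSCSI_T2I completes it, and VSCSI_REQPROBE/
 * VSCSI_REQDETACH request (de)attachment of targets on the scsibus.
 */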
int
vscsiioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				read = 0;
	int				err = 0;

	if (sc == NULL)
		return (ENXIO);

	rw_enter_write(&sc->sc_ioc_lock);

	switch (cmd) {
	case VSCSI_I2T:
		err = vscsi_i2t(sc, (struct vscsi_ioc_i2t *)addr);
		break;

	case VSCSI_DATA_READ:
		read = 1;
	case VSCSI_DATA_WRITE:
		err = vscsi_data(sc, (struct vscsi_ioc_data *)addr, read);
		break;

	case VSCSI_T2I:
		err = vscsi_t2i(sc, (struct vscsi_ioc_t2i *)addr);
		break;

	case VSCSI_REQPROBE:
	case VSCSI_REQDETACH:
		err = vscsi_devevent(sc, cmd,
		    (struct vscsi_ioc_devevent *)addr);
		break;

	default:
		err = ENOTTY;
		break;
	}

	rw_exit_write(&sc->sc_ioc_lock);

	device_unref(&sc->sc_dev);
	return (err);
}

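/*
 * VSCSI_I2T: dequeue the next ccb from the i2t list, describe the command
 * (tag, target/lun, CDB, expected data length and direction) to userland,
 * and park the ccb on the t2i list until its data is transferred and it is
 * completed.
 */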
int
vscsi_i2t(struct vscsi_softc *sc, struct vscsi_ioc_i2t *i2t)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;

	mtx_enter(&sc->sc_state_mtx);
	ccb = TAILQ_FIRST(&sc->sc_ccb_i2t);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
	mtx_leave(&sc->sc_state_mtx);

	if (ccb == NULL)
		return (EAGAIN);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	i2t->tag = ccb->ccb_tag;
	i2t->target = link->target;
	i2t->lun = link->lun;
	memcpy(&i2t->cmd, xs->cmd, xs->cmdlen);
	i2t->cmdlen = xs->cmdlen;
	i2t->datalen = xs->datalen;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		i2t->direction = VSCSI_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		i2t->direction = VSCSI_DIR_WRITE;
		break;
	default:
		i2t->direction = VSCSI_DIR_NONE;
		break;
	}

	TAILQ_INSERT_TAIL(&sc->sc_ccb_t2i, ccb, ccb_entry);

	return (0);
}

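/*
 * VSCSI_DATA_READ/VSCSI_DATA_WRITE: copy a chunk of data between userland
 * and the xfer's buffer at the current offset.  The direction requested
 * must match the direction of the xfer, and the chunk may not run past
 * xs->datalen.
 */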
int
vscsi_data(struct vscsi_softc *sc, struct vscsi_ioc_data *data, int read)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				xsread;
	u_int8_t			*buf;
	int				rv = EINVAL;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == data->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	xs = ccb->ccb_xs;

	if (data->datalen > xs->datalen - ccb->ccb_datalen)
		return (ENOMEM);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xsread = 1;
		break;
	case SCSI_DATA_OUT:
		xsread = 0;
		break;
	default:
		return (EINVAL);
	}

	if (read != xsread)
		return (EINVAL);

	buf = xs->data;
	buf += ccb->ccb_datalen;

	if (read)
		rv = copyin(data->data, buf, data->datalen);
	else
		rv = copyout(buf, data->data, data->datalen);

	if (rv == 0)
		ccb->ccb_datalen += data->datalen;

	return (rv);
}

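/*
 * VSCSI_T2I: userland reports completion.  The ccb is removed from the
 * t2i list, the residual is computed from how much data was actually
 * transferred, the reported status is mapped onto an xs error code
 * (including sense data for VSCSI_STAT_SENSE), and the xfer is finished.
 */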
int
vscsi_t2i(struct vscsi_softc *sc, struct vscsi_ioc_t2i *t2i)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;
	int				rv = 0;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == t2i->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	xs->resid = xs->datalen - ccb->ccb_datalen;
	xs->status = SCSI_OK;

	switch (t2i->status) {
	case VSCSI_STAT_DONE:
		xs->error = XS_NOERROR;
		break;
	case VSCSI_STAT_SENSE:
		xs->error = XS_SENSE;
		memcpy(&xs->sense, &t2i->sense, sizeof(xs->sense));
		break;
	case VSCSI_STAT_RESET:
		xs->error = XS_RESET;
		break;
	case VSCSI_STAT_ERR:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	vscsi_done(sc, ccb);

	return (rv);
}

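/*
 * VSCSI_REQPROBE/VSCSI_REQDETACH are deferred to a task on the system
 * task queue; the task calls scsi_probe()/scsi_detach() against the
 * attached scsibus and holds a device reference until it has run.
 */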
struct vscsi_devevent_task {
	struct vscsi_softc *sc;
	struct task t;
	struct vscsi_ioc_devevent de;
	u_long cmd;
};

int
vscsi_devevent(struct vscsi_softc *sc, u_long cmd,
    struct vscsi_ioc_devevent *de)
{
	struct vscsi_devevent_task *dt;

	dt = malloc(sizeof(*dt), M_TEMP, M_WAITOK | M_CANFAIL);
	if (dt == NULL)
		return (ENOMEM);

	task_set(&dt->t, vscsi_devevent_task, dt);
	dt->sc = sc;
	dt->de = *de;
	dt->cmd = cmd;

	device_ref(&sc->sc_dev);
	task_add(systq, &dt->t);

	return (0);
}

void
vscsi_devevent_task(void *xdt)
{
	struct vscsi_devevent_task *dt = xdt;
	struct vscsi_softc *sc = dt->sc;
	int state;

	mtx_enter(&sc->sc_state_mtx);
	state = sc->sc_state;
	mtx_leave(&sc->sc_state_mtx);

	if (state != VSCSI_S_RUNNING)
		goto gone;

	switch (dt->cmd) {
	case VSCSI_REQPROBE:
		scsi_probe(sc->sc_scsibus, dt->de.target, dt->de.lun);
		break;
	case VSCSI_REQDETACH:
		scsi_detach(sc->sc_scsibus, dt->de.target, dt->de.lun,
		    DETACH_FORCE);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("unexpected vscsi_devevent cmd");
		/* NOTREACHED */
#endif
	}

gone:
	device_unref(&sc->sc_dev);

	free(dt, M_TEMP, sizeof(*dt));
}

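/*
 * poll(2)/kqueue(2) report the device readable whenever there is at least
 * one command waiting on the i2t list for userland to pick up.
 */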
int
vscsipoll(dev_t dev, int events, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				revents = 0;

	if (sc == NULL)
		return (ENXIO);

	if (events & (POLLIN | POLLRDNORM)) {
		mtx_enter(&sc->sc_state_mtx);
		if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
			revents |= events & (POLLIN | POLLRDNORM);
		mtx_leave(&sc->sc_state_mtx);
	}

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &sc->sc_sel);
	}

	device_unref(&sc->sc_dev);
	return (revents);
}

int
vscsikqfilter(dev_t dev, struct knote *kn)
{
	struct vscsi_softc *sc = DEV2SC(dev);
	struct klist *klist;

	if (sc == NULL)
		return (ENXIO);

	klist = &sc->sc_sel.si_note;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vscsi_filtops;
		break;
	default:
		device_unref(&sc->sc_dev);
		return (EINVAL);
	}

	kn->kn_hook = sc;

	mtx_enter(&sc->sc_sel_mtx);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mtx_leave(&sc->sc_sel_mtx);

	/* device ref is given to the knote in the klist */

	return (0);
}

void
filt_vscsidetach(struct knote *kn)
{
	struct vscsi_softc *sc = kn->kn_hook;
	struct klist *klist = &sc->sc_sel.si_note;

	mtx_enter(&sc->sc_sel_mtx);
	SLIST_REMOVE(klist, kn, knote, kn_selnext);
	mtx_leave(&sc->sc_sel_mtx);

	device_unref(&sc->sc_dev);
}

int
filt_vscsiread(struct knote *kn, long hint)
{
	struct vscsi_softc *sc = kn->kn_hook;
	int event = 0;

	mtx_enter(&sc->sc_state_mtx);
	if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
		event = 1;
	mtx_leave(&sc->sc_state_mtx);

	return (event);
}

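/*
 * Closing the device aborts everything in flight: queued and outstanding
 * ccbs are completed with XS_RESET, the attached targets are detached,
 * and the ccb pool is destroyed once the last scsi_link reference drops.
 */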
int
vscsiclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct vscsi_ccb		*ccb;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	KASSERT(sc->sc_state == VSCSI_S_RUNNING);
	sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	scsi_activate(sc->sc_scsibus, -1, -1, DVACT_DEACTIVATE);

	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_t2i)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_i2t)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	scsi_req_detach(sc->sc_scsibus, -1, -1, DETACH_FORCE);

	mtx_enter(&sc->sc_state_mtx);
	while (sc->sc_ref_count > 0) {
		msleep(&sc->sc_ref_count, &sc->sc_state_mtx,
		    PRIBIO, "vscsiref", 0);
	}
	mtx_leave(&sc->sc_state_mtx);

	pool_destroy(&sc->sc_ccb_pool);

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = VSCSI_S_CLOSED;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}

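/*
 * iopool backend: ccbs come from the per-open pool created in vscsiopen()
 * and are stamped with an increasing tag so userland can refer to them
 * across the I2T/DATA/T2I ioctls.
 */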
void *
vscsi_ccb_get(void *cookie)
{
	struct vscsi_softc		*sc = cookie;
	struct vscsi_ccb		*ccb = NULL;

	ccb = pool_get(&sc->sc_ccb_pool, PR_NOWAIT);
	if (ccb != NULL) {
		ccb->ccb_tag = sc->sc_ccb_tag++;
		ccb->ccb_datalen = 0;
	}

	return (ccb);
}

void
vscsi_ccb_put(void *cookie, void *io)
{
	struct vscsi_softc		*sc = cookie;
	struct vscsi_ccb		*ccb = io;

	pool_put(&sc->sc_ccb_pool, ccb);
}
689