/*	$OpenBSD: vscsi.c,v 1.30 2014/07/12 18:48:51 tedu Exp $ */

/*
 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/pool.h>
#include <sys/task.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/vscsivar.h>

int		vscsi_match(struct device *, void *, void *);
void		vscsi_attach(struct device *, struct device *, void *);
void		vscsi_shutdown(void *);

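/*
 * A vscsi_ccb tracks one SCSI transfer handed to the userland target.
 * vscsi_cmd() queues it on sc_ccb_i2t; the VSCSI_I2T ioctl hands it to
 * userland and moves it to sc_ccb_t2i, where VSCSI_DATA_READ/WRITE find
 * it by tag; the VSCSI_T2I ioctl removes it and completes the transfer.
 */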
struct vscsi_ccb {
	TAILQ_ENTRY(vscsi_ccb)	ccb_entry;
	int			ccb_tag;
	struct scsi_xfer	*ccb_xs;
	size_t			ccb_datalen;
};

TAILQ_HEAD(vscsi_ccb_list, vscsi_ccb);

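/*
 * Device state: vscsiopen() moves CLOSED -> CONFIG -> RUNNING and
 * vscsiclose() moves RUNNING -> CONFIG -> CLOSED.  Commands are only
 * accepted and devices only probed while the device is RUNNING.
 */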
enum vscsi_state {
	VSCSI_S_CLOSED,
	VSCSI_S_CONFIG,
	VSCSI_S_RUNNING
};

struct vscsi_softc {
	struct device		sc_dev;
	struct scsi_link	sc_link;
	struct scsibus_softc	*sc_scsibus;

	struct mutex		sc_state_mtx;
	enum vscsi_state	sc_state;
	u_int			sc_ref_count;
	struct pool		sc_ccb_pool;

	struct scsi_iopool	sc_iopool;

	struct vscsi_ccb_list	sc_ccb_i2t;
	struct vscsi_ccb_list	sc_ccb_t2i;
	int			sc_ccb_tag;
	struct mutex		sc_poll_mtx;
	struct rwlock		sc_ioc_lock;

	struct selinfo		sc_sel;
	struct mutex		sc_sel_mtx;
};

#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
#define DEV2SC(_d) ((struct vscsi_softc *)device_lookup(&vscsi_cd, minor(_d)))

struct cfattach vscsi_ca = {
	sizeof(struct vscsi_softc),
	vscsi_match,
	vscsi_attach
};

struct cfdriver vscsi_cd = {
	NULL,
	"vscsi",
	DV_DULL
};

void		vscsi_cmd(struct scsi_xfer *);
int		vscsi_probe(struct scsi_link *);
void		vscsi_free(struct scsi_link *);

struct scsi_adapter vscsi_switch = {
	vscsi_cmd,
	scsi_minphys,
	vscsi_probe,
	vscsi_free
};

int		vscsi_i2t(struct vscsi_softc *, struct vscsi_ioc_i2t *);
int		vscsi_data(struct vscsi_softc *, struct vscsi_ioc_data *, int);
int		vscsi_t2i(struct vscsi_softc *, struct vscsi_ioc_t2i *);
int		vscsi_devevent(struct vscsi_softc *, u_long,
		    struct vscsi_ioc_devevent *);
void		vscsi_devevent_task(void *, void *);
void		vscsi_done(struct vscsi_softc *, struct vscsi_ccb *);

void *		vscsi_ccb_get(void *);
void		vscsi_ccb_put(void *, void *);

void		filt_vscsidetach(struct knote *);
int		filt_vscsiread(struct knote *, long);

struct filterops vscsi_filtops = {
	1,
	NULL,
	filt_vscsidetach,
	filt_vscsiread
};


int
vscsi_match(struct device *parent, void *match, void *aux)
{
	return (1);
}

void
vscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct vscsi_softc		*sc = (struct vscsi_softc *)self;
	struct scsibus_attach_args	saa;

	printf("\n");

	mtx_init(&sc->sc_state_mtx, IPL_BIO);
	sc->sc_state = VSCSI_S_CLOSED;

	TAILQ_INIT(&sc->sc_ccb_i2t);
	TAILQ_INIT(&sc->sc_ccb_t2i);
	mtx_init(&sc->sc_poll_mtx, IPL_BIO);
	mtx_init(&sc->sc_sel_mtx, IPL_BIO);
	rw_init(&sc->sc_ioc_lock, "vscsiioc");
	scsi_iopool_init(&sc->sc_iopool, sc, vscsi_ccb_get, vscsi_ccb_put);

	sc->sc_link.adapter = &vscsi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = 256;
	sc->sc_link.adapter_buswidth = 256;
	sc->sc_link.openings = 16;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}

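/*
 * Adapter entry point.  While the device is RUNNING the xfer is queued
 * on sc_ccb_i2t and the userland target is woken up via selwakeup();
 * completion happens later from the VSCSI_T2I ioctl.  For SCSI_POLL
 * transfers this routine sleeps until vscsi_done() clears ccb_xs.
 */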
void
vscsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct vscsi_softc		*sc = link->adapter_softc;
	struct vscsi_ccb		*ccb = xs->io;
	int				polled = ISSET(xs->flags, SCSI_POLL);
	int				running = 0;

	if (ISSET(xs->flags, SCSI_POLL) && ISSET(xs->flags, SCSI_NOSLEEP)) {
		printf("%s: POLL && NOSLEEP for 0x%02x\n", DEVNAME(sc),
		    xs->cmd->opcode);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING) {
		running = 1;
		TAILQ_INSERT_TAIL(&sc->sc_ccb_i2t, ccb, ccb_entry);
	}
	mtx_leave(&sc->sc_state_mtx);

	if (!running) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	selwakeup(&sc->sc_sel);

	if (polled) {
		mtx_enter(&sc->sc_poll_mtx);
		while (ccb->ccb_xs != NULL)
			msleep(ccb, &sc->sc_poll_mtx, PRIBIO, "vscsipoll", 0);
		mtx_leave(&sc->sc_poll_mtx);
		scsi_done(xs);
	}
}

void
vscsi_done(struct vscsi_softc *sc, struct vscsi_ccb *ccb)
{
	struct scsi_xfer		*xs = ccb->ccb_xs;

	if (ISSET(xs->flags, SCSI_POLL)) {
		mtx_enter(&sc->sc_poll_mtx);
		ccb->ccb_xs = NULL;
		wakeup(ccb);
		mtx_leave(&sc->sc_poll_mtx);
	} else
		scsi_done(xs);
}

int
vscsi_probe(struct scsi_link *link)
{
	struct vscsi_softc		*sc = link->adapter_softc;
	int				rv = 0;

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING)
		sc->sc_ref_count++;
	else
		rv = ENXIO;
	mtx_leave(&sc->sc_state_mtx);

	return (rv);
}

void
vscsi_free(struct scsi_link *link)
{
	struct vscsi_softc		*sc = link->adapter_softc;

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_ref_count--;
	if (sc->sc_state != VSCSI_S_RUNNING && sc->sc_ref_count == 0)
		wakeup(&sc->sc_ref_count);
	mtx_leave(&sc->sc_state_mtx);
}

int
vscsiopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	enum vscsi_state		state = VSCSI_S_RUNNING;
	int				rv = 0;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state != VSCSI_S_CLOSED)
		rv = EBUSY;
	else
		sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	if (rv != 0) {
		device_unref(&sc->sc_dev);
		return (rv);
	}

	pool_init(&sc->sc_ccb_pool, sizeof(struct vscsi_ccb), 0, 0, 0,
	    "vscsiccb", NULL);
	pool_setipl(&sc->sc_ccb_pool, IPL_BIO);

	/* we need to guarantee some ccbs will be available for the iopool */
	rv = pool_prime(&sc->sc_ccb_pool, 8);
	if (rv != 0) {
		pool_destroy(&sc->sc_ccb_pool);
		state = VSCSI_S_CLOSED;
	}

	/* commit changes */
	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = state;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (rv);
}

int
vscsiioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				read = 0;
	int				err = 0;

	if (sc == NULL)
		return (ENXIO);

	rw_enter_write(&sc->sc_ioc_lock);

	switch (cmd) {
	case VSCSI_I2T:
		err = vscsi_i2t(sc, (struct vscsi_ioc_i2t *)addr);
		break;

	case VSCSI_DATA_READ:
		read = 1;
		/* FALLTHROUGH */
	case VSCSI_DATA_WRITE:
		err = vscsi_data(sc, (struct vscsi_ioc_data *)addr, read);
		break;

	case VSCSI_T2I:
		err = vscsi_t2i(sc, (struct vscsi_ioc_t2i *)addr);
		break;

	case VSCSI_REQPROBE:
	case VSCSI_REQDETACH:
		err = vscsi_devevent(sc, cmd,
		    (struct vscsi_ioc_devevent *)addr);
		break;

	default:
		err = ENOTTY;
		break;
	}

	rw_exit_write(&sc->sc_ioc_lock);

	device_unref(&sc->sc_dev);
	return (err);
}

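/*
 * Rough sketch of the userland half of this interface (e.g. iscsid(8));
 * the device node and variable names below are illustrative only:
 *
 *	fd = open("/dev/vscsi0", O_RDWR);
 *	wait for the fd to become readable via poll(2) or kqueue(2), then:
 *
 *	ioctl(fd, VSCSI_I2T, &i2t);		next queued command, or EAGAIN
 *	... execute i2t.cmd against the backing store ...
 *	if (i2t.direction == VSCSI_DIR_READ) {
 *		data.tag = i2t.tag;
 *		data.data = buf;		data produced by the command
 *		data.datalen = len;		may be split over several calls
 *		ioctl(fd, VSCSI_DATA_READ, &data);
 *	}
 *	t2i.tag = i2t.tag;
 *	t2i.status = VSCSI_STAT_DONE;		or VSCSI_STAT_SENSE plus sense
 *	ioctl(fd, VSCSI_T2I, &t2i);		completes the scsi_xfer
 */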
int
vscsi_i2t(struct vscsi_softc *sc, struct vscsi_ioc_i2t *i2t)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;

	mtx_enter(&sc->sc_state_mtx);
	ccb = TAILQ_FIRST(&sc->sc_ccb_i2t);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
	mtx_leave(&sc->sc_state_mtx);

	if (ccb == NULL)
		return (EAGAIN);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	i2t->tag = ccb->ccb_tag;
	i2t->target = link->target;
	i2t->lun = link->lun;
	bcopy(xs->cmd, &i2t->cmd, xs->cmdlen);
	i2t->cmdlen = xs->cmdlen;
	i2t->datalen = xs->datalen;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		i2t->direction = VSCSI_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		i2t->direction = VSCSI_DIR_WRITE;
		break;
	default:
		i2t->direction = VSCSI_DIR_NONE;
		break;
	}

	TAILQ_INSERT_TAIL(&sc->sc_ccb_t2i, ccb, ccb_entry);

	return (0);
}

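/*
 * Data is moved in one or more chunks; ccb_datalen accumulates how much
 * has been transferred so far and bounds each copy against xs->datalen.
 * "read" is from the initiator's point of view: VSCSI_DATA_READ copies
 * userland data into a SCSI_DATA_IN transfer, VSCSI_DATA_WRITE copies a
 * SCSI_DATA_OUT transfer out to userland.
 */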
int
vscsi_data(struct vscsi_softc *sc, struct vscsi_ioc_data *data, int read)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				xsread;
	u_int8_t			*buf;
	int				rv = EINVAL;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == data->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	xs = ccb->ccb_xs;

	if (data->datalen > xs->datalen - ccb->ccb_datalen)
		return (ENOMEM);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xsread = 1;
		break;
	case SCSI_DATA_OUT:
		xsread = 0;
		break;
	default:
		return (EINVAL);
	}

	if (read != xsread)
		return (EINVAL);

	buf = xs->data;
	buf += ccb->ccb_datalen;

	if (read)
		rv = copyin(data->data, buf, data->datalen);
	else
		rv = copyout(buf, data->data, data->datalen);

	if (rv == 0)
		ccb->ccb_datalen += data->datalen;

	return (rv);
}

int
vscsi_t2i(struct vscsi_softc *sc, struct vscsi_ioc_t2i *t2i)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;
	int				rv = 0;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == t2i->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	xs->resid = xs->datalen - ccb->ccb_datalen;
	xs->status = SCSI_OK;

	switch (t2i->status) {
	case VSCSI_STAT_DONE:
		xs->error = XS_NOERROR;
		break;
	case VSCSI_STAT_SENSE:
		xs->error = XS_SENSE;
		bcopy(&t2i->sense, &xs->sense, sizeof(xs->sense));
		break;
	case VSCSI_STAT_RESET:
		xs->error = XS_RESET;
		break;
	case VSCSI_STAT_ERR:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	vscsi_done(sc, ccb);

	return (rv);
}

struct vscsi_devevent_task {
	struct task t;
	struct vscsi_ioc_devevent de;
	u_long cmd;
};

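/*
 * Probe and detach requests are run from a task on the system work queue
 * rather than directly in the ioctl path; the softc is referenced while
 * the task is pending so it stays around until the task has run.
 */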
int
vscsi_devevent(struct vscsi_softc *sc, u_long cmd,
    struct vscsi_ioc_devevent *de)
{
	struct vscsi_devevent_task *dt;

	dt = malloc(sizeof(*dt), M_TEMP, M_WAITOK | M_CANFAIL);
	if (dt == NULL)
		return (ENOMEM);

	task_set(&dt->t, vscsi_devevent_task, sc, dt);
	dt->de = *de;
	dt->cmd = cmd;

	device_ref(&sc->sc_dev);
	task_add(systq, &dt->t);

	return (0);
}

void
vscsi_devevent_task(void *xsc, void *xdt)
{
	struct vscsi_softc *sc = xsc;
	struct vscsi_devevent_task *dt = xdt;
	int state;

	mtx_enter(&sc->sc_state_mtx);
	state = sc->sc_state;
	mtx_leave(&sc->sc_state_mtx);

	if (state != VSCSI_S_RUNNING)
		goto gone;

	switch (dt->cmd) {
	case VSCSI_REQPROBE:
		scsi_probe(sc->sc_scsibus, dt->de.target, dt->de.lun);
		break;
	case VSCSI_REQDETACH:
		scsi_detach(sc->sc_scsibus, dt->de.target, dt->de.lun,
		    DETACH_FORCE);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("unexpected vscsi_devevent cmd");
		/* NOTREACHED */
#endif
	}

gone:
	device_unref(&sc->sc_dev);

	free(dt, M_TEMP, 0);
}

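/*
 * poll(2) and kqueue(2) report the fd readable whenever commands are
 * waiting on the i2t queue; vscsi_cmd() wakes waiters via selwakeup().
 */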
int
vscsipoll(dev_t dev, int events, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				revents = 0;

	if (sc == NULL)
		return (ENXIO);

	if (events & (POLLIN | POLLRDNORM)) {
		mtx_enter(&sc->sc_state_mtx);
		if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
			revents |= events & (POLLIN | POLLRDNORM);
		mtx_leave(&sc->sc_state_mtx);
	}

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &sc->sc_sel);
	}

	device_unref(&sc->sc_dev);
	return (revents);
}

int
vscsikqfilter(dev_t dev, struct knote *kn)
{
	struct vscsi_softc *sc = DEV2SC(dev);
	struct klist *klist;

	if (sc == NULL)
		return (ENXIO);

	klist = &sc->sc_sel.si_note;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vscsi_filtops;
		break;
	default:
		device_unref(&sc->sc_dev);
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)sc;

	mtx_enter(&sc->sc_sel_mtx);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mtx_leave(&sc->sc_sel_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}

void
filt_vscsidetach(struct knote *kn)
{
	struct vscsi_softc *sc = (struct vscsi_softc *)kn->kn_hook;
	struct klist *klist = &sc->sc_sel.si_note;

	mtx_enter(&sc->sc_sel_mtx);
	SLIST_REMOVE(klist, kn, knote, kn_selnext);
	mtx_leave(&sc->sc_sel_mtx);
}

int
filt_vscsiread(struct knote *kn, long hint)
{
	struct vscsi_softc *sc = (struct vscsi_softc *)kn->kn_hook;
	int event = 0;

	mtx_enter(&sc->sc_state_mtx);
	if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
		event = 1;
	mtx_leave(&sc->sc_state_mtx);

	return (event);
}

int
vscsiclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct vscsi_ccb		*ccb;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	KASSERT(sc->sc_state == VSCSI_S_RUNNING);
	sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	scsi_activate(sc->sc_scsibus, -1, -1, DVACT_DEACTIVATE);

	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_t2i)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_i2t)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	scsi_req_detach(sc->sc_scsibus, -1, -1, DETACH_FORCE);

	mtx_enter(&sc->sc_state_mtx);
	while (sc->sc_ref_count > 0) {
		msleep(&sc->sc_ref_count, &sc->sc_state_mtx,
		    PRIBIO, "vscsiref", 0);
	}
	mtx_leave(&sc->sc_state_mtx);

	pool_destroy(&sc->sc_ccb_pool);

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = VSCSI_S_CLOSED;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}

void *
vscsi_ccb_get(void *cookie)
{
	struct vscsi_softc		*sc = cookie;
	struct vscsi_ccb		*ccb = NULL;

	ccb = pool_get(&sc->sc_ccb_pool, PR_NOWAIT);
	if (ccb != NULL) {
		ccb->ccb_tag = sc->sc_ccb_tag++;
		ccb->ccb_datalen = 0;
	}

	return (ccb);
}

void
vscsi_ccb_put(void *cookie, void *io)
{
	struct vscsi_softc		*sc = cookie;
	struct vscsi_ccb		*ccb = io;

	pool_put(&sc->sc_ccb_pool, ccb);
}