/*	$OpenBSD: vioblk.c,v 1.45 2025/01/16 10:33:27 sf Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * Copyright (c) 1998, 2001 Manuel Bouyer.
 * All rights reserved.
 *
 * This code is based in part on the NetBSD ld_virtio driver and the
 * OpenBSD vdsk driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>

#include <sys/device.h>
#include <sys/mutex.h>
#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pv/vioblkreg.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#define VIOBLK_DONE	-1

/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAXPHYS/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)
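/*
 * For example, on a machine with MAXPHYS of 64KB and a 4KB PAGE_SIZE
 * (a typical amd64 configuration), SEG_MAX works out to 17 payload
 * segments and ALLOC_SEGS to 19: one descriptor for the request header,
 * up to SEG_MAX for the data, and one for the status byte.
 */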

static const struct virtio_feature_name vioblk_feature_names[] = {
#if VIRTIO_DEBUG
	{ VIRTIO_BLK_F_BARRIER,		"Barrier" },
	{ VIRTIO_BLK_F_SIZE_MAX,	"SizeMax" },
	{ VIRTIO_BLK_F_SEG_MAX,		"SegMax" },
	{ VIRTIO_BLK_F_GEOMETRY,	"Geometry" },
	{ VIRTIO_BLK_F_RO,		"RO" },
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlkSize" },
	{ VIRTIO_BLK_F_SCSI,		"SCSI" },
	{ VIRTIO_BLK_F_FLUSH,		"Flush" },
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology" },
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE" },
	{ VIRTIO_BLK_F_MQ,		"MQ" },
	{ VIRTIO_BLK_F_DISCARD,		"Discard" },
	{ VIRTIO_BLK_F_WRITE_ZEROES,	"Write0s" },
	{ VIRTIO_BLK_F_LIFETIME,	"Lifetime" },
	{ VIRTIO_BLK_F_SECURE_ERASE,	"SecErase" },
#endif
	{ 0,				NULL }
};

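/*
 * Per-request bookkeeping.  Only the prefix up to VR_DMA_END (the virtio
 * request header plus the one-byte status) is visible to the device via
 * the vr_cmdsts DMA map; the remaining members are driver-private state.
 */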
struct virtio_blk_req {
	struct virtio_blk_req_hdr	 vr_hdr;
	uint8_t				 vr_status;
#define VR_DMA_END	offsetof(struct virtio_blk_req, vr_qe_index)
	int16_t				 vr_qe_index;
	int				 vr_len;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_cmdsts;
	bus_dmamap_t			 vr_payload;
	SLIST_ENTRY(virtio_blk_req)	 vr_list;
};

struct vioblk_softc {
	struct device		 sc_dev;
	struct virtio_softc	*sc_virtio;

	struct virtqueue         sc_vq[1];
	struct virtio_blk_req   *sc_reqs;
	bus_dma_segment_t        sc_reqs_segs[1];
	int			 sc_nreqs;

	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;
	SLIST_HEAD(, virtio_blk_req) sc_freelist;

	int			 sc_notify_on_empty;

	uint32_t		 sc_queued;

	uint64_t		 sc_capacity;
};

int	vioblk_match(struct device *, void *, void *);
void	vioblk_attach(struct device *, struct device *, void *);
int	vioblk_alloc_reqs(struct vioblk_softc *, int);
int	vioblk_vq_done(struct virtqueue *);
void	vioblk_vq_done1(struct vioblk_softc *, struct virtio_softc *,
			struct virtqueue *, int);
void	vioblk_reset(struct vioblk_softc *);

void	vioblk_scsi_cmd(struct scsi_xfer *);

void   *vioblk_req_get(void *);
void	vioblk_req_put(void *, void *);

void	vioblk_scsi_inq(struct scsi_xfer *);
void	vioblk_scsi_capacity(struct scsi_xfer *);
void	vioblk_scsi_capacity16(struct scsi_xfer *);
void	vioblk_scsi_done(struct scsi_xfer *, int);

const struct cfattach vioblk_ca = {
	sizeof(struct vioblk_softc),
	vioblk_match,
	vioblk_attach,
	NULL
};

struct cfdriver vioblk_cd = {
	NULL, "vioblk", DV_DULL
};

const struct scsi_adapter vioblk_switch = {
	vioblk_scsi_cmd, NULL, NULL, NULL, NULL
};

int
vioblk_match(struct device *parent, void *match, void *aux)
{
	struct virtio_attach_args *va = aux;
	if (va->va_devid == PCI_PRODUCT_VIRTIO_BLOCK)
		return 1;
	return 0;
}

#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)

void
vioblk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vioblk_softc *sc = (struct vioblk_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct virtio_attach_args *va = aux;
	struct scsibus_attach_args saa;
	int qsize;

	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_nvqs = 1;
	if (vsc->sc_child)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	sc->sc_virtio = vsc;
	vsc->sc_driver_features = VIRTIO_BLK_F_RO | VIRTIO_F_NOTIFY_ON_EMPTY |
	     VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_FLUSH;

	if (virtio_negotiate_features(vsc, vioblk_feature_names) != 0)
		goto err;

	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SIZE_MAX)) {
		uint32_t size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (size_max < PAGE_SIZE) {
			printf("\nMax segment size %u too low\n", size_max);
			goto err;
		}
	}

	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SEG_MAX)) {
		uint32_t seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (seg_max < SEG_MAX) {
			printf("\nMax number of segments %u too small\n",
			    seg_max);
			goto err;
		}
	}

	sc->sc_capacity = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, ALLOC_SEGS, "I/O request")
	    != 0) {
		printf("\nCan't alloc virtqueue\n");
		goto err;
	}
	qsize = sc->sc_vq[0].vq_num;
	sc->sc_vq[0].vq_done = vioblk_vq_done;

	if (virtio_has_feature(vsc, VIRTIO_F_NOTIFY_ON_EMPTY)) {
		virtio_stop_vq_intr(vsc, &sc->sc_vq[0]);
		sc->sc_notify_on_empty = 1;
	} else {
		sc->sc_notify_on_empty = 0;
	}

	sc->sc_queued = 0;

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioblk_req_get, vioblk_req_put);

	sc->sc_nreqs = vioblk_alloc_reqs(sc, qsize);
	if (sc->sc_nreqs == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}
	DNPRINTF(1, "%s: qsize: %d\n", __func__, qsize);
	printf("\n");

	saa.saa_adapter = &vioblk_switch;
	saa.saa_adapter_softc = self;
	saa.saa_adapter_buswidth = 1;
	saa.saa_luns = 1;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_openings = sc->sc_nreqs;
	saa.saa_pool = &sc->sc_iopool;
	if (virtio_has_feature(vsc, VIRTIO_BLK_F_RO))
		saa.saa_flags = SDEV_READONLY;
	else
		saa.saa_flags = 0;
	saa.saa_quirks = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	if (virtio_attach_finish(vsc, va) != 0)
		goto err;
	config_found(self, &saa, scsiprint);
	return;

err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * vioblk_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time, the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioblk_scsi_cmd(),
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioblk_req_get(void *cookie)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	return vr;
}

void
vioblk_req_put(void *cookie, void *io)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = io;

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the virtio_blk_req
	 * and should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

int
vioblk_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioblk_softc *sc = (struct vioblk_softc *)vsc->sc_child;
	struct vq_entry *qe;
	int slot;
	int ret = 0;

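	/*
	 * Without NOTIFY_ON_EMPTY, keep the queue interrupt disabled while
	 * draining completions, then re-enable it and dequeue once more to
	 * close the race with a completion that arrived in between.
	 */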
	if (!sc->sc_notify_on_empty)
		virtio_stop_vq_intr(vsc, vq);
	for (;;) {
		if (virtio_dequeue(vsc, vq, &slot, NULL) != 0) {
			if (sc->sc_notify_on_empty)
				break;
			virtio_start_vq_intr(vsc, vq);
			if (virtio_dequeue(vsc, vq, &slot, NULL) != 0)
				break;
		}
		qe = &vq->vq_entries[slot];
		vioblk_vq_done1(sc, vsc, vq, qe->qe_vr_index);
		ret = 1;
	}
	return ret;
}

void
vioblk_vq_done1(struct vioblk_softc *sc, struct virtio_softc *vsc,
    struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct scsi_xfer *xs = vr->vr_xs;
	KASSERT(vr->vr_len != VIOBLK_DONE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE);
	if (vr->vr_hdr.type != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, vr->vr_len,
		    (vr->vr_hdr.type == VIRTIO_BLK_T_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		DNPRINTF(1, "%s: EIO\n", __func__);
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		xs->error = XS_NOERROR;
		xs->resid = xs->datalen - vr->vr_len;
	}
	vr->vr_len = VIOBLK_DONE;
	scsi_done(xs);
}

void
vioblk_reset(struct vioblk_softc *sc)
{
	int i;

	/* reset device to stop DMA */
	virtio_reset(sc->sc_virtio);

	/* finish requests that have been completed */
	virtio_check_vq(sc->sc_virtio, &sc->sc_vq[0]);

	/* abort all remaining requests */
	for (i = 0; i < sc->sc_nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		struct scsi_xfer *xs = vr->vr_xs;

		if (vr->vr_len == VIOBLK_DONE)
			continue;

		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		scsi_done(xs);
	}
}

void
vioblk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_blk_req *vr;
	int len, s, timeout, isread, slot, ret, nsegs;
	int error = XS_DRIVER_STUFFUP;
	struct scsi_rw *rw;
	struct scsi_rw_10 *rw10;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba = 0;
	u_int32_t sector_count = 0;
	uint8_t operation;

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
		operation = VIRTIO_BLK_T_IN;
		isread = 1;
		break;
	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		operation = VIRTIO_BLK_T_OUT;
		isread = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (!virtio_has_feature(vsc, VIRTIO_BLK_F_FLUSH)) {
			vioblk_scsi_done(xs, XS_NOERROR);
			return;
		}
		operation = VIRTIO_BLK_T_FLUSH;
		break;

	case INQUIRY:
		vioblk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vioblk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vioblk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vioblk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd.opcode);
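		/* FALLTHROUGH */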
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has the same
	 * layout as the 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)&xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
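		/* In a 6-byte CDB, a transfer length of 0 means 256 sectors. */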
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rw10 = (struct scsi_rw_10 *)&xs->cmd;
		lba = _4btol(rw10->addr);
		sector_count = _2btol(rw10->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)&xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)&xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	s = splbio();
	vr = xs->io;
	slot = vr->vr_qe_index;
	if (operation != VIRTIO_BLK_T_FLUSH) {
		len = MIN(xs->datalen, sector_count * VIRTIO_BLK_SECTOR_SIZE);
		ret = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
		    xs->data, len, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT));
		if (ret) {
			printf("%s: bus_dmamap_load: %d\n", __func__, ret);
			error = XS_DRIVER_STUFFUP;
			goto out_done;
		}
		nsegs = vr->vr_payload->dm_nsegs + 2;
	} else {
		len = 0;
		nsegs = 2;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	virtio_enqueue_trim(vq, slot, nsegs);

	vr->vr_xs = xs;
	vr->vr_hdr.type = operation;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = lba;
	vr->vr_len = len;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	if (operation != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

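	/*
	 * Build the standard virtio-blk descriptor chain: the request
	 * header (device-readable), then the payload buffers, and finally
	 * the one-byte status that the device writes back.
	 */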
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), 1);
	if (operation != VIRTIO_BLK_T_FLUSH)
		virtio_enqueue(vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
	sc->sc_queued++;

	if (!ISSET(xs->flags, SCSI_POLL)) {
		/* check if some xfers are done: */
		if (sc->sc_queued > 1)
			virtio_check_vq(sc->sc_virtio, vq);
		splx(s);
		return;
	}

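	/*
	 * SCSI_POLL: spin for up to 15 seconds; vioblk_vq_done1() marks the
	 * request by setting vr_len to VIOBLK_DONE when it completes.
	 */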
	timeout = 15 * 1000;
	do {
		if (virtio_poll_intr(vsc) && vr->vr_len == VIOBLK_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (timeout <= 0) {
		printf("%s: SCSI_POLL timed out\n", __func__);
		vioblk_reset(sc);
		virtio_reinit_start(vsc);
		virtio_reinit_end(vsc);
	}
	splx(s);
	return;

out_done:
	splx(s);
	vioblk_scsi_done(xs, error);
}

void
vioblk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;
	struct scsi_inquiry_data inqd;

	if (ISSET(inq->flags, SI_EVPD)) {
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inqd, sizeof(inqd));

	inqd.device = T_DIRECT;
	inqd.version = SCSI_REV_SPC3;
	inqd.response_format = SID_SCSI2_RESPONSE;
	inqd.additional_length = SID_SCSI2_ALEN;
	inqd.flags |= SID_CmdQue;
	bcopy("VirtIO  ", inqd.vendor, sizeof(inqd.vendor));
	bcopy("Block Device    ", inqd.product, sizeof(inqd.product));

	scsi_copy_internal_data(xs, &inqd, sizeof(inqd));

	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_capacity - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_capacity - 1, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

int
vioblk_alloc_reqs(struct vioblk_softc *sc, int qsize)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	struct vring_desc *vd;
	int allocsize, nreqs, r, rsegs, slot, i;
	void *vaddr;

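	/*
	 * With indirect descriptors each request needs only one slot in
	 * the ring; without them, every request consumes ALLOC_SEGS
	 * descriptors directly from the ring, so fewer requests fit.
	 */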
	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = sizeof(struct virtio_blk_req) * nreqs;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory allocation failed, size %d, error %d\n",
		    allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory map failed, error %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DNPRINTF(2, "%s: vd[%d].next = %d should be "
				    "%d\n", __func__, r, vd[r].next,
				    (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DNPRINTF(2, "%s: reserved slots are contiguous "
			    "(good!)\n", __func__);
		}

		vr->vr_qe_index = slot;
		vq->vq_entries[slot].qe_vr_index = i;
		vr->vr_len = VIOBLK_DONE;

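		/*
		 * vr_cmdsts maps the device-visible prefix of the request:
		 * the header plus the status byte, i.e. VR_DMA_END bytes.
		 */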
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
		    VR_DMA_END, 1, VR_DMA_END, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_cmdsts);
		if (r != 0) {
			printf("cmd dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
		    &vr->vr_hdr, VR_DMA_END, NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("command dmamap load failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat, MAXPHYS,
		    SEG_MAX, MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			printf("payload dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}
	return nreqs;

err_reqs:
	for (i = 0; i < nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, (caddr_t)sc->sc_reqs,
	    allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return 0;
}
746