/*	$OpenBSD: vioblk.c,v 1.17 2020/02/14 15:56:47 krw Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * Copyright (c) 1998, 2001 Manuel Bouyer.
 * All rights reserved.
 *
 * This code is based in part on the NetBSD ld_virtio driver and the
 * OpenBSD vdsk driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/bus.h>

#include <sys/device.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/mutex.h>
#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pv/vioblkreg.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

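/* Sentinel value for vr_len marking a request that is not in flight. */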
#define VIOBLK_DONE	-1

/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAXPHYS/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)

struct virtio_feature_name vioblk_feature_names[] = {
#if VIRTIO_DEBUG
	{ VIRTIO_BLK_F_BARRIER,		"Barrier" },
	{ VIRTIO_BLK_F_SIZE_MAX,	"SizeMax" },
	{ VIRTIO_BLK_F_SEG_MAX,		"SegMax" },
	{ VIRTIO_BLK_F_GEOMETRY,	"Geometry" },
	{ VIRTIO_BLK_F_RO,		"RO" },
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlkSize" },
	{ VIRTIO_BLK_F_SCSI,		"SCSI" },
	{ VIRTIO_BLK_F_FLUSH,		"Flush" },
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology" },
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE" },
	{ VIRTIO_BLK_F_DISCARD,		"Discard" },
	{ VIRTIO_BLK_F_WRITE_ZEROES,	"Write0s" },
#endif
	{ 0,				NULL }
};

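/*
 * Per-request state. vr_hdr and vr_status are the parts the device sees;
 * they are covered by the vr_cmdsts DMA map (VR_DMA_END marks where the
 * DMA'd portion ends). The remaining members are driver-private.
 */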
struct virtio_blk_req {
	struct virtio_blk_req_hdr	 vr_hdr;
	uint8_t				 vr_status;
#define VR_DMA_END	offsetof(struct virtio_blk_req, vr_qe_index)
	int16_t				 vr_qe_index;
	int				 vr_len;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_cmdsts;
	bus_dmamap_t			 vr_payload;
	SLIST_ENTRY(virtio_blk_req)	 vr_list;
};

struct vioblk_softc {
	struct device		 sc_dev;
	struct virtio_softc	*sc_virtio;

	struct virtqueue         sc_vq[1];
	struct virtio_blk_req   *sc_reqs;
	bus_dma_segment_t        sc_reqs_segs[1];

	struct scsi_link	 sc_link;
	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;
	SLIST_HEAD(, virtio_blk_req) sc_freelist;

	int			 sc_notify_on_empty;

	uint32_t		 sc_queued;

	uint64_t		 sc_capacity;
};

int	vioblk_match(struct device *, void *, void *);
void	vioblk_attach(struct device *, struct device *, void *);
int	vioblk_alloc_reqs(struct vioblk_softc *, int);
int	vioblk_vq_done(struct virtqueue *);
void	vioblk_vq_done1(struct vioblk_softc *, struct virtio_softc *,
			struct virtqueue *, int);
void	vioblk_reset(struct vioblk_softc *);

void	vioblk_scsi_cmd(struct scsi_xfer *);
int	vioblk_dev_probe(struct scsi_link *);
void	vioblk_dev_free(struct scsi_link *);

void   *vioblk_req_get(void *);
void	vioblk_req_put(void *, void *);

void	vioblk_scsi_inq(struct scsi_xfer *);
void	vioblk_scsi_capacity(struct scsi_xfer *);
void	vioblk_scsi_capacity16(struct scsi_xfer *);
void	vioblk_scsi_done(struct scsi_xfer *, int);

struct cfattach vioblk_ca = {
	sizeof(struct vioblk_softc),
	vioblk_match,
	vioblk_attach,
	NULL
};

struct cfdriver vioblk_cd = {
	NULL, "vioblk", DV_DULL
};

struct scsi_adapter vioblk_switch = {
	vioblk_scsi_cmd, NULL, vioblk_dev_probe, vioblk_dev_free, NULL
};

int vioblk_match(struct device *parent, void *match, void *aux)
{
	struct virtio_softc *va = aux;
	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
		return 1;
	return 0;
}

#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)

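/*
 * Negotiate features, validate the device's segment limits against our
 * own, read the capacity, set up the single I/O virtqueue and the request
 * pool, then attach a scsibus on top.
 */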
void
vioblk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vioblk_softc *sc = (struct vioblk_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct scsibus_attach_args saa;
	int qsize;

	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = 0;
	if (vsc->sc_child)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	sc->sc_virtio = vsc;
	vsc->sc_driver_features = VIRTIO_BLK_F_RO | VIRTIO_F_NOTIFY_ON_EMPTY |
	     VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_FLUSH;

	virtio_negotiate_features(vsc, vioblk_feature_names);

	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SIZE_MAX)) {
		uint32_t size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (size_max < PAGE_SIZE) {
			printf("\nMax segment size %u too low\n", size_max);
			goto err;
		}
	}

	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SEG_MAX)) {
		uint32_t seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (seg_max < SEG_MAX) {
			printf("\nMax number of segments %d too small\n",
			    seg_max);
			goto err;
		}
	}

	sc->sc_capacity = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAXPHYS, ALLOC_SEGS,
	    "I/O request") != 0) {
		printf("\nCan't alloc virtqueue\n");
		goto err;
	}
	qsize = sc->sc_vq[0].vq_num;
	sc->sc_vq[0].vq_done = vioblk_vq_done;

	if (virtio_has_feature(vsc, VIRTIO_F_NOTIFY_ON_EMPTY)) {
		virtio_stop_vq_intr(vsc, &sc->sc_vq[0]);
		sc->sc_notify_on_empty = 1;
	} else {
		sc->sc_notify_on_empty = 0;
	}

	sc->sc_queued = 0;

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioblk_req_get, vioblk_req_put);

	sc->sc_link.openings = vioblk_alloc_reqs(sc, qsize);
	if (sc->sc_link.openings == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}

	sc->sc_link.adapter = &vioblk_switch;
	sc->sc_link.pool = &sc->sc_iopool;
	sc->sc_link.adapter_softc = self;
	sc->sc_link.adapter_buswidth = 2;
	sc->sc_link.luns = 1;
	sc->sc_link.adapter_target = 2;
	DNPRINTF(1, "%s: qsize: %d\n", __func__, qsize);
	if (virtio_has_feature(vsc, VIRTIO_BLK_F_RO))
		sc->sc_link.flags |= SDEV_READONLY;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;
	printf("\n");
	config_found(self, &saa, scsiprint);

	return;
err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * vioblk_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioblk_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioblk_req_get(void *cookie)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	return vr;
}

void
vioblk_req_put(void *cookie, void *io)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = io;

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the virtio_blk_req
	 * and should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

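/*
 * Interrupt handler for the I/O virtqueue: dequeue and complete every
 * finished request. Without NOTIFY_ON_EMPTY, interrupts are disabled
 * while draining and re-enabled (with a re-check) before returning, to
 * close the race with late completions.
 */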
int
vioblk_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioblk_softc *sc = (struct vioblk_softc *)vsc->sc_child;
	struct vq_entry *qe;
	int slot;
	int ret = 0;

	if (!sc->sc_notify_on_empty)
		virtio_stop_vq_intr(vsc, vq);
	for (;;) {
		if (virtio_dequeue(vsc, vq, &slot, NULL) != 0) {
			if (sc->sc_notify_on_empty)
				break;
			virtio_start_vq_intr(vsc, vq);
			if (virtio_dequeue(vsc, vq, &slot, NULL) != 0)
				break;
		}
		qe = &vq->vq_entries[slot];
		vioblk_vq_done1(sc, vsc, vq, qe->qe_vr_index);
		ret = 1;
	}
	return ret;
}

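/*
 * Complete a single request: sync and unload its DMA maps, translate the
 * virtio status byte into xs->error/xs->resid, mark the request idle and
 * hand the xfer back to the SCSI layer.
 */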
void
vioblk_vq_done1(struct vioblk_softc *sc, struct virtio_softc *vsc,
    struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct scsi_xfer *xs = vr->vr_xs;
	KASSERT(vr->vr_len != VIOBLK_DONE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE);
	if (vr->vr_hdr.type != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, vr->vr_len,
		    (vr->vr_hdr.type == VIRTIO_BLK_T_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		DNPRINTF(1, "%s: EIO\n", __func__);
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		xs->error = XS_NOERROR;
		xs->resid = xs->datalen - vr->vr_len;
	}
	vr->vr_len = VIOBLK_DONE;
	scsi_done(xs);
}

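/*
 * Stop all DMA by resetting the device, complete whatever already
 * finished, then fail every request still marked in flight. Used when a
 * polled command times out.
 */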
void
vioblk_reset(struct vioblk_softc *sc)
{
	int i;

	/* reset device to stop DMA */
	virtio_reset(sc->sc_virtio);

	/* finish requests that have been completed */
	vioblk_vq_done(&sc->sc_vq[0]);

	/* abort all remaining requests */
	for (i = 0; i < sc->sc_link.openings; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		struct scsi_xfer *xs = vr->vr_xs;

		if (vr->vr_len == VIOBLK_DONE)
			continue;

		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		scsi_done(xs);
	}
}

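/*
 * Translate a SCSI command into a virtio block request. READ/WRITE and
 * SYNCHRONIZE CACHE are queued to the device; INQUIRY, READ CAPACITY and
 * a few housekeeping commands are emulated locally.
 */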
void
vioblk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_blk_req *vr;
	int len, s, timeout, isread, slot, ret, nsegs;
	int error = XS_DRIVER_STUFFUP;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba = 0;
	u_int32_t sector_count = 0;
	uint8_t operation;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = VIRTIO_BLK_T_IN;
		isread = 1;
		break;
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = VIRTIO_BLK_T_OUT;
		isread = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (!virtio_has_feature(vsc, VIRTIO_BLK_F_FLUSH)) {
			vioblk_scsi_done(xs, XS_NOERROR);
			return;
		}
		operation = VIRTIO_BLK_T_FLUSH;
		break;

	case INQUIRY:
		vioblk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vioblk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vioblk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vioblk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has same
	 * layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	s = splbio();
	vr = xs->io;
	slot = vr->vr_qe_index;
	if (operation != VIRTIO_BLK_T_FLUSH) {
		len = MIN(xs->datalen, sector_count * VIRTIO_BLK_SECTOR_SIZE);
		ret = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
		    xs->data, len, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT));
		if (ret) {
			printf("%s: bus_dmamap_load: %d", __func__, ret);
			error = XS_DRIVER_STUFFUP;
			goto out_done;
		}
		nsegs = vr->vr_payload->dm_nsegs + 2;
	} else {
		len = 0;
		nsegs = 2;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	virtio_enqueue_trim(vq, slot, nsegs);

	vr->vr_xs = xs;
	vr->vr_hdr.type = operation;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = lba;
	vr->vr_len = len;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	if (operation != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

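	/*
	 * Descriptor chain: the request header (read by the device), the
	 * data payload for READ/WRITE, and the trailing status byte
	 * (written back by the device).
	 */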
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), 1);
	if (operation != VIRTIO_BLK_T_FLUSH)
		virtio_enqueue(vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
	sc->sc_queued++;

	if (!ISSET(xs->flags, SCSI_POLL)) {
		/* check if some xfers are done: */
		if (sc->sc_queued > 1)
			vioblk_vq_done(vq);
		splx(s);
		return;
	}

	timeout = 15 * 1000;
	do {
		if (virtio_poll_intr(vsc) && vr->vr_len == VIOBLK_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (timeout <= 0) {
		printf("%s: SCSI_POLL timed out\n", __func__);
		vioblk_reset(sc);
		virtio_reinit_start(vsc);
		virtio_reinit_end(vsc);
	}
	splx(s);
	return;

out_done:
	splx(s);
	vioblk_scsi_done(xs, error);
}

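/* Emulate INQUIRY locally; vital product data (EVPD) pages are not supported. */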
void
vioblk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;
	struct scsi_inquiry_data inqd;

	if (ISSET(inq->flags, SI_EVPD)) {
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inqd, sizeof(inqd));

	inqd.device = T_DIRECT;
	inqd.version = 0x05; /* SPC-3 */
	inqd.response_format = 2;
	inqd.additional_length = 32;
	inqd.flags |= SID_CmdQue;
	bcopy("VirtIO  ", inqd.vendor, sizeof(inqd.vendor));
	bcopy("Block Device    ", inqd.product, sizeof(inqd.product));

	bcopy(&inqd, xs->data, MIN(sizeof(inqd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

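/*
 * Emulate READ CAPACITY(10)/(16) from the capacity read out of the device
 * config space; sc_capacity counts VIRTIO_BLK_SECTOR_SIZE-byte sectors and
 * the returned address is the last readable block.
 */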
void
vioblk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_capacity - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_capacity - 1, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

int
vioblk_dev_probe(struct scsi_link *link)
{
	KASSERT(link->lun == 0);
	if (link->target == 0)
		return (0);
	return (ENODEV);
}

void
vioblk_dev_free(struct scsi_link *link)
{
	printf("%s\n", __func__);
}

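/*
 * Allocate the request array in DMA-able memory and, for each request,
 * pre-reserve a contiguous block of virtqueue descriptors and create the
 * command/status and payload DMA maps. Returns the number of requests set
 * up, which becomes the adapter's openings.
 */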
int
vioblk_alloc_reqs(struct vioblk_softc *sc, int qsize)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	struct vring_desc *vd;
	int allocsize, nreqs, r, rsegs, slot, i;
	void *vaddr;

	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = sizeof(struct virtio_blk_req) * nreqs;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory allocation failed, size %d, error %d\n",
		    allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory map failed, error %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DNPRINTF(2, "%s: vd[%d].next = %d should be "
				    "%d\n", __func__, r, vd[r].next,
				    (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DNPRINTF(2, "%s: reserved slots are contiguous "
			    "(good!)\n", __func__);
		}

		vr->vr_qe_index = slot;
		vq->vq_entries[slot].qe_vr_index = i;
		vr->vr_len = VIOBLK_DONE;

		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
		    VR_DMA_END, 1, VR_DMA_END, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_cmdsts);
		if (r != 0) {
			printf("cmd dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
		    &vr->vr_hdr, VR_DMA_END, NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("command dmamap load failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat, MAXPHYS,
		    SEG_MAX, MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			printf("payload dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}
	return nreqs;

err_reqs:
	for (i = 0; i < nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, (caddr_t)sc->sc_reqs,
	    allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return 0;
}