/*	$OpenBSD: vioblk.c,v 1.12 2019/03/24 18:22:36 sf Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * Copyright (c) 1998, 2001 Manuel Bouyer.
 * All rights reserved.
 *
 * This code is based in part on the NetBSD ld_virtio driver and the
 * OpenBSD vdsk driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/bus.h>

#include <sys/device.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/mutex.h>
#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pv/vioblkreg.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#define VIOBLK_DONE	-1

#define MAX_XFER	MAX(MAXPHYS,MAXBSIZE)
/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAX_XFER/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)

struct virtio_feature_name vioblk_feature_names[] = {
#if VIRTIO_DEBUG
	{ VIRTIO_BLK_F_BARRIER,		"Barrier" },
	{ VIRTIO_BLK_F_SIZE_MAX,	"SizeMax" },
	{ VIRTIO_BLK_F_SEG_MAX,		"SegMax" },
	{ VIRTIO_BLK_F_GEOMETRY,	"Geometry" },
	{ VIRTIO_BLK_F_RO,		"RO" },
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlkSize" },
	{ VIRTIO_BLK_F_SCSI,		"SCSI" },
	{ VIRTIO_BLK_F_FLUSH,		"Flush" },
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology" },
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE" },
	{ VIRTIO_BLK_F_DISCARD,		"Discard" },
	{ VIRTIO_BLK_F_WRITE_ZEROES,	"Write0s" },
#endif
	{ 0,				NULL }
};

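/*
 * Per-request state. vr_hdr and vr_status are the parts exchanged with
 * the device over DMA (the device reads the header and writes the status
 * byte); everything from vr_qe_index onwards is driver-private
 * bookkeeping, which is why VR_DMA_END stops there. vr_len holds the
 * payload length of an in-flight request and is set to VIOBLK_DONE once
 * the request has completed.
 */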
struct virtio_blk_req {
	struct virtio_blk_req_hdr	 vr_hdr;
	uint8_t				 vr_status;
#define VR_DMA_END	offsetof(struct virtio_blk_req, vr_qe_index)
	int16_t				 vr_qe_index;
	int				 vr_len;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_cmdsts;
	bus_dmamap_t			 vr_payload;
	SLIST_ENTRY(virtio_blk_req)	 vr_list;
};

struct vioblk_softc {
	struct device		 sc_dev;
	struct virtio_softc	*sc_virtio;

	struct virtqueue         sc_vq[1];
	struct virtio_blk_req   *sc_reqs;
	bus_dma_segment_t        sc_reqs_segs[1];

	struct scsi_adapter	 sc_switch;
	struct scsi_link	 sc_link;
	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;
	SLIST_HEAD(, virtio_blk_req) sc_freelist;

	int			 sc_notify_on_empty;

	uint32_t		 sc_queued;

	uint64_t		 sc_capacity;
};

int	vioblk_match(struct device *, void *, void *);
void	vioblk_attach(struct device *, struct device *, void *);
int	vioblk_alloc_reqs(struct vioblk_softc *, int);
int	vioblk_vq_done(struct virtqueue *);
void	vioblk_vq_done1(struct vioblk_softc *, struct virtio_softc *,
			struct virtqueue *, int);
void	vioblk_reset(struct vioblk_softc *);

void	vioblk_scsi_cmd(struct scsi_xfer *);
int	vioblk_dev_probe(struct scsi_link *);
void	vioblk_dev_free(struct scsi_link *);

void   *vioblk_req_get(void *);
void	vioblk_req_put(void *, void *);

void	vioblk_scsi_inq(struct scsi_xfer *);
void	vioblk_scsi_capacity(struct scsi_xfer *);
void	vioblk_scsi_capacity16(struct scsi_xfer *);
void	vioblk_scsi_done(struct scsi_xfer *, int);

struct cfattach vioblk_ca = {
	sizeof(struct vioblk_softc),
	vioblk_match,
	vioblk_attach,
	NULL
};

struct cfdriver vioblk_cd = {
	NULL, "vioblk", DV_DULL
};

int
vioblk_match(struct device *parent, void *match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
		return 1;
	return 0;
}

#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)

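/*
 * Attach: negotiate features, sanity-check the device-reported limits
 * against what this driver needs (at least PAGE_SIZE per segment and
 * SEG_MAX data segments per request), read the capacity, set up the
 * single virtqueue and the request pool, and attach a scsibus on top.
 */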
void
vioblk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vioblk_softc *sc = (struct vioblk_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct scsibus_attach_args saa;
	uint64_t features;
	int qsize;

	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = 0;
	if (vsc->sc_child)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	sc->sc_virtio = vsc;

	features = virtio_negotiate_features(vsc,
	    (VIRTIO_BLK_F_RO       | VIRTIO_F_NOTIFY_ON_EMPTY |
	     VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
	     VIRTIO_BLK_F_FLUSH),
	    vioblk_feature_names);

	if (features & VIRTIO_BLK_F_SIZE_MAX) {
		uint32_t size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (size_max < PAGE_SIZE) {
			printf("\nMax segment size %u too low\n", size_max);
			goto err;
		}
	}

	if (features & VIRTIO_BLK_F_SEG_MAX) {
		uint32_t seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (seg_max < SEG_MAX) {
			printf("\nMax number of segments %u too small\n",
			    seg_max);
			goto err;
		}
	}

	sc->sc_capacity = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAX_XFER, ALLOC_SEGS,
	    "I/O request") != 0) {
		printf("\nCan't alloc virtqueue\n");
		goto err;
	}
	qsize = sc->sc_vq[0].vq_num;
	sc->sc_vq[0].vq_done = vioblk_vq_done;

	if (features & VIRTIO_F_NOTIFY_ON_EMPTY) {
		virtio_stop_vq_intr(vsc, &sc->sc_vq[0]);
		sc->sc_notify_on_empty = 1;
	} else {
		sc->sc_notify_on_empty = 0;
	}

	sc->sc_queued = 0;

	sc->sc_switch.scsi_cmd = vioblk_scsi_cmd;
	sc->sc_switch.scsi_minphys = scsi_minphys;
	sc->sc_switch.dev_probe = vioblk_dev_probe;
	sc->sc_switch.dev_free = vioblk_dev_free;

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioblk_req_get, vioblk_req_put);

	sc->sc_link.openings = vioblk_alloc_reqs(sc, qsize);
	if (sc->sc_link.openings == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}

	sc->sc_link.adapter = &sc->sc_switch;
	sc->sc_link.pool = &sc->sc_iopool;
	sc->sc_link.adapter_softc = self;
	sc->sc_link.adapter_buswidth = 2;
	sc->sc_link.luns = 1;
	sc->sc_link.adapter_target = 2;
	DNPRINTF(1, "%s: qsize: %d\n", __func__, qsize);
	if (features & VIRTIO_BLK_F_RO)
		sc->sc_link.flags |= SDEV_READONLY;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;
	printf("\n");
	config_found(self, &saa, scsiprint);

	return;
err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * vioblk_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioblk_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioblk_req_get(void *cookie)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	return vr;
}

void
vioblk_req_put(void *cookie, void *io)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = io;

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the virtio_blk_req
	 * and should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

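/*
 * Virtqueue completion handler. With VIRTIO_F_NOTIFY_ON_EMPTY the device
 * interrupts only once the queue has been drained, so we just dequeue
 * until it is empty. Without it, interrupts are kept disabled while we
 * drain the queue and re-enabled before a final check, so that a request
 * completing in that window is not missed.
 */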
int
vioblk_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioblk_softc *sc = (struct vioblk_softc *)vsc->sc_child;
	struct vq_entry *qe;
	int slot;
	int ret = 0;

	if (!sc->sc_notify_on_empty)
		virtio_stop_vq_intr(vsc, vq);
	for (;;) {
		if (virtio_dequeue(vsc, vq, &slot, NULL) != 0) {
			if (sc->sc_notify_on_empty)
				break;
			virtio_start_vq_intr(vsc, vq);
			if (virtio_dequeue(vsc, vq, &slot, NULL) != 0)
				break;
		}
		qe = &vq->vq_entries[slot];
		vioblk_vq_done1(sc, vsc, vq, qe->qe_vr_index);
		ret = 1;
	}
	return ret;
}

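/*
 * Complete a single request: sync and unload the DMA maps, translate the
 * status byte written by the device into a SCSI result, mark the request
 * as done and hand the scsi_xfer back to the midlayer.
 */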
void
vioblk_vq_done1(struct vioblk_softc *sc, struct virtio_softc *vsc,
    struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct scsi_xfer *xs = vr->vr_xs;

	KASSERT(vr->vr_len != VIOBLK_DONE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE);
	if (vr->vr_hdr.type != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, vr->vr_len,
		    (vr->vr_hdr.type == VIRTIO_BLK_T_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		DNPRINTF(1, "%s: EIO\n", __func__);
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		xs->error = XS_NOERROR;
		xs->resid = xs->datalen - vr->vr_len;
	}
	vr->vr_len = VIOBLK_DONE;
	scsi_done(xs);
}

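/*
 * Called when a polled command has timed out: reset the device so no
 * further DMA happens, reap whatever did complete and fail every request
 * that is still outstanding. The caller is expected to reinitialize the
 * device afterwards.
 */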
void
vioblk_reset(struct vioblk_softc *sc)
{
	int i;

	/* reset device to stop DMA */
	virtio_reset(sc->sc_virtio);

	/* finish requests that have been completed */
	vioblk_vq_done(&sc->sc_vq[0]);

	/* abort all remaining requests */
	for (i = 0; i < sc->sc_link.openings; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		struct scsi_xfer *xs = vr->vr_xs;

		if (vr->vr_len == VIOBLK_DONE)
			continue;

		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		scsi_done(xs);
	}
}

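/*
 * Translate a SCSI command into a virtio block request. READ, WRITE and
 * SYNCHRONIZE CACHE are passed to the device as a descriptor chain of
 * the form
 *
 *	[ virtio_blk_req_hdr | data segments ... | status byte ]
 *
 * where the header is device-readable, the data segments are readable or
 * writable depending on the transfer direction and the status byte is
 * device-writable. INQUIRY, READ CAPACITY and a few housekeeping
 * commands are emulated in the driver itself.
 */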
void
vioblk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_blk_req *vr;
	int len, s, timeout, isread, slot, ret, nsegs;
	int error = XS_DRIVER_STUFFUP;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba = 0;
	u_int32_t sector_count = 0;
	uint8_t operation;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = VIRTIO_BLK_T_IN;
		isread = 1;
		break;
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = VIRTIO_BLK_T_OUT;
		isread = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if ((vsc->sc_features & VIRTIO_BLK_F_FLUSH) == 0) {
			vioblk_scsi_done(xs, XS_NOERROR);
			return;
		}
		operation = VIRTIO_BLK_T_FLUSH;
		break;

	case INQUIRY:
		vioblk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vioblk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vioblk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vioblk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has the same
	 * layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	s = splbio();
	vr = xs->io;
	slot = vr->vr_qe_index;
	if (operation != VIRTIO_BLK_T_FLUSH) {
		len = MIN(xs->datalen, sector_count * VIRTIO_BLK_SECTOR_SIZE);
		ret = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
		    xs->data, len, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT));
		if (ret) {
			printf("%s: bus_dmamap_load: %d\n", __func__, ret);
			error = XS_DRIVER_STUFFUP;
			goto out_done;
		}
		nsegs = vr->vr_payload->dm_nsegs + 2;
	} else {
		len = 0;
		nsegs = 2;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	virtio_enqueue_trim(vq, slot, nsegs);

	vr->vr_xs = xs;
	vr->vr_hdr.type = operation;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = lba;
	vr->vr_len = len;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	if (operation != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vq, slot, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), 1);
	if (operation != VIRTIO_BLK_T_FLUSH)
		virtio_enqueue(vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
	sc->sc_queued++;

	if (!ISSET(xs->flags, SCSI_POLL)) {
		/* check if some xfers are done: */
		if (sc->sc_queued > 1)
			vioblk_vq_done(vq);
		splx(s);
		return;
	}

	timeout = 15 * 1000;
	do {
		if (virtio_poll_intr(vsc) && vr->vr_len == VIOBLK_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (timeout <= 0) {
		uint32_t features;

		printf("%s: SCSI_POLL timed out\n", __func__);
		vioblk_reset(sc);
		virtio_reinit_start(vsc);
		features = virtio_negotiate_features(vsc, vsc->sc_features,
		    NULL);
		KASSERT(features == vsc->sc_features);
	}
	splx(s);
	return;

out_done:
	splx(s);
	vioblk_scsi_done(xs, error);
}

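/*
 * Synthesize a fixed INQUIRY response for the emulated direct-access
 * device; EVPD pages are not supported.
 */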
void
vioblk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;
	struct scsi_inquiry_data inqd;

	if (ISSET(inq->flags, SI_EVPD)) {
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inqd, sizeof(inqd));

	inqd.device = T_DIRECT;
	inqd.version = 0x05; /* SPC-3 */
	inqd.response_format = 2;
	inqd.additional_length = 32;
	inqd.flags |= SID_CmdQue;
	bcopy("VirtIO  ", inqd.vendor, sizeof(inqd.vendor));
	bcopy("Block Device    ", inqd.product, sizeof(inqd.product));

	bcopy(&inqd, xs->data, MIN(sizeof(inqd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

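/*
 * READ CAPACITY(10)/(16) report the last addressable LBA
 * (sc_capacity - 1) and the virtio sector size. The 10-byte variant
 * clamps the result to 0xffffffff so that larger disks are queried
 * again with READ CAPACITY(16).
 */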
void
vioblk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_capacity - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_capacity - 1, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

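/*
 * The emulated bus presents a single disk at target 0, LUN 0.
 */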
int
vioblk_dev_probe(struct scsi_link *link)
{
	KASSERT(link->lun == 0);
	if (link->target == 0)
		return (0);
	return (ENODEV);
}

void
vioblk_dev_free(struct scsi_link *link)
{
	printf("%s\n", __func__);
}

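/*
 * Allocate and initialize the request pool. Each request gets a fixed
 * virtqueue slot with ALLOC_SEGS descriptors reserved up front (trimmed
 * per I/O in vioblk_scsi_cmd()), a small DMA map for the header/status
 * part of the structure and a larger one for the data payload. Returns
 * the number of requests actually set up; this becomes the number of
 * openings advertised to the SCSI midlayer.
 */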
int
vioblk_alloc_reqs(struct vioblk_softc *sc, int qsize)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	struct vring_desc *vd;
	int allocsize, nreqs, r, rsegs, slot, i;
	void *vaddr;

	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = sizeof(struct virtio_blk_req) * nreqs;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory allocation failed, size %d, error %d\n",
		    allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory map failed, error %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DNPRINTF(2, "%s: vd[%d].next = %d should be "
				    "%d\n", __func__, r, vd[r].next,
				    (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DNPRINTF(2, "%s: reserved slots are contiguous "
			    "(good!)\n", __func__);
		}

		vr->vr_qe_index = slot;
		vq->vq_entries[slot].qe_vr_index = i;
		vr->vr_len = VIOBLK_DONE;

		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
		    VR_DMA_END, 1, VR_DMA_END, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_cmdsts);
		if (r != 0) {
			printf("cmd dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
		    &vr->vr_hdr, VR_DMA_END, NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("command dmamap load failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat, MAX_XFER,
		    SEG_MAX, MAX_XFER, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			printf("payload dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}
	return nreqs;

err_reqs:
	for (i = 0; i < nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, (caddr_t)sc->sc_reqs,
	    allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return 0;
}