/*	$OpenBSD: vioblk.c,v 1.9 2017/08/10 18:06:58 reyk Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * Copyright (c) 1998, 2001 Manuel Bouyer.
 * All rights reserved.
 *
 * This code is based in part on the NetBSD ld_virtio driver and the
 * OpenBSD vdsk driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/bus.h>

#include <sys/device.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/mutex.h>
#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pv/vioblkreg.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#define VIOBLK_DONE	-1

#define MAX_XFER	MAX(MAXPHYS,MAXBSIZE)
/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAX_XFER/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)
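/*
 * A request occupies ALLOC_SEGS descriptors in the ring: one for the
 * request header, up to SEG_MAX for the data payload, and one for the
 * single status byte written back by the device.
 */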

struct virtio_feature_name vioblk_feature_names[] = {
	{ VIRTIO_BLK_F_BARRIER,		"Barrier" },
	{ VIRTIO_BLK_F_SIZE_MAX,	"SizeMax" },
	{ VIRTIO_BLK_F_SEG_MAX,		"SegMax" },
	{ VIRTIO_BLK_F_GEOMETRY,	"Geometry" },
	{ VIRTIO_BLK_F_RO,		"RO" },
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlkSize" },
	{ VIRTIO_BLK_F_SCSI,		"SCSI" },
	{ VIRTIO_BLK_F_FLUSH,		"Flush" },
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology" },
	{ 0,				NULL }
};

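/*
 * Per-request state.  The leading vr_hdr and vr_status fields (everything
 * before VR_DMA_END) are the only bytes mapped for DMA through vr_cmdsts;
 * the remaining members are host-side bookkeeping.
 */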
struct virtio_blk_req {
	struct virtio_blk_req_hdr	 vr_hdr;
	uint8_t				 vr_status;
#define VR_DMA_END	offsetof(struct virtio_blk_req, vr_qe_index)
	int16_t				 vr_qe_index;
	int				 vr_len;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_cmdsts;
	bus_dmamap_t			 vr_payload;
	SLIST_ENTRY(virtio_blk_req)	 vr_list;
};

struct vioblk_softc {
	struct device		 sc_dev;
	struct virtio_softc	*sc_virtio;

	struct virtqueue         sc_vq[1];
	struct virtio_blk_req   *sc_reqs;
	bus_dma_segment_t        sc_reqs_segs[1];

	struct scsi_adapter	 sc_switch;
	struct scsi_link	 sc_link;
	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;
	SLIST_HEAD(, virtio_blk_req) sc_freelist;

	int			 sc_notify_on_empty;

	uint32_t		 sc_queued;

	uint64_t		 sc_capacity;
};

int	vioblk_match(struct device *, void *, void *);
void	vioblk_attach(struct device *, struct device *, void *);
int	vioblk_alloc_reqs(struct vioblk_softc *, int);
int	vioblk_vq_done(struct virtqueue *);
void	vioblk_vq_done1(struct vioblk_softc *, struct virtio_softc *,
			struct virtqueue *, int);
void	vioblk_reset(struct vioblk_softc *);

void	vioblk_scsi_cmd(struct scsi_xfer *);
int	vioblk_dev_probe(struct scsi_link *);
void	vioblk_dev_free(struct scsi_link *);

void   *vioblk_req_get(void *);
void	vioblk_req_put(void *, void *);

void	vioblk_scsi_inq(struct scsi_xfer *);
void	vioblk_scsi_capacity(struct scsi_xfer *);
void	vioblk_scsi_capacity16(struct scsi_xfer *);
void	vioblk_scsi_done(struct scsi_xfer *, int);

struct cfattach vioblk_ca = {
	sizeof(struct vioblk_softc),
	vioblk_match,
	vioblk_attach,
	NULL
};

struct cfdriver vioblk_cd = {
	NULL, "vioblk", DV_DULL
};


int
vioblk_match(struct device *parent, void *match, void *aux)
{
	struct virtio_softc *va = aux;
	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
		return 1;
	return 0;
}

#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)

void
vioblk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vioblk_softc *sc = (struct vioblk_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct scsibus_attach_args saa;
	uint32_t features;
	int qsize;

	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = 0;
	if (vsc->sc_child)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	sc->sc_virtio = vsc;

	features = virtio_negotiate_features(vsc,
	    (VIRTIO_BLK_F_RO       | VIRTIO_F_NOTIFY_ON_EMPTY |
	     VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
	     VIRTIO_BLK_F_FLUSH),
	    vioblk_feature_names);


	if (features & VIRTIO_BLK_F_SIZE_MAX) {
		uint32_t size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (size_max < PAGE_SIZE) {
			printf("\nMax segment size %u too low\n", size_max);
			goto err;
		}
	}

	if (features & VIRTIO_BLK_F_SEG_MAX) {
		uint32_t seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (seg_max < SEG_MAX) {
			printf("\nMax number of segments %d too small\n",
			    seg_max);
			goto err;
		}
	}

	sc->sc_capacity = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAX_XFER, ALLOC_SEGS,
	    "I/O request") != 0) {
		printf("\nCan't alloc virtqueue\n");
		goto err;
	}
	qsize = sc->sc_vq[0].vq_num;
	sc->sc_vq[0].vq_done = vioblk_vq_done;

	if (features & VIRTIO_F_NOTIFY_ON_EMPTY) {
		virtio_stop_vq_intr(vsc, &sc->sc_vq[0]);
		sc->sc_notify_on_empty = 1;
	}
	else {
		sc->sc_notify_on_empty = 0;
	}

	sc->sc_queued = 0;

	sc->sc_switch.scsi_cmd = vioblk_scsi_cmd;
	sc->sc_switch.scsi_minphys = scsi_minphys;
	sc->sc_switch.dev_probe = vioblk_dev_probe;
	sc->sc_switch.dev_free = vioblk_dev_free;

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioblk_req_get, vioblk_req_put);

	sc->sc_link.openings = vioblk_alloc_reqs(sc, qsize);
	if (sc->sc_link.openings == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}

	sc->sc_link.adapter = &sc->sc_switch;
	sc->sc_link.pool = &sc->sc_iopool;
	sc->sc_link.adapter_softc = self;
	sc->sc_link.adapter_buswidth = 2;
	sc->sc_link.luns = 1;
	sc->sc_link.adapter_target = 2;
	DNPRINTF(1, "%s: qsize: %d\n", __func__, qsize);
	if (features & VIRTIO_BLK_F_RO)
		sc->sc_link.flags |= SDEV_READONLY;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;
	printf("\n");
	config_found(self, &saa, scsiprint);

	return;
err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * vioblk_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioblk_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioblk_req_get(void *cookie)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	return vr;
}

void
vioblk_req_put(void *cookie, void *io)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = io;

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * The descriptors are permanently associated with this
	 * virtio_blk_req and must not be returned to the virtqueue's
	 * free descriptor list; only the request itself goes back on
	 * the driver's free list.
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

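/*
 * Virtqueue completion handler.  Drain all finished requests; when the
 * device does not use NOTIFY_ON_EMPTY, interrupts are kept disabled while
 * draining and re-enabled just before a final dequeue attempt so that a
 * completion arriving in between is not lost.
 */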
int
vioblk_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioblk_softc *sc = (struct vioblk_softc *)vsc->sc_child;
	struct vq_entry *qe;
	int slot;
	int ret = 0;

	if (!sc->sc_notify_on_empty)
		virtio_stop_vq_intr(vsc, vq);
	for (;;) {
		if (virtio_dequeue(vsc, vq, &slot, NULL) != 0) {
			if (sc->sc_notify_on_empty)
				break;
			virtio_start_vq_intr(vsc, vq);
			if (virtio_dequeue(vsc, vq, &slot, NULL) != 0)
				break;
		}
		qe = &vq->vq_entries[slot];
		vioblk_vq_done1(sc, vsc, vq, qe->qe_vr_index);
		ret = 1;
	}
	return ret;
}

void
vioblk_vq_done1(struct vioblk_softc *sc, struct virtio_softc *vsc,
    struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct scsi_xfer *xs = vr->vr_xs;
	KASSERT(vr->vr_len != VIOBLK_DONE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE);
	if (vr->vr_hdr.type != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, vr->vr_len,
		    (vr->vr_hdr.type == VIRTIO_BLK_T_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);


	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		DNPRINTF(1, "%s: EIO\n", __func__);
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		xs->error = XS_NOERROR;
		xs->resid = xs->datalen - vr->vr_len;
	}
	vr->vr_len = VIOBLK_DONE;
	scsi_done(xs);
}

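/*
 * Reset the device to stop all DMA, then complete or fail every request
 * that is still outstanding.  Used to recover from a SCSI_POLL timeout
 * in vioblk_scsi_cmd().
 */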
void
vioblk_reset(struct vioblk_softc *sc)
{
	int i;

	/* reset device to stop DMA */
	virtio_reset(sc->sc_virtio);

	/* finish requests that have been completed */
	vioblk_vq_done(&sc->sc_vq[0]);

	/* abort all remaining requests */
	for (i = 0; i < sc->sc_link.openings; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		struct scsi_xfer *xs = vr->vr_xs;

		if (vr->vr_len == VIOBLK_DONE)
			continue;

		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		scsi_done(xs);
	}
}

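/*
 * Translate a SCSI command into a virtio block request.  READ/WRITE and
 * SYNCHRONIZE CACHE are passed to the device; INQUIRY, READ CAPACITY and
 * a few housekeeping commands are answered locally; everything else is
 * rejected.
 */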
void
vioblk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_blk_req *vr;
	int len, s, timeout, isread, slot, ret, nsegs;
	int error = XS_DRIVER_STUFFUP;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba = 0;
	u_int32_t sector_count = 0;
	uint8_t operation;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = VIRTIO_BLK_T_IN;
		isread = 1;
		break;
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = VIRTIO_BLK_T_OUT;
		isread = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if ((vsc->sc_features & VIRTIO_BLK_F_FLUSH) == 0) {
			vioblk_scsi_done(xs, XS_NOERROR);
			return;
		}
		operation = VIRTIO_BLK_T_FLUSH;
		break;

	case INQUIRY:
		vioblk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vioblk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vioblk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vioblk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands.  SYNCHRONIZE CACHE has the same
	 * layout as the 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	s = splbio();
	vr = xs->io;
	slot = vr->vr_qe_index;
	if (operation != VIRTIO_BLK_T_FLUSH) {
		len = MIN(xs->datalen, sector_count * VIRTIO_BLK_SECTOR_SIZE);
		ret = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
		    xs->data, len, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT));
		if (ret) {
			printf("%s: bus_dmamap_load: %d", __func__, ret);
			error = XS_DRIVER_STUFFUP;
			goto out_done;
		}
		nsegs = vr->vr_payload->dm_nsegs + 2;
	} else {
		len = 0;
		nsegs = 2;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	virtio_enqueue_trim(vq, slot, nsegs);

	vr->vr_xs = xs;
	vr->vr_hdr.type = operation;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = lba;
	vr->vr_len = len;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	if (operation != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

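	/*
	 * Build the descriptor chain in the order the virtio block protocol
	 * expects: request header, then the data payload (omitted for a
	 * flush), then the status byte the device fills in.
	 */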
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), 1);
	if (operation != VIRTIO_BLK_T_FLUSH)
		virtio_enqueue(vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
	sc->sc_queued++;

	if (!ISSET(xs->flags, SCSI_POLL)) {
		/* check if some xfers are done: */
		if (sc->sc_queued > 1)
			vioblk_vq_done(vq);
		splx(s);
		return;
	}

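	/*
	 * SCSI_POLL: busy-wait for completion for up to 15 seconds.  On
	 * timeout, reset the device, then restart it and renegotiate the
	 * previously accepted feature set.
	 */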
	timeout = 15 * 1000;
	do {
		if (virtio_poll_intr(vsc) && vr->vr_len == VIOBLK_DONE)
			break;

		delay(1000);
	} while(--timeout > 0);
	if (timeout <= 0) {
		uint32_t features;
		printf("%s: SCSI_POLL timed out\n", __func__);
		vioblk_reset(sc);
		virtio_reinit_start(vsc);
		features = virtio_negotiate_features(vsc, vsc->sc_features,
		    NULL);
		KASSERT(features == vsc->sc_features);
	}
	splx(s);
	return;

out_done:
	splx(s);
	vioblk_scsi_done(xs, error);
}

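/*
 * Fake a minimal SPC-3 INQUIRY response for the emulated direct-access
 * device; EVPD pages are not supported.
 */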
void
vioblk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;
	struct scsi_inquiry_data inqd;

	if (ISSET(inq->flags, SI_EVPD)) {
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inqd, sizeof(inqd));

	inqd.device = T_DIRECT;
	inqd.version = 0x05; /* SPC-3 */
	inqd.response_format = 2;
	inqd.additional_length = 32;
	inqd.flags |= SID_CmdQue;
	bcopy("VirtIO  ", inqd.vendor, sizeof(inqd.vendor));
	bcopy("Block Device    ", inqd.product, sizeof(inqd.product));

	bcopy(&inqd, xs->data, MIN(sizeof(inqd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_capacity - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_capacity - 1, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

int
vioblk_dev_probe(struct scsi_link *link)
{
	KASSERT(link->lun == 0);
	if (link->target == 0)
		return (0);
	return (ENODEV);
}

void
vioblk_dev_free(struct scsi_link *link)
{
	printf("%s\n", __func__);
}

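/*
 * Allocate a DMA-able array of request structures and bind each one to a
 * fixed set of virtqueue descriptors and DMA maps.  Returns the number of
 * requests actually set up; this becomes the number of adapter openings.
 */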
int
vioblk_alloc_reqs(struct vioblk_softc *sc, int qsize)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	struct vring_desc *vd;
	int allocsize, nreqs, r, rsegs, slot, i;
	void *vaddr;

	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = sizeof(struct virtio_blk_req) * nreqs;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory allocation failed, size %d, error %d\n",
		    allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory map failed, error %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DNPRINTF(2, "%s: vd[%d].next = %d should be "
				    "%d\n", __func__, r, vd[r].next,
				    (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DNPRINTF(2, "%s: reserved slots are contiguous "
			    "(good!)\n", __func__);
		}

		vr->vr_qe_index = slot;
		vq->vq_entries[slot].qe_vr_index = i;
		vr->vr_len = VIOBLK_DONE;

		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
		    VR_DMA_END, 1, VR_DMA_END, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_cmdsts);
		if (r != 0) {
			printf("cmd dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
		    &vr->vr_hdr, VR_DMA_END, NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("command dmamap load failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat, MAX_XFER,
		    SEG_MAX, MAX_XFER, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			printf("payload dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}
	return nreqs;

err_reqs:
	for (i = 0; i < nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, (caddr_t)sc->sc_reqs,
	    allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return 0;
}
761