/*	$OpenBSD: vioblk.c,v 1.36 2023/05/29 08:13:35 sf Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * Copyright (c) 1998, 2001 Manuel Bouyer.
 * All rights reserved.
 *
 * This code is based in part on the NetBSD ld_virtio driver and the
 * OpenBSD vdsk driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/bus.h>

#include <sys/device.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/mutex.h>
#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pv/vioblkreg.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

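/*
 * vr_len is set to VIOBLK_DONE once a request has completed (and before it
 * is ever submitted), so vioblk_reset() and the SCSI_POLL loop can tell
 * finished requests from ones that are still in flight.
 */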
#define VIOBLK_DONE	-1

/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAXPHYS/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)

struct virtio_feature_name vioblk_feature_names[] = {
#if VIRTIO_DEBUG
	{ VIRTIO_BLK_F_BARRIER,		"Barrier" },
	{ VIRTIO_BLK_F_SIZE_MAX,	"SizeMax" },
	{ VIRTIO_BLK_F_SEG_MAX,		"SegMax" },
	{ VIRTIO_BLK_F_GEOMETRY,	"Geometry" },
	{ VIRTIO_BLK_F_RO,		"RO" },
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlkSize" },
	{ VIRTIO_BLK_F_SCSI,		"SCSI" },
	{ VIRTIO_BLK_F_FLUSH,		"Flush" },
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology" },
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE" },
	{ VIRTIO_BLK_F_DISCARD,		"Discard" },
	{ VIRTIO_BLK_F_WRITE_ZEROES,	"Write0s" },
#endif
	{ 0,				NULL }
};

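/*
 * Per-request state.  Only the fields up to VR_DMA_END (the request header
 * and the status byte) are visible to the device; they are mapped through
 * vr_cmdsts.  Everything past VR_DMA_END is driver bookkeeping.  The
 * caller's data buffer is mapped separately through vr_payload.
 */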
struct virtio_blk_req {
	struct virtio_blk_req_hdr	 vr_hdr;
	uint8_t				 vr_status;
#define VR_DMA_END	offsetof(struct virtio_blk_req, vr_qe_index)
	int16_t				 vr_qe_index;
	int				 vr_len;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_cmdsts;
	bus_dmamap_t			 vr_payload;
	SLIST_ENTRY(virtio_blk_req)	 vr_list;
};

struct vioblk_softc {
	struct device		 sc_dev;
	struct virtio_softc	*sc_virtio;

	struct virtqueue         sc_vq[1];
	struct virtio_blk_req   *sc_reqs;
	bus_dma_segment_t        sc_reqs_segs[1];
	int			 sc_nreqs;

	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;
	SLIST_HEAD(, virtio_blk_req) sc_freelist;

	int			 sc_notify_on_empty;

	uint32_t		 sc_queued;

	uint64_t		 sc_capacity;
};

int	vioblk_match(struct device *, void *, void *);
void	vioblk_attach(struct device *, struct device *, void *);
int	vioblk_alloc_reqs(struct vioblk_softc *, int);
int	vioblk_vq_done(struct virtqueue *);
void	vioblk_vq_done1(struct vioblk_softc *, struct virtio_softc *,
			struct virtqueue *, int);
void	vioblk_reset(struct vioblk_softc *);

void	vioblk_scsi_cmd(struct scsi_xfer *);

void   *vioblk_req_get(void *);
void	vioblk_req_put(void *, void *);

void	vioblk_scsi_inq(struct scsi_xfer *);
void	vioblk_scsi_capacity(struct scsi_xfer *);
void	vioblk_scsi_capacity16(struct scsi_xfer *);
void	vioblk_scsi_done(struct scsi_xfer *, int);

const struct cfattach vioblk_ca = {
	sizeof(struct vioblk_softc),
	vioblk_match,
	vioblk_attach,
	NULL
};

struct cfdriver vioblk_cd = {
	NULL, "vioblk", DV_DULL
};

const struct scsi_adapter vioblk_switch = {
	vioblk_scsi_cmd, NULL, NULL, NULL, NULL
};

int
vioblk_match(struct device *parent, void *match, void *aux)
{
	struct virtio_softc *va = aux;
	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
		return 1;
	return 0;
}

#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while(0)

void
vioblk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vioblk_softc *sc = (struct vioblk_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct scsibus_attach_args saa;
	int qsize;

	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = 0;
	if (vsc->sc_child)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	sc->sc_virtio = vsc;
	vsc->sc_driver_features = VIRTIO_BLK_F_RO | VIRTIO_F_NOTIFY_ON_EMPTY |
	     VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_FLUSH;

	virtio_negotiate_features(vsc, vioblk_feature_names);

	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SIZE_MAX)) {
		uint32_t size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (size_max < PAGE_SIZE) {
			printf("\nMax segment size %u too low\n", size_max);
			goto err;
		}
	}

	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SEG_MAX)) {
		uint32_t seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (seg_max < SEG_MAX) {
			printf("\nMax number of segments %u too small\n",
			    seg_max);
			goto err;
		}
	}

	sc->sc_capacity = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAXPHYS, ALLOC_SEGS,
	    "I/O request") != 0) {
		printf("\nCan't alloc virtqueue\n");
		goto err;
	}
	qsize = sc->sc_vq[0].vq_num;
	sc->sc_vq[0].vq_done = vioblk_vq_done;

	if (virtio_has_feature(vsc, VIRTIO_F_NOTIFY_ON_EMPTY)) {
		virtio_stop_vq_intr(vsc, &sc->sc_vq[0]);
		sc->sc_notify_on_empty = 1;
	} else {
		sc->sc_notify_on_empty = 0;
	}

	sc->sc_queued = 0;

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioblk_req_get, vioblk_req_put);

	sc->sc_nreqs = vioblk_alloc_reqs(sc, qsize);
	if (sc->sc_nreqs == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}
	DNPRINTF(1, "%s: qsize: %d\n", __func__, qsize);
	printf("\n");

	saa.saa_adapter = &vioblk_switch;
	saa.saa_adapter_softc = self;
	saa.saa_adapter_buswidth = 1;
	saa.saa_luns = 1;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_openings = sc->sc_nreqs;
	saa.saa_pool = &sc->sc_iopool;
	if (virtio_has_feature(vsc, VIRTIO_BLK_F_RO))
		saa.saa_flags = SDEV_READONLY;
	else
		saa.saa_flags = 0;
	saa.saa_quirks = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	config_found(self, &saa, scsiprint);

	return;
err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * vioblk_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioblk_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioblk_req_get(void *cookie)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	return vr;
}

void
vioblk_req_put(void *cookie, void *io)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = io;

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the virtio_blk_req and
	 * should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

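/*
 * Drain all completed requests from the virtqueue.  If NOTIFY_ON_EMPTY was
 * negotiated, virtqueue interrupts stay disabled and the device interrupts
 * only once the queue runs empty.  Otherwise interrupts are turned off
 * while draining and the queue is checked once more after turning them
 * back on, so that a completion racing with the re-enable is not lost.
 */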
int
vioblk_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioblk_softc *sc = (struct vioblk_softc *)vsc->sc_child;
	struct vq_entry *qe;
	int slot;
	int ret = 0;

	if (!sc->sc_notify_on_empty)
		virtio_stop_vq_intr(vsc, vq);
	for (;;) {
		if (virtio_dequeue(vsc, vq, &slot, NULL) != 0) {
			if (sc->sc_notify_on_empty)
				break;
			virtio_start_vq_intr(vsc, vq);
			if (virtio_dequeue(vsc, vq, &slot, NULL) != 0)
				break;
		}
		qe = &vq->vq_entries[slot];
		vioblk_vq_done1(sc, vsc, vq, qe->qe_vr_index);
		ret = 1;
	}
	return ret;
}

void
vioblk_vq_done1(struct vioblk_softc *sc, struct virtio_softc *vsc,
    struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct scsi_xfer *xs = vr->vr_xs;
	KASSERT(vr->vr_len != VIOBLK_DONE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE);
	if (vr->vr_hdr.type != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, vr->vr_len,
		    (vr->vr_hdr.type == VIRTIO_BLK_T_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		DNPRINTF(1, "%s: EIO\n", __func__);
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		xs->error = XS_NOERROR;
		xs->resid = xs->datalen - vr->vr_len;
	}
	vr->vr_len = VIOBLK_DONE;
	scsi_done(xs);
}

void
vioblk_reset(struct vioblk_softc *sc)
{
	int i;

	/* reset device to stop DMA */
	virtio_reset(sc->sc_virtio);

	/* finish requests that have been completed */
	vioblk_vq_done(&sc->sc_vq[0]);

	/* abort all remaining requests */
	for (i = 0; i < sc->sc_nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		struct scsi_xfer *xs = vr->vr_xs;

		if (vr->vr_len == VIOBLK_DONE)
			continue;

		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		scsi_done(xs);
	}
}

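/*
 * SCSI command entry point.  READ, WRITE and SYNCHRONIZE CACHE are
 * translated into virtio block requests; INQUIRY and READ CAPACITY are
 * answered by the driver itself; a few no-op commands complete
 * immediately and anything else fails with XS_DRIVER_STUFFUP.
 */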
void
vioblk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_blk_req *vr;
	int len, s, timeout, isread, slot, ret, nsegs;
	int error = XS_DRIVER_STUFFUP;
	struct scsi_rw *rw;
	struct scsi_rw_10 *rw10;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba = 0;
	u_int32_t sector_count = 0;
	uint8_t operation;

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
		operation = VIRTIO_BLK_T_IN;
		isread = 1;
		break;
	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		operation = VIRTIO_BLK_T_OUT;
		isread = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (!virtio_has_feature(vsc, VIRTIO_BLK_F_FLUSH)) {
			vioblk_scsi_done(xs, XS_NOERROR);
			return;
		}
		operation = VIRTIO_BLK_T_FLUSH;
		break;

	case INQUIRY:
		vioblk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vioblk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vioblk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vioblk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd.opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has same
	 * layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)&xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rw10 = (struct scsi_rw_10 *)&xs->cmd;
		lba = _4btol(rw10->addr);
		sector_count = _2btol(rw10->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)&xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)&xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	s = splbio();
	vr = xs->io;
	slot = vr->vr_qe_index;
	if (operation != VIRTIO_BLK_T_FLUSH) {
		len = MIN(xs->datalen, sector_count * VIRTIO_BLK_SECTOR_SIZE);
		ret = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
		    xs->data, len, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT));
		if (ret) {
			printf("%s: bus_dmamap_load: %d\n", __func__, ret);
			error = XS_DRIVER_STUFFUP;
			goto out_done;
		}
		nsegs = vr->vr_payload->dm_nsegs + 2;
	} else {
		len = 0;
		nsegs = 2;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	virtio_enqueue_trim(vq, slot, nsegs);

	vr->vr_xs = xs;
	vr->vr_hdr.type = operation;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = lba;
	vr->vr_len = len;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	if (operation != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

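	/*
	 * Chain the descriptors for this request: the device-readable
	 * request header, the data payload (device-writable for reads),
	 * and finally the device-writable status byte.
	 */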
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), 1);
	if (operation != VIRTIO_BLK_T_FLUSH)
		virtio_enqueue(vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
	sc->sc_queued++;

	if (!ISSET(xs->flags, SCSI_POLL)) {
		/* check if some xfers are done: */
		if (sc->sc_queued > 1)
			vioblk_vq_done(vq);
		splx(s);
		return;
	}

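	/*
	 * SCSI_POLL: busy-wait for completion, polling the interrupt
	 * handler once per millisecond for up to 15 seconds.  On timeout,
	 * reset the device to stop any outstanding DMA and reinitialize it.
	 */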
	timeout = 15 * 1000;
	do {
		if (virtio_poll_intr(vsc) && vr->vr_len == VIOBLK_DONE)
			break;

		delay(1000);
	} while(--timeout > 0);
	if (timeout <= 0) {
		printf("%s: SCSI_POLL timed out\n", __func__);
		vioblk_reset(sc);
		virtio_reinit_start(vsc);
		virtio_reinit_end(vsc);
	}
	splx(s);
	return;

out_done:
	splx(s);
	vioblk_scsi_done(xs, error);
}

void
vioblk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;
	struct scsi_inquiry_data inqd;

	if (ISSET(inq->flags, SI_EVPD)) {
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inqd, sizeof(inqd));

	inqd.device = T_DIRECT;
	inqd.version = SCSI_REV_SPC3;
	inqd.response_format = SID_SCSI2_RESPONSE;
	inqd.additional_length = SID_SCSI2_ALEN;
	inqd.flags |= SID_CmdQue;
	bcopy("VirtIO  ", inqd.vendor, sizeof(inqd.vendor));
	bcopy("Block Device    ", inqd.product, sizeof(inqd.product));

	scsi_copy_internal_data(xs, &inqd, sizeof(inqd));

	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_capacity - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_capacity - 1, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

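/*
 * Allocate DMA-safe memory for the request array, reserve a descriptor
 * chain of ALLOC_SEGS entries for each request up front, and create the
 * command/status and payload DMA maps.  Returns the number of requests
 * actually set up, or 0 on failure.
 */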
int
vioblk_alloc_reqs(struct vioblk_softc *sc, int qsize)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	struct vring_desc *vd;
	int allocsize, nreqs, r, rsegs, slot, i;
	void *vaddr;

	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = sizeof(struct virtio_blk_req) * nreqs;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory allocation failed, size %d, error %d\n",
		    allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory map failed, error %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DNPRINTF(2, "%s: vd[%d].next = %d should be "
				    "%d\n", __func__, r, vd[r].next,
				    (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DNPRINTF(2, "%s: reserved slots are contiguous "
			    "(good!)\n", __func__);
		}

		vr->vr_qe_index = slot;
		vq->vq_entries[slot].qe_vr_index = i;
		vr->vr_len = VIOBLK_DONE;

		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
		    VR_DMA_END, 1, VR_DMA_END, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_cmdsts);
		if (r != 0) {
			printf("cmd dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
		    &vr->vr_hdr, VR_DMA_END, NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("command dmamap load failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat, MAXPHYS,
		    SEG_MAX, MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			printf("payload dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}
	return nreqs;

err_reqs:
	for (i = 0; i < nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, (caddr_t)sc->sc_reqs,
	    allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return 0;
}
743