xref: /netbsd-src/sys/dev/pci/ld_virtio.c (revision a5847cc334d9a7029f6352b847e9e8d71a0f9e0c)
1 /*	$NetBSD: ld_virtio.c,v 1.2 2011/11/02 14:34:09 hannken Exp $	*/
2 
3 /*
4  * Copyright (c) 2010 Minoura Makoto.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.2 2011/11/02 14:34:09 hannken Exp $");
30 
31 #include "rnd.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/buf.h>
37 #include <sys/bus.h>
38 #include <sys/device.h>
39 #include <sys/disk.h>
40 #include <sys/mutex.h>
41 #if NRND > 0
42 #include <sys/rnd.h>
43 #endif
44 
45 #include <dev/pci/pcidevs.h>
46 #include <dev/pci/pcireg.h>
47 #include <dev/pci/pcivar.h>
48 
49 #include <dev/ldvar.h>
50 #include <dev/pci/virtioreg.h>
51 #include <dev/pci/virtiovar.h>
52 
53 /*
54  * ld_virtioreg:
55  */
56 /* Configuration registers */
57 #define VIRTIO_BLK_CONFIG_CAPACITY	0 /* 64bit */
58 #define VIRTIO_BLK_CONFIG_SIZE_MAX	8 /* 32bit */
59 #define VIRTIO_BLK_CONFIG_SEG_MAX	12 /* 32bit */
60 #define VIRTIO_BLK_CONFIG_GEOMETRY_C	16 /* 16bit */
61 #define VIRTIO_BLK_CONFIG_GEOMETRY_H	18 /* 8bit */
62 #define VIRTIO_BLK_CONFIG_GEOMETRY_S	19 /* 8bit */
63 #define VIRTIO_BLK_CONFIG_BLK_SIZE	20 /* 32bit */
64 #define VIRTIO_BLK_CONFIG_SECTORS_MAX	24 /* 32bit */
65 
66 /* Feature bits */
67 #define VIRTIO_BLK_F_BARRIER	(1<<0)
68 #define VIRTIO_BLK_F_SIZE_MAX	(1<<1)
69 #define VIRTIO_BLK_F_SEG_MAX	(1<<2)
70 #define VIRTIO_BLK_F_GEOMETRY	(1<<4)
71 #define VIRTIO_BLK_F_RO		(1<<5)
72 #define VIRTIO_BLK_F_BLK_SIZE	(1<<6)
73 #define VIRTIO_BLK_F_SCSI	(1<<7)
74 #define VIRTIO_BLK_F_FLUSH	(1<<9)
75 #define VIRTIO_BLK_F_SECTOR_MAX	(1<<10)
76 
77 /* Command */
78 #define VIRTIO_BLK_T_IN		0
79 #define VIRTIO_BLK_T_OUT	1
80 #define VIRTIO_BLK_T_BARRIER	0x80000000
81 
82 /* Status */
83 #define VIRTIO_BLK_S_OK		0
84 #define VIRTIO_BLK_S_IOERR	1
85 
86 /* Request header structure */
/*
 * Request header that leads every virtio block command.
 * Layout is fixed by the virtio block device specification.
 */
struct virtio_blk_req_hdr {
	uint32_t	type;	/* VIRTIO_BLK_T_* */
	uint32_t	ioprio;	/* priority hint; 0 here (default) */
	uint64_t	sector;	/* start position, in 512-byte units */
} __packed;
/* The data payload (located at byte offset 512*sector on the disk)
 * and a 1-byte status follow the header in the descriptor chain. */
93 
94 
95 /*
96  * ld_virtiovar:
97  */
/*
 * Per-virtqueue-slot request bookkeeping.  The leading vr_hdr/vr_status
 * pair is the DMA-visible part, mapped through vr_cmdsts; the fields
 * from vr_bp onward are driver-private (see the
 * offsetof(struct virtio_blk_req, vr_bp) sizing in ld_virtio_alloc_reqs).
 */
struct virtio_blk_req {
	struct virtio_blk_req_hdr	vr_hdr;		/* device-readable header */
	uint8_t				vr_status;	/* device-written VIRTIO_BLK_S_* */
	struct buf			*vr_bp;		/* buf being serviced */
	bus_dmamap_t			vr_cmdsts;	/* map for vr_hdr + vr_status */
	bus_dmamap_t			vr_payload;	/* map for the data transfer */
};
105 
/* Driver softc; sc_ld must come first so ld(4) can cast back to it. */
struct ld_virtio_softc {
	struct ld_softc		sc_ld;		/* generic ld(4) state (must be first) */
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;	/* parent virtio transport */
	struct virtqueue	sc_vq[1];	/* single I/O request queue */

	struct virtio_blk_req	*sc_reqs;	/* one request per vq slot */
	bus_dma_segment_t	sc_reqs_segs[1];	/* backing DMA segment for sc_reqs */

	kmutex_t		sc_lock;

	int			sc_readonly;	/* VIRTIO_BLK_F_RO negotiated */
};
120 
121 static int	ld_virtio_match(device_t, cfdata_t, void *);
122 static void	ld_virtio_attach(device_t, device_t, void *);
123 static int	ld_virtio_detach(device_t, int);
124 
125 CFATTACH_DECL_NEW(ld_virtio, sizeof(struct ld_virtio_softc),
126     ld_virtio_match, ld_virtio_attach, ld_virtio_detach, NULL);
127 
128 static int
129 ld_virtio_match(device_t parent, cfdata_t match, void *aux)
130 {
131 	struct virtio_softc *va = aux;
132 
133 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
134 		return 1;
135 
136 	return 0;
137 }
138 
139 static int ld_virtio_vq_done(struct virtqueue *);
140 static int ld_virtio_dump(struct ld_softc *, void *, int, int);
141 static int ld_virtio_start(struct ld_softc *, struct buf *);
142 
/*
 * Allocate one contiguous DMA-safe array of virtio_blk_req, one entry
 * per virtqueue slot, and create/load the DMA maps for each entry:
 * vr_cmdsts spans the header+status prefix of the request structure,
 * vr_payload is created (but not loaded) for up to sc_maxxfer of data.
 * Returns 0 on success, -1 on failure (all partial allocations undone).
 */
static int
ld_virtio_alloc_reqs(struct ld_virtio_softc *sc, int qsize)
{
	int allocsize, r, rsegs, i;
	struct ld_softc *ld = &sc->sc_ld;
	void *vaddr;

	allocsize = sizeof(struct virtio_blk_req) * qsize;
	/* Single physically contiguous segment for the whole array. */
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
			     &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory allocation failed, size %d, "
				 "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat,
			   &sc->sc_reqs_segs[0], 1, allocsize,
			   &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory map failed, "
				 "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	/*
	 * Zero-fill so the err_reqs path below can distinguish maps that
	 * were actually created (non-NULL vr_cmdsts/vr_payload) from ones
	 * that were not.
	 */
	memset(vaddr, 0, allocsize);
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		/*
		 * vr_cmdsts covers everything up to (but not including)
		 * vr_bp, i.e. the device-visible header plus the 1-byte
		 * status; offsetof(..., vr_bp) is exactly that size.
		 */
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
				      offsetof(struct virtio_blk_req, vr_bp),
				      1,
				      offsetof(struct virtio_blk_req, vr_bp),
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_cmdsts);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		/* Load the header+status region once; it stays loaded. */
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
				    &vr->vr_hdr,
				    offsetof(struct virtio_blk_req, vr_bp),
				    NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap load failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		/*
		 * Payload map: sized for the maximum transfer, with one
		 * segment per page plus one for a misaligned start.
		 * It is loaded per-request in ld_virtio_start()/_dump().
		 */
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
				      ld->sc_maxxfer,
				      (ld->sc_maxxfer / NBPG) + 1,
				      ld->sc_maxxfer,
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_payload);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "payload dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
	}
	return 0;

err_reqs:
	/* Destroy whatever maps were created; NULL entries are skipped. */
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
					   vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
					   vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, sc->sc_reqs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return -1;
}
231 
232 static void
233 ld_virtio_attach(device_t parent, device_t self, void *aux)
234 {
235 	struct ld_virtio_softc *sc = device_private(self);
236 	struct ld_softc *ld = &sc->sc_ld;
237 	struct virtio_softc *vsc = device_private(parent);
238 	uint32_t features;
239 	int qsize, maxxfersize;
240 
241 	if (vsc->sc_child != NULL) {
242 		aprint_normal(": child already attached for %s; "
243 			      "something wrong...\n",
244 			      device_xname(parent));
245 		return;
246 	}
247 	aprint_normal("\n");
248 	aprint_naive("\n");
249 
250 	sc->sc_dev = self;
251 	sc->sc_virtio = vsc;
252 
253 	vsc->sc_child = self;
254 	vsc->sc_ipl = IPL_BIO;
255 	vsc->sc_vqs = &sc->sc_vq[0];
256 	vsc->sc_nvqs = 1;
257 	vsc->sc_config_change = 0;
258 	vsc->sc_intrhand = virtio_vq_intr;
259 
260 	features = virtio_negotiate_features(vsc,
261 					     (VIRTIO_BLK_F_SIZE_MAX |
262 					      VIRTIO_BLK_F_SEG_MAX |
263 					      VIRTIO_BLK_F_GEOMETRY |
264 					      VIRTIO_BLK_F_RO |
265 					      VIRTIO_BLK_F_BLK_SIZE |
266 					      VIRTIO_BLK_F_SECTOR_MAX));
267 	if (features & VIRTIO_BLK_F_RO)
268 		sc->sc_readonly = 1;
269 	else
270 		sc->sc_readonly = 0;
271 
272 	maxxfersize = MAXPHYS;
273 	if (features & VIRTIO_BLK_F_SECTOR_MAX) {
274 		maxxfersize = virtio_read_device_config_4(vsc,
275 					VIRTIO_BLK_CONFIG_SECTORS_MAX)
276 				* ld->sc_secsize;
277 		if (maxxfersize > MAXPHYS)
278 			maxxfersize = MAXPHYS;
279 	}
280 
281 	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
282 			    maxxfersize, maxxfersize / NBPG + 2,
283 			    "I/O request") != 0) {
284 		goto err;
285 	}
286 	qsize = sc->sc_vq[0].vq_num;
287 	sc->sc_vq[0].vq_done = ld_virtio_vq_done;
288 
289 	ld->sc_dv = self;
290 	ld->sc_secperunit = virtio_read_device_config_8(vsc,
291 				VIRTIO_BLK_CONFIG_CAPACITY);
292 	ld->sc_secsize = 512;
293 	if (features & VIRTIO_BLK_F_BLK_SIZE) {
294 		ld->sc_secsize = virtio_read_device_config_4(vsc,
295 					VIRTIO_BLK_CONFIG_BLK_SIZE);
296 	}
297 	ld->sc_maxxfer = maxxfersize;
298 	if (features & VIRTIO_BLK_F_GEOMETRY) {
299 		ld->sc_ncylinders = virtio_read_device_config_2(vsc,
300 					VIRTIO_BLK_CONFIG_GEOMETRY_C);
301 		ld->sc_nheads     = virtio_read_device_config_1(vsc,
302 					VIRTIO_BLK_CONFIG_GEOMETRY_H);
303 		ld->sc_nsectors   = virtio_read_device_config_1(vsc,
304 					VIRTIO_BLK_CONFIG_GEOMETRY_S);
305 	}
306 	ld->sc_maxqueuecnt = qsize;
307 
308 	if (ld_virtio_alloc_reqs(sc, qsize) < 0)
309 		goto err;
310 
311 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
312 
313 	ld->sc_dump = ld_virtio_dump;
314 	ld->sc_flush = NULL;
315 	ld->sc_start = ld_virtio_start;
316 
317 	ld->sc_flags = LDF_ENABLED;
318 	ldattach(ld);
319 
320 	return;
321 
322 err:
323 	vsc->sc_child = (void*)1;
324 	return;
325 }
326 
327 static int
328 ld_virtio_start(struct ld_softc *ld, struct buf *bp)
329 {
330 	/* splbio */
331 	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
332 	struct virtio_softc *vsc = sc->sc_virtio;
333 	struct virtqueue *vq = &sc->sc_vq[0];
334 	struct virtio_blk_req *vr;
335 	int r;
336 	int isread = (bp->b_flags & B_READ);
337 	int slot;
338 
339 	if (sc->sc_readonly && !isread)
340 		return EIO;
341 
342 	r = virtio_enqueue_prep(vsc, vq, &slot);
343 	if (r != 0)
344 		return r;
345 	vr = &sc->sc_reqs[slot];
346 	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
347 			    bp->b_data, bp->b_bcount, NULL,
348 			    ((isread?BUS_DMA_READ:BUS_DMA_WRITE)
349 			     |BUS_DMA_NOWAIT));
350 	if (r != 0)
351 		return r;
352 
353 	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 2);
354 	if (r != 0) {
355 		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
356 		return r;
357 	}
358 
359 	vr->vr_bp = bp;
360 	vr->vr_hdr.type = isread?VIRTIO_BLK_T_IN:VIRTIO_BLK_T_OUT;
361 	vr->vr_hdr.ioprio = 0;
362 	vr->vr_hdr.sector = bp->b_rawblkno * sc->sc_ld.sc_secsize / 512;
363 
364 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
365 			0, sizeof(struct virtio_blk_req_hdr),
366 			BUS_DMASYNC_PREWRITE);
367 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
368 			0, bp->b_bcount,
369 			isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE);
370 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
371 			offsetof(struct virtio_blk_req, vr_status),
372 			sizeof(uint8_t),
373 			BUS_DMASYNC_PREREAD);
374 
375 	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
376 			 0, sizeof(struct virtio_blk_req_hdr),
377 			 true);
378 	virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread);
379 	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
380 			 offsetof(struct virtio_blk_req, vr_status),
381 			 sizeof(uint8_t),
382 			 false);
383 	virtio_enqueue_commit(vsc, vq, slot, true);
384 
385 	return 0;
386 }
387 
388 static void
389 ld_virtio_vq_done1(struct ld_virtio_softc *sc, struct virtio_softc *vsc,
390 		   struct virtqueue *vq, int slot)
391 {
392 	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
393 	struct buf *bp = vr->vr_bp;
394 
395 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
396 			0, sizeof(struct virtio_blk_req_hdr),
397 			BUS_DMASYNC_POSTWRITE);
398 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
399 			0, bp->b_bcount,
400 			(bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD
401 					      :BUS_DMASYNC_POSTWRITE);
402 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
403 			sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
404 			BUS_DMASYNC_POSTREAD);
405 
406 	if (vr->vr_status != VIRTIO_BLK_S_OK) {
407 		bp->b_error = EIO;
408 		bp->b_resid = bp->b_bcount;
409 	} else {
410 		bp->b_error = 0;
411 		bp->b_resid = 0;
412 	}
413 
414 	virtio_dequeue_commit(vsc, vq, slot);
415 
416 	lddone(&sc->sc_ld, bp);
417 }
418 
419 static int
420 ld_virtio_vq_done(struct virtqueue *vq)
421 {
422 	struct virtio_softc *vsc = vq->vq_owner;
423 	struct ld_virtio_softc *sc = device_private(vsc->sc_child);
424 	int r = 0;
425 	int slot;
426 
427 again:
428 	if (virtio_dequeue(vsc, vq, &slot, NULL))
429 		return r;
430 	r = 1;
431 
432 	ld_virtio_vq_done1(sc, vsc, vq, slot);
433 	goto again;
434 }
435 
436 static int
437 ld_virtio_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
438 {
439 	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
440 	struct virtio_softc *vsc = sc->sc_virtio;
441 	struct virtqueue *vq = &sc->sc_vq[0];
442 	struct virtio_blk_req *vr;
443 	int slot, r;
444 
445 	if (sc->sc_readonly)
446 		return EIO;
447 
448 	r = virtio_enqueue_prep(vsc, vq, &slot);
449 	if (r != 0) {
450 		if (r == EAGAIN) { /* no free slot; dequeue first */
451 			delay(100);
452 			ld_virtio_vq_done(vq);
453 			r = virtio_enqueue_prep(vsc, vq, &slot);
454 			if (r != 0)
455 				return r;
456 		}
457 		return r;
458 	}
459 	vr = &sc->sc_reqs[slot];
460 	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
461 			    data, blkcnt*ld->sc_secsize, NULL,
462 			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
463 	if (r != 0)
464 		return r;
465 
466 	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 2);
467 	if (r != 0) {
468 		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
469 		return r;
470 	}
471 
472 	vr->vr_bp = (void*)0xdeadbeef;
473 	vr->vr_hdr.type = VIRTIO_BLK_T_OUT;
474 	vr->vr_hdr.ioprio = 0;
475 	vr->vr_hdr.sector = (daddr_t) blkno * ld->sc_secsize / 512;
476 
477 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
478 			0, sizeof(struct virtio_blk_req_hdr),
479 			BUS_DMASYNC_PREWRITE);
480 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
481 			0, blkcnt*ld->sc_secsize,
482 			BUS_DMASYNC_PREWRITE);
483 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
484 			offsetof(struct virtio_blk_req, vr_status),
485 			sizeof(uint8_t),
486 			BUS_DMASYNC_PREREAD);
487 
488 	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
489 			 0, sizeof(struct virtio_blk_req_hdr),
490 			 true);
491 	virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
492 	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
493 			 offsetof(struct virtio_blk_req, vr_status),
494 			 sizeof(uint8_t),
495 			 false);
496 	virtio_enqueue_commit(vsc, vq, slot, true);
497 
498 	for ( ; ; ) {
499 		int dslot;
500 
501 		r = virtio_dequeue(vsc, vq, &dslot, NULL);
502 		if (r != 0)
503 			continue;
504 		if (dslot != slot) {
505 			ld_virtio_vq_done1(sc, vsc, vq, dslot);
506 			continue;
507 		} else
508 			break;
509 	}
510 
511 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
512 			0, sizeof(struct virtio_blk_req_hdr),
513 			BUS_DMASYNC_POSTWRITE);
514 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
515 			0, blkcnt*ld->sc_secsize,
516 			BUS_DMASYNC_POSTWRITE);
517 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
518 			offsetof(struct virtio_blk_req, vr_status),
519 			sizeof(uint8_t),
520 			BUS_DMASYNC_POSTREAD);
521 	if (vr->vr_status == VIRTIO_BLK_S_OK)
522 		r = 0;
523 	else
524 		r = EIO;
525 	virtio_dequeue_commit(vsc, vq, slot);
526 
527 	return r;
528 }
529 
530 static int
531 ld_virtio_detach(device_t self, int flags)
532 {
533 	struct ld_virtio_softc *sc = device_private(self);
534 	struct ld_softc *ld = &sc->sc_ld;
535 	bus_dma_tag_t dmat = sc->sc_virtio->sc_dmat;
536 	int r, i, qsize;
537 
538 	qsize = sc->sc_vq[0].vq_num;
539 	r = ldbegindetach(ld, flags);
540 	if (r != 0)
541 		return r;
542 	virtio_reset(sc->sc_virtio);
543 	virtio_free_vq(sc->sc_virtio, &sc->sc_vq[0]);
544 
545 	for (i = 0; i < qsize; i++) {
546 		bus_dmamap_destroy(dmat,
547 				   sc->sc_reqs[i].vr_cmdsts);
548 		bus_dmamap_destroy(dmat,
549 				   sc->sc_reqs[i].vr_payload);
550 	}
551 	bus_dmamem_unmap(dmat, sc->sc_reqs,
552 			 sizeof(struct virtio_blk_req) * qsize);
553 	bus_dmamem_free(dmat, &sc->sc_reqs_segs[0], 1);
554 
555 	ldenddetach(ld);
556 
557 	return 0;
558 }
559