/*	$NetBSD: vioscsi.c,v 1.22 2020/07/12 06:40:11 kim Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.22 2020/07/12 06:40:11 kim Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/module.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

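/*
 * Per-request bookkeeping.  vr_req and vr_res come first so that a
 * single DMA map (vr_control) can cover both headers as one
 * device-visible prefix of the structure; vr_data maps the caller's
 * data buffer separately.
 */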
struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;
	struct virtio_scsi_res_hdr	 vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			 vr_control;
	bus_dmamap_t			 vr_data;
};

struct vioscsi_softc {
	device_t 		 sc_dev;
	struct scsipi_adapter	 sc_adapter;
	struct scsipi_channel 	 sc_channel;

	struct virtqueue	 sc_vqs[3];
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

	struct vioscsi_req	*sc_reqs;
	int			 sc_nreqs;
	bus_dma_segment_t	 sc_reqs_segs[1];

	u_int32_t		 sc_seg_max;

	kmutex_t		 sc_mutex;
};

/*
 * Each block request uses at least two segments: one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2

static int	 vioscsi_match(device_t, cfdata_t, void *);
static void	 vioscsi_attach(device_t, device_t, void *);
static int	 vioscsi_detach(device_t, int);

static int	 vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int);
static void	 vioscsi_free_reqs(struct vioscsi_softc *,
    struct virtio_softc *);
static void	 vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	 vioscsi_vq_done(struct virtqueue *);
static void	 vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *, struct virtqueue *, int);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	 vioscsi_bad_target(struct scsipi_xfer *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL3_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, vioscsi_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;

	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int rv, qsize = 0, i = 0;
	int ipl = IPL_BIO;

	if (virtio_child(vsc) != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
	    NULL, virtio_vq_intr, VIRTIO_F_PCI_INTR_MSIX,
	    0, VIRTIO_COMMON_FLAG_BITS);

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, ipl);

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

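	/*
	 * Allocate the control, event and request virtqueues.  Each is
	 * sized for the two header segments plus one descriptor per
	 * page of a MAXPHYS-sized transfer, the worst case for a
	 * single request.
	 */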
	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    VIRTIO_SCSI_MIN_SEGMENTS + howmany(MAXPHYS, NBPG),
		    vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %d\n", i);
			goto err;
		}

		if (i == VIOSCSI_VQ_REQUEST)
			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
	if (vioscsi_alloc_reqs(sc, vsc, qsize))
		goto err;

	aprint_normal_dev(sc->sc_dev,
	    "cmd_per_lun %u qsize %d seg_max %u max_target %hu"
	    " max_lun %u\n",
	    cmd_per_lun, qsize, seg_max, max_target, max_lun);

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = MIN(qsize, cmd_per_lun);
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = MIN(max_target, 16);	/* cap reasonably */
	chan->chan_nluns = MIN(max_lun, 1024);		/* cap reasonably */
	chan->chan_id = max_target;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint);
	return;

err:
	if (qsize > 0)
		vioscsi_free_reqs(sc, vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	virtio_child_attach_failed(vsc);
}

static int
vioscsi_detach(device_t self, int flags)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	int rc, i;

	/*
	 * Dequeue all pending finished requests. Must be done
	 * before we try to detach children so that we process
	 * their pending requests while they still exist.
	 */
	if (sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num > 0)
		vioscsi_vq_done(&sc->sc_vqs[VIOSCSI_VQ_REQUEST]);

	if ((rc = config_detach_children(self, flags)) != 0)
		return rc;

	virtio_reset(vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	vioscsi_free_reqs(sc, vsc);

	virtio_child_detach(vsc);

	mutex_destroy(&sc->sc_mutex);

	return 0;
}

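/*
 * Map scsipi transfer-control flags onto bus_dma(9) load and sync
 * flags for the data buffer.
 */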
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot, error;
	bool dopoll;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	/*
	 * This can happen when we run out of queue slots.
	 */
	vr = vioscsi_req_get(sc);
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384
	    || periph->periph_target < 0 || periph->periph_lun < 0) {
		goto stuffup;
	}

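	/* For example, target 3 LUN 5 encodes as 01 03 40 05 00 00 00 00. */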
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target;
	req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
	req->lun[3] = periph->periph_lun & 0xFF;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command %p for %d:%d at slot %d\n", __func__,
	    xs, periph->periph_target, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = slot;

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: error %d loading DMA map\n",
		    __func__, error);

		if (error == ENOMEM || error == EAGAIN) {
			/*
			 * Map is allocated with ALLOCNOW, so this should
			 * actually never ever happen.
			 */
			xs->error = XS_RESOURCE_SHORTAGE;
		} else {
stuffup:
			/* not a temporary condition */
			xs->error = XS_DRIVER_STUFFUP;
		}

		virtio_enqueue_abort(vsc, vq, slot);
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		aprint_error_dev(sc->sc_dev, "error reserving %d (nsegs %d)\n",
		    error, nsegs);
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
		/* slot already freed by virtio_enqueue_reserve() */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	vr->vr_xs = xs;

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	dopoll = (xs->xs_control & XS_CTL_POLL) != 0;
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if (!dopoll)
		return;

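	/*
	 * Polled mode: call the interrupt handler directly until the
	 * command completes or ~1 second (1000 x 1 ms) elapses.
	 */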
	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	int timeout = 1000;
	do {
		virtio_intrhand(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: command %p done (timeout=%d)\n", __func__,
	    xs, timeout));
}

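/*
 * Complete one request: sync the DMA maps, translate the virtio
 * response into scsipi status, and hand the xfer back to scsipi.
 * Called with sc_mutex held; dropped around scsipi_done().
 */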
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr, struct virtqueue *vq, int slot)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		vioscsi_bad_target(xs);
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: command %p done %d, %d, %d\n", __func__,
	    xs, xs->error, xs->status, xs->resid));

	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
	vr->vr_xs = NULL;

	virtio_dequeue_commit(vsc, vq, slot);

	mutex_exit(&sc->sc_mutex);
	scsipi_done(xs);
	mutex_enter(&sc->sc_mutex);
}

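/*
 * Synthesize ILLEGAL REQUEST sense data so scsipi treats a
 * nonexistent target as a clean selection failure.
 */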
static void
vioscsi_bad_target(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;

	DPRINTF(("%s: bad target %d:%d\n", __func__,
	    xs->xs_periph->periph_target, xs->xs_periph->periph_lun));

	memset(sense, 0, sizeof(*sense));
	sense->response_code = 0x70;
	sense->flags = SKEY_ILLEGAL_REQUEST;
	xs->error = XS_SENSE;
	xs->status = 0;
	xs->resid = 0;
}

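/*
 * Completion handler for the request virtqueue: drain every finished
 * slot and complete its request under sc_mutex.
 */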
static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(virtio_child(vsc));
	int ret = 0;

	DPRINTF(("%s: enter %d\n", __func__, vq->vq_index));

	mutex_enter(&sc->sc_mutex);

	for (;;) {
		int r, slot;

		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));

		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot], vq, slot);

		ret = 1;
	}

	mutex_exit(&sc->sc_mutex);

	DPRINTF(("%s: exit %d: %d\n", __func__, vq->vq_index, ret));

	return ret;
}

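/*
 * Reserve a request slot by preparing a descriptor on the request
 * queue; returns NULL when the queue is full.
 */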
static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	struct vioscsi_req *vr = NULL;
	int r, slot;

	mutex_enter(&sc->sc_mutex);

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto out;
	}
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

out:
	mutex_exit(&sc->sc_mutex);

	return vr;
}

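/*
 * Allocate one DMA-safe array of vioscsi_req structures, sized to the
 * request queue, and create per-request control and data DMA maps.
 */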
static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

	/* Prepare maps for the requests */
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

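		/*
		 * The control map covers only the device-visible prefix
		 * of the request (vr_req and vr_res); offsetof(..., vr_xs)
		 * is the size of that prefix.
		 */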
		r = bus_dmamap_create(virtio_dmat(vsc),
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);

	/* Mark as unallocated so vioscsi_free_reqs() won't free twice. */
	sc->sc_reqs = NULL;
	sc->sc_nreqs = 0;

	return r;
}

663 
664 static void
665 vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
666 {
667 	int slot;
668 	struct vioscsi_req *vr;
669 
670 	if (sc->sc_nreqs == 0) {
671 		/* Not allocated */
672 		return;
673 	}
674 
675 	/* Free request maps */
676 	for (slot=0; slot < sc->sc_nreqs; slot++) {
677 		vr = &sc->sc_reqs[slot];
678 
679 		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
680 		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
681 	}
682 
683 	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
684 			 sc->sc_nreqs * sizeof(struct vioscsi_req));
685 	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
686 }
687 
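/*
 * Module glue: register and unregister the driver with autoconf
 * when built as a loadable module.
 */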
MODULE(MODULE_CLASS_DRIVER, vioscsi, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vioscsi_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}
717