/*	$NetBSD: vioscsi.c,v 1.19 2017/05/15 21:30:37 jdolecek Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.19 2017/05/15 21:30:37 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/module.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;
	struct virtio_scsi_res_hdr	 vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			 vr_control;
	bus_dmamap_t			 vr_data;
};

struct vioscsi_softc {
	device_t		 sc_dev;
	struct scsipi_adapter	 sc_adapter;
	struct scsipi_channel	 sc_channel;

	struct virtqueue	 sc_vqs[3];
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

	struct vioscsi_req	*sc_reqs;
	int			 sc_nreqs;
	bus_dma_segment_t	 sc_reqs_segs[1];

	uint32_t		 sc_seg_max;

	kmutex_t		 sc_mutex;
};

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2
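/*
 * A request occupies one descriptor chain on the request virtqueue. The
 * chain is laid out (see vioscsi_scsipi_request()) as the device-readable
 * virtio_scsi_req_hdr, an optional device-readable data-out buffer, the
 * device-writable virtio_scsi_res_hdr, and an optional device-writable
 * data-in buffer; the two headers account for the two-segment minimum.
 */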

static int	 vioscsi_match(device_t, cfdata_t, void *);
static void	 vioscsi_attach(device_t, device_t, void *);
static int	 vioscsi_detach(device_t, int);

static int	 vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int);
static void	 vioscsi_free_reqs(struct vioscsi_softc *,
    struct virtio_softc *);
static void	 vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	 vioscsi_vq_done(struct virtqueue *);
static void	 vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *, struct virtqueue *, int);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	 vioscsi_bad_target(struct scsipi_xfer *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL3_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, vioscsi_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;

	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int rv, qsize = 0, i = 0;
	int ipl = IPL_BIO;

	if (virtio_child(vsc) != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
	    NULL, virtio_vq_intr, VIRTIO_F_PCI_INTR_MSIX,
	    0, VIRTIO_COMMON_FLAG_BITS);

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, ipl);

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    VIRTIO_SCSI_MIN_SEGMENTS + howmany(MAXPHYS, NBPG),
		    vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %d\n", i);
			goto err;
		}

		if (i == VIOSCSI_VQ_REQUEST)
			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
	if (vioscsi_alloc_reqs(sc, vsc, qsize))
		goto err;

	aprint_normal_dev(sc->sc_dev,
	    "cmd_per_lun %u qsize %d seg_max %u max_target %hu"
	    " max_lun %u\n",
	    cmd_per_lun, qsize, seg_max, max_target, max_lun);

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = MIN(qsize, cmd_per_lun);
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = MIN(max_target, 16);	/* cap reasonably */
	chan->chan_nluns = MIN(max_lun, 1024);		/* cap reasonably */
	chan->chan_id = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint);
	return;

err:
	if (qsize > 0)
		vioscsi_free_reqs(sc, vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	virtio_child_attach_failed(vsc);
}

static int
vioscsi_detach(device_t self, int flags)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	int rc, i;

	/*
	 * Dequeue all pending finished requests. Must be done
	 * before we try to detach children so that we process
	 * their pending requests while they still exist.
	 */
	if (sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num > 0)
		vioscsi_vq_done(&sc->sc_vqs[VIOSCSI_VQ_REQUEST]);

	if ((rc = config_detach_children(self, flags)) != 0)
		return rc;

	virtio_reset(vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	vioscsi_free_reqs(sc, vsc);

	virtio_child_detach(vsc);

	mutex_destroy(&sc->sc_mutex);

	return 0;
}

#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)
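
/*
 * The XS2DMA* macros derive bus_dma(9) flags from xs_control: for example,
 * a data-in (device to memory) transfer issued with XS_CTL_NOSLEEP maps
 * with BUS_DMA_READ | BUS_DMA_NOWAIT | BUS_DMA_STREAMING and is synced
 * with BUS_DMASYNC_PREREAD before and BUS_DMASYNC_POSTREAD after the
 * command runs.
 */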

static void
vioscsi_scsipi_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	/*
	 * This can happen when we run out of queue slots.
	 */
	vr = vioscsi_req_get(sc);
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384
	    || periph->periph_target < 0 || periph->periph_lun < 0) {
		goto stuffup;
	}

	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
	req->lun[3] = periph->periph_lun & 0xFF;
	memset(req->lun + 4, 0, 4);
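	/*
	 * Worked example, assuming the "- 1" shift reflects chan_id 0
	 * (set in vioscsi_attach()) naming the adapter itself, so scsipi
	 * targets start at 1: periph target 1, LUN 3 encodes as
	 * { 1, 0, 0x40, 0x03, 0, 0, 0, 0 }.
	 */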
	DPRINTF(("%s: command %p for %d:%d at slot %d\n", __func__,
	    xs, periph->periph_target, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = slot;

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: error %d loading DMA map\n",
		    __func__, error);

		if (error == ENOMEM || error == EAGAIN) {
			/*
			 * Map is allocated with ALLOCNOW, so this should
			 * never actually happen.
			 */
			xs->error = XS_RESOURCE_SHORTAGE;
		} else {
stuffup:
			/* not a temporary condition */
			xs->error = XS_DRIVER_STUFFUP;
		}

		virtio_enqueue_abort(vsc, vq, slot);
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		aprint_error_dev(sc->sc_dev, "error reserving %d (nsegs %d)\n",
		    error, nsegs);
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
		/* slot already freed by virtio_enqueue_reserve() */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	vr->vr_xs = xs;

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
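	/*
	 * Poll budget: up to 1000 passes with a 1 ms delay each, i.e.
	 * roughly one second, invoking the interrupt handler by hand
	 * since XS_CTL_POLL commands cannot rely on interrupts.
	 */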
	int timeout = 1000;
	do {
		virtio_intrhand(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: command %p done (timeout=%d)\n", __func__,
	    xs, timeout));
}

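/*
 * Completion path: sync headers and payload, translate the virtio response
 * into scsipi status/sense, and hand the xfer back.  sc_mutex is dropped
 * around scsipi_done() because completion may immediately trigger a new
 * vioscsi_scsipi_request(), which takes the same mutex in vioscsi_req_get().
 */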
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr, struct virtqueue *vq, int slot)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		vioscsi_bad_target(xs);
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: command %p done %d, %d, %d\n", __func__,
	    xs, xs->error, xs->status, xs->resid));

	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
	vr->vr_xs = NULL;

	virtio_dequeue_commit(vsc, vq, slot);

	mutex_exit(&sc->sc_mutex);
	scsipi_done(xs);
	mutex_enter(&sc->sc_mutex);
}

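/*
 * The device reported VIRTIO_SCSI_S_BAD_TARGET: synthesize fixed-format
 * (response code 0x70) sense data with ILLEGAL REQUEST, so the midlayer
 * sees a clean selection failure rather than a driver error.
 */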
static void
vioscsi_bad_target(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;

	DPRINTF(("%s: bad target %d:%d\n", __func__,
	    xs->xs_periph->periph_target, xs->xs_periph->periph_lun));

	memset(sense, 0, sizeof(*sense));
	sense->response_code = 0x70;
	sense->flags = SKEY_ILLEGAL_REQUEST;
	xs->error = XS_SENSE;
	xs->status = 0;
	xs->resid = 0;
}

static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(virtio_child(vsc));
	int ret = 0;

	DPRINTF(("%s: enter %d\n", __func__, vq->vq_index));

	mutex_enter(&sc->sc_mutex);

	for (;;) {
		int r, slot;

		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));

		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot], vq, slot);

		ret = 1;
	}

	mutex_exit(&sc->sc_mutex);

	DPRINTF(("%s: exit %d: %d\n", __func__, vq->vq_index, ret));

	return ret;
}

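/*
 * Ring slots and requests correspond one to one: virtio_enqueue_prep()
 * hands out a free descriptor slot and sc_reqs[slot] is the request
 * permanently paired with it, so no separate free list is needed.
 */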
static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	struct vioscsi_req *vr = NULL;
	int r, slot;

	mutex_enter(&sc->sc_mutex);

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto out;
	}
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

out:
	mutex_exit(&sc->sc_mutex);

	return vr;
}

static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

	/* Prepare maps for the requests */
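	/*
	 * Each request gets two DMA maps: vr_control covers the header
	 * prefix of the vioscsi_req itself (vr_req and vr_res, everything
	 * before vr_xs, which is why offsetof(struct vioscsi_req, vr_xs)
	 * doubles as the map size), and vr_data covers the payload with
	 * up to sc_seg_max segments.
	 */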
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

		r = bus_dmamap_create(virtio_dmat(vsc),
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	/* Clean up slot 0 as well; maps never created are still NULL. */
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);

	return r;
}

static void
vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
{
	int slot;
	struct vioscsi_req *vr;

	if (sc->sc_nreqs == 0) {
		/* Not allocated */
		return;
	}

	/* Free request maps */
	for (slot = 0; slot < sc->sc_nreqs; slot++) {
		vr = &sc->sc_reqs[slot];

		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
	}

	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
	    sc->sc_nreqs * sizeof(struct vioscsi_req));
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
}

MODULE(MODULE_CLASS_DRIVER, vioscsi, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vioscsi_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}