/*	$NetBSD: vioscsi.c,v 1.20 2018/06/10 14:59:23 jakllsch Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.20 2018/06/10 14:59:23 jakllsch Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/module.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

struct vioscsi_req {
	struct virtio_scsi_req_hdr	vr_req;
	struct virtio_scsi_res_hdr	vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			vr_control;
	bus_dmamap_t			vr_data;
};

struct vioscsi_softc {
	device_t		sc_dev;
	struct scsipi_adapter	sc_adapter;
	struct scsipi_channel	sc_channel;

	struct virtqueue	sc_vqs[3];
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

	struct vioscsi_req	*sc_reqs;
	int			sc_nreqs;
	bus_dma_segment_t	sc_reqs_segs[1];

	u_int32_t		sc_seg_max;

	kmutex_t		sc_mutex;
};

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2
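
/*
 * A request on the request virtqueue is a single descriptor chain,
 * laid out with the device-readable parts first (see the enqueue
 * sequence in vioscsi_scsipi_request() below):
 *
 *	[virtio_scsi_req_hdr][data-out ...][virtio_scsi_res_hdr][data-in ...]
 *
 * i.e. the two fixed segments above plus up to seg_max segments for
 * the data transfer in one direction.
 */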

static int	vioscsi_match(device_t, cfdata_t, void *);
static void	vioscsi_attach(device_t, device_t, void *);
static int	vioscsi_detach(device_t, int);

static int	vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int);
static void	vioscsi_free_reqs(struct vioscsi_softc *,
    struct virtio_softc *);
static void	vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	vioscsi_vq_done(struct virtqueue *);
static void	vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *, struct virtqueue *, int);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	vioscsi_bad_target(struct scsipi_xfer *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL3_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, vioscsi_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;

	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int rv, qsize = 0, i = 0;
	int ipl = IPL_BIO;

	if (virtio_child(vsc) != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
	    NULL, virtio_vq_intr, VIRTIO_F_PCI_INTR_MSIX,
	    0, VIRTIO_COMMON_FLAG_BITS);

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, ipl);

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    VIRTIO_SCSI_MIN_SEGMENTS + howmany(MAXPHYS, NBPG),
		    vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %d\n", i);
			goto err;
		}

		if (i == VIOSCSI_VQ_REQUEST)
			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
	if (vioscsi_alloc_reqs(sc, vsc, qsize))
		goto err;

	aprint_normal_dev(sc->sc_dev,
	    "cmd_per_lun %u qsize %d seg_max %u max_target %hu"
	    " max_lun %u\n",
	    cmd_per_lun, qsize, seg_max, max_target, max_lun);

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = MIN(qsize, cmd_per_lun);
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;
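
	/*
	 * For example (hypothetical numbers): a device advertising a
	 * 128-entry request queue with cmd_per_lun of 32 gets 32
	 * openings - the adapter can never have more commands in
	 * flight than the smaller of the two limits.
	 */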

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = MIN(max_target, 16);	/* cap reasonably */
	chan->chan_nluns = MIN(max_lun, 1024);		/* cap reasonably */
	chan->chan_id = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint);
	return;

err:
	if (qsize > 0)
		vioscsi_free_reqs(sc, vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	virtio_child_attach_failed(vsc);
}

static int
vioscsi_detach(device_t self, int flags)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	int rc, i;

	/*
	 * Dequeue all pending finished requests. Must be done
	 * before we try to detach children so that we process
	 * their pending requests while they still exist.
	 */
	if (sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num > 0)
		vioscsi_vq_done(&sc->sc_vqs[VIOSCSI_VQ_REQUEST]);

	if ((rc = config_detach_children(self, flags)) != 0)
		return rc;

	virtio_reset(vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	vioscsi_free_reqs(sc, vsc);

	virtio_child_detach(vsc);

	mutex_destroy(&sc->sc_mutex);

	return 0;
}

#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	/*
	 * This can happen when we run out of queue slots.
	 */
	vr = vioscsi_req_get(sc);
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
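	/*
	 * For example, target 3, LUN 5 is encoded as
	 *
	 *	{ 1, 2, 0x40, 0x05, 0, 0, 0, 0 }
	 *
	 * lun[1] holds periph_target - 1, and lun[2]/lun[3] carry the
	 * single-level LUN with the 0x40 flat-space address-method bits.
	 */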
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384
	    || periph->periph_target < 0 || periph->periph_lun < 0) {
		goto stuffup;
	}

	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
	req->lun[3] = periph->periph_lun & 0xFF;
	memset(req->lun + 4, 0, 4);

	DPRINTF(("%s: command %p for %d:%d at slot %d\n", __func__,
	    xs, periph->periph_target, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = slot;

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: error %d loading DMA map\n",
		    __func__, error);

		if (error == ENOMEM || error == EAGAIN) {
			/*
			 * Map is allocated with ALLOCNOW, so this should
			 * actually never ever happen.
			 */
			xs->error = XS_RESOURCE_SHORTAGE;
		} else {
stuffup:
			/* not a temporary condition */
			xs->error = XS_DRIVER_STUFFUP;
		}

		virtio_enqueue_abort(vsc, vq, slot);
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		aprint_error_dev(sc->sc_dev, "error reserving %d (nsegs %d)\n",
		    error, nsegs);
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
		/* slot already freed by virtio_enqueue_reserve() */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	vr->vr_xs = xs;

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
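	/*
	 * Busy-wait for up to roughly one second: 1000 iterations with
	 * a 1000us delay() between calls into the interrupt handler.
	 */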
	int timeout = 1000;
	do {
		virtio_intrhand(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: command %p done (timeout=%d)\n", __func__,
	    xs, timeout));
}

static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr, struct virtqueue *vq, int slot)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		vioscsi_bad_target(xs);
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: command %p done %d, %d, %d\n", __func__,
	    xs, xs->error, xs->status, xs->resid));

	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
	vr->vr_xs = NULL;

	virtio_dequeue_commit(vsc, vq, slot);

	/* scsipi_done() may issue new requests; don't hold our mutex. */
	mutex_exit(&sc->sc_mutex);
	scsipi_done(xs);
	mutex_enter(&sc->sc_mutex);
}

static void
vioscsi_bad_target(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;

	DPRINTF(("%s: bad target %d:%d\n", __func__,
	    xs->xs_periph->periph_target, xs->xs_periph->periph_lun));

	memset(sense, 0, sizeof(*sense));
	sense->response_code = 0x70;
	sense->flags = SKEY_ILLEGAL_REQUEST;
	xs->error = XS_SENSE;
	xs->status = 0;
	xs->resid = 0;
}

static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(virtio_child(vsc));
	int ret = 0;

	DPRINTF(("%s: enter %d\n", __func__, vq->vq_index));

	mutex_enter(&sc->sc_mutex);

	for (;;) {
		int r, slot;

		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));

		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot], vq, slot);

		ret = 1;
	}

	mutex_exit(&sc->sc_mutex);

	DPRINTF(("%s: exit %d: %d\n", __func__, vq->vq_index, ret));

	return ret;
}

static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	struct vioscsi_req *vr = NULL;
	int r, slot;

	mutex_enter(&sc->sc_mutex);

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto out;
	}
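	/*
	 * Free slots map 1:1 onto sc_reqs, so the slot number handed
	 * back by virtio_enqueue_prep() doubles as the request index
	 * (and later becomes the virtio-scsi request id).
	 */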
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

out:
	mutex_exit(&sc->sc_mutex);

	return vr;
}

static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

	/* Prepare maps for the requests */
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

		/*
		 * The control map covers the device-visible header and
		 * response, i.e. everything up to the vr_xs member.
		 */
		r = bus_dmamap_create(virtio_dmat(vsc),
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	/* Include slot 0; maps may exist for every slot up to the failure. */
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);

	/* Mark as unallocated so vioscsi_free_reqs() won't free twice. */
	sc->sc_reqs = NULL;
	sc->sc_nreqs = 0;

	return r;
}

static void
vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
{
	int slot;
	struct vioscsi_req *vr;

	if (sc->sc_nreqs == 0) {
		/* Not allocated */
		return;
	}

	/* Free request maps */
	for (slot = 0; slot < sc->sc_nreqs; slot++) {
		vr = &sc->sc_reqs[slot];

		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
	}

	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
	    sc->sc_nreqs * sizeof(struct vioscsi_req));
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
}

MODULE(MODULE_CLASS_DRIVER, vioscsi, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vioscsi_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}