/*	$NetBSD: vioscsi.c,v 1.30 2022/10/11 22:03:37 andvar Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.30 2022/10/11 22:03:37 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/module.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

struct vioscsi_req {
	struct virtio_scsi_req_hdr	vr_req;
	struct virtio_scsi_res_hdr	vr_res;
	struct scsipi_xfer		*vr_xs;
	bus_dmamap_t			vr_control;
	bus_dmamap_t			vr_data;
};

struct vioscsi_softc {
	device_t		sc_dev;
	struct scsipi_adapter	sc_adapter;
	struct scsipi_channel	sc_channel;

	struct virtqueue	sc_vqs[3];
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

	struct vioscsi_req	*sc_reqs;
	int			sc_nreqs;
	bus_dma_segment_t	sc_reqs_segs[1];

	uint32_t		sc_seg_max;

	kmutex_t		sc_mutex;
};

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2
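
/*
 * Descriptor layout for one request on the request virtqueue, as
 * enqueued by vioscsi_scsipi_request() below (the data segments are
 * present only when the command actually transfers data):
 *
 *	[0]   virtio_scsi_req_hdr	(device-readable)
 *	[...] data-out segments		(device-readable, XS_CTL_DATA_OUT)
 *	[n]   virtio_scsi_res_hdr	(device-writable)
 *	[...] data-in segments		(device-writable, XS_CTL_DATA_IN)
 *
 * Hence VIRTIO_SCSI_MIN_SEGMENTS above: the request header and the
 * response always consume one segment each.
 */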

static int	vioscsi_match(device_t, cfdata_t, void *);
static void	vioscsi_attach(device_t, device_t, void *);
static int	vioscsi_detach(device_t, int);

static int	vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int);
static void	vioscsi_free_reqs(struct vioscsi_softc *,
    struct virtio_softc *);
static void	vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	vioscsi_vq_done(struct virtqueue *);
static void	vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *, struct virtqueue *, int);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	vioscsi_bad_target(struct scsipi_xfer *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL3_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, vioscsi_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == VIRTIO_DEVICE_ID_SCSI)
		return 1;

	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int rv, qsize = 0, i = 0;
	int ipl = IPL_BIO;

	if (virtio_child(vsc) != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
	    NULL, virtio_vq_intr, VIRTIO_F_INTR_MSIX,
	    0, VIRTIO_COMMON_FLAG_BITS);

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, ipl);

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    VIRTIO_SCSI_MIN_SEGMENTS + howmany(MAXPHYS, NBPG),
		    vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %d\n", i);
			goto err;
		}

		if (i == VIOSCSI_VQ_REQUEST)
			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
	if (vioscsi_alloc_reqs(sc, vsc, qsize))
		goto err;

	aprint_normal_dev(sc->sc_dev,
	    "cmd_per_lun %u qsize %d seg_max %u max_target %hu"
	    " max_lun %u\n",
	    cmd_per_lun, qsize, seg_max, max_target, max_lun);

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = MIN(qsize, cmd_per_lun);
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;
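
	/*
	 * The caps below match the single-level LUN addressing used in
	 * vioscsi_scsipi_request(): the target must fit in one byte and
	 * the LUN in 14 bits, so at most 256 targets and 16384 LUNs are
	 * addressable regardless of what the device's config space
	 * reports.
	 */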
	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = MIN(1 + max_target, 256);	/* cap reasonably */
	chan->chan_nluns = MIN(1 + max_lun, 16384);	/* cap reasonably */
	chan->chan_id = max_target + 1;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint, CFARGS_NONE);
	return;

err:
	if (qsize > 0)
		vioscsi_free_reqs(sc, vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	virtio_child_attach_failed(vsc);
}

static int
vioscsi_detach(device_t self, int flags)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	int rc, i;

	/*
	 * Dequeue all pending finished requests. Must be done
	 * before we try to detach children so that we process
	 * their pending requests while they still exist.
	 */
	if (sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num > 0)
		vioscsi_vq_done(&sc->sc_vqs[VIOSCSI_VQ_REQUEST]);

	if ((rc = config_detach_children(self, flags)) != 0)
		return rc;

	virtio_reset(vsc);

	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
		if (sc->sc_vqs[i].vq_num > 0)
			virtio_free_vq(vsc, &sc->sc_vqs[i]);
	}

	vioscsi_free_reqs(sc, vsc);

	virtio_child_detach(vsc);

	mutex_destroy(&sc->sc_mutex);

	return 0;
}

/* Derive bus_dma load flags and sync ops from the transfer direction. */
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot, error;
	bool dopoll;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	/*
	 * This can happen when we run out of queue slots.
	 */
	vr = vioscsi_req_get(sc);
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
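	/*
	 * For example (illustrative values): target 5, LUN 3 encodes as
	 * { 1, 5, 0x40, 0x03, 0, 0, 0, 0 }, and LUN 0x123 on the same
	 * target as { 1, 5, 0x41, 0x23, 0, 0, 0, 0 }.
	 */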
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384
	    || periph->periph_target < 0 || periph->periph_lun < 0) {
		goto stuffup;
	}

	req->lun[0] = 1;
	req->lun[1] = periph->periph_target;
	req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
	req->lun[3] = periph->periph_lun & 0xFF;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command %p for %d:%d at slot %d\n", __func__,
	    xs, periph->periph_target, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = virtio_rw64(vsc, slot);

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	if (error) {
		aprint_error_dev(sc->sc_dev, "%s: error %d loading DMA map\n",
		    __func__, error);

		if (error == ENOMEM || error == EAGAIN) {
			/*
			 * Map is allocated with ALLOCNOW, so this should
			 * actually never ever happen.
			 */
			xs->error = XS_RESOURCE_SHORTAGE;
		} else {
stuffup:
			/* not a temporary condition */
			xs->error = XS_DRIVER_STUFFUP;
		}

		virtio_enqueue_abort(vsc, vq, slot);
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		aprint_error_dev(sc->sc_dev, "error reserving %d (nsegs %d)\n",
		    error, nsegs);
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
		/* slot already freed by virtio_enqueue_reserve() */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	vr->vr_xs = xs;

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

	/*
	 * Enqueue in the order the device expects: the device-readable
	 * segments (header, then data-out) before the device-writable
	 * ones (response, then data-in).
	 */
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	dopoll = (xs->xs_control & XS_CTL_POLL) != 0;
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if (!dopoll)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
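	/*
	 * Poll for up to a second (1000 iterations x 1 ms delay), kicking
	 * the interrupt handler by hand until vioscsi_req_done() has
	 * detached this xfer from the request.
	 */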
	int timeout = 1000;
	do {
		virtio_intrhand(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: command %p done (timeout=%d)\n", __func__,
	    xs, timeout));
}

static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr, struct virtqueue *vq, int slot)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	if (xs->datalen)
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
		    XS2DMAPOST(xs));

	xs->status = vr->vr_res.status;
	xs->resid = virtio_rw32(vsc, vr->vr_res.residual);

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense),
		    virtio_rw32(vsc, vr->vr_res.sense_len));
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		vioscsi_bad_target(xs);
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: command %p done %d, %d, %d\n", __func__,
	    xs, xs->error, xs->status, xs->resid));

	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
	vr->vr_xs = NULL;

	virtio_dequeue_commit(vsc, vq, slot);

	/* Drop the lock around scsipi_done(); it may re-enter the adapter. */
	mutex_exit(&sc->sc_mutex);
	scsipi_done(xs);
	mutex_enter(&sc->sc_mutex);
}

static void
vioscsi_bad_target(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;

	DPRINTF(("%s: bad target %d:%d\n", __func__,
	    xs->xs_periph->periph_target, xs->xs_periph->periph_lun));

	/* Synthesize ILLEGAL REQUEST sense for the nonexistent target. */
	memset(sense, 0, sizeof(*sense));
	sense->response_code = 0x70;
	sense->flags = SKEY_ILLEGAL_REQUEST;
	xs->error = XS_SENSE;
	xs->status = 0;
	xs->resid = 0;
}

static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(virtio_child(vsc));
	int ret = 0;

	DPRINTF(("%s: enter %d\n", __func__, vq->vq_index));

	mutex_enter(&sc->sc_mutex);

	for (;;) {
		int r, slot;

		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));

		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot], vq, slot);

		ret = 1;
	}

	mutex_exit(&sc->sc_mutex);

	DPRINTF(("%s: exit %d: %d\n", __func__, vq->vq_index, ret));

	return ret;
}

static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	struct vioscsi_req *vr = NULL;
	int r, slot;

	mutex_enter(&sc->sc_mutex);
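	/*
	 * The slot handed out by virtio_enqueue_prep() doubles as the
	 * index into sc_reqs, so no separate free list of requests is
	 * needed; vioscsi_scsipi_request() recovers the slot from the
	 * pointer difference.
	 */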
	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto out;
	}
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

out:
	mutex_exit(&sc->sc_mutex);

	return vr;
}

static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

	/* Prepare maps for the requests */
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

		/*
		 * The control map covers everything up to vr_xs, i.e.
		 * the device-visible vr_req and vr_res headers.
		 */
		r = bus_dmamap_create(virtio_dmat(vsc),
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	/* Tear down every map created so far, including slot 0. */
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);

	return r;
}

static void
vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
{
	int slot;
	struct vioscsi_req *vr;

	if (sc->sc_nreqs == 0) {
		/* Not allocated */
		return;
	}

	/* Free request maps */
	for (slot = 0; slot < sc->sc_nreqs; slot++) {
		vr = &sc->sc_reqs[slot];

		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
	}

	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
	    sc->sc_nreqs * sizeof(struct vioscsi_req));
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
}

MODULE(MODULE_CLASS_DRIVER, vioscsi, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vioscsi_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

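	/*
	 * Module load/unload only registers/unregisters the autoconf glue
	 * generated into ioconf.c; the driver proper attaches through the
	 * usual virtio child-attach path above.
	 */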
#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vioscsi,
		    cfattach_ioconf_vioscsi, cfdata_ioconf_vioscsi);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}