/*	$OpenBSD: vscsi.c,v 1.40 2016/09/15 02:00:17 dlg Exp $ */

/*
 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/pool.h>
#include <sys/task.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/vscsivar.h>

int		vscsi_match(struct device *, void *, void *);
void		vscsi_attach(struct device *, struct device *, void *);
void		vscsi_shutdown(void *);

struct vscsi_ccb {
	TAILQ_ENTRY(vscsi_ccb)	ccb_entry;
	int			ccb_tag;
	struct scsi_xfer	*ccb_xs;
	size_t			ccb_datalen;
};

TAILQ_HEAD(vscsi_ccb_list, vscsi_ccb);

enum vscsi_state {
	VSCSI_S_CLOSED,
	VSCSI_S_CONFIG,
	VSCSI_S_RUNNING
};

struct vscsi_softc {
	struct device		sc_dev;
	struct scsi_link	sc_link;
	struct scsibus_softc	*sc_scsibus;

	struct mutex		sc_state_mtx;
	enum vscsi_state	sc_state;
	u_int			sc_ref_count;
	struct pool		sc_ccb_pool;

	struct scsi_iopool	sc_iopool;

	struct vscsi_ccb_list	sc_ccb_i2t;
	struct vscsi_ccb_list	sc_ccb_t2i;
	int			sc_ccb_tag;
	struct mutex		sc_poll_mtx;
	struct rwlock		sc_ioc_lock;

	struct selinfo		sc_sel;
	struct mutex		sc_sel_mtx;
};

#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
#define DEV2SC(_d) ((struct vscsi_softc *)device_lookup(&vscsi_cd, minor(_d)))

struct cfattach vscsi_ca = {
	sizeof(struct vscsi_softc),
	vscsi_match,
	vscsi_attach
};

struct cfdriver vscsi_cd = {
	NULL,
	"vscsi",
	DV_DULL
};

void		vscsi_cmd(struct scsi_xfer *);
int		vscsi_probe(struct scsi_link *);
void		vscsi_free(struct scsi_link *);

struct scsi_adapter vscsi_switch = {
	vscsi_cmd,
	scsi_minphys,
	vscsi_probe,
	vscsi_free
};

int		vscsi_i2t(struct vscsi_softc *, struct vscsi_ioc_i2t *);
int		vscsi_data(struct vscsi_softc *, struct vscsi_ioc_data *, int);
int		vscsi_t2i(struct vscsi_softc *, struct vscsi_ioc_t2i *);
int		vscsi_devevent(struct vscsi_softc *, u_long,
		    struct vscsi_ioc_devevent *);
void		vscsi_devevent_task(void *);
void		vscsi_done(struct vscsi_softc *, struct vscsi_ccb *);

void *		vscsi_ccb_get(void *);
void		vscsi_ccb_put(void *, void *);

void		filt_vscsidetach(struct knote *);
int		filt_vscsiread(struct knote *, long);

struct filterops vscsi_filtops = {
	1,
	NULL,
	filt_vscsidetach,
	filt_vscsiread
};


int
vscsi_match(struct device *parent, void *match, void *aux)
{
	return (1);
}

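/*
 * vscsi(4) presents a SCSI bus whose commands are serviced by a
 * userland target emulator through ioctls on the device node.
 *
 * vscsi_cmd() places each scsi_xfer on the sc_ccb_i2t (initiator to
 * target) queue and wakes poll(2)/kqueue(2) waiters.  Userland fetches
 * the command with VSCSI_I2T, which moves the ccb onto sc_ccb_t2i,
 * transfers any data with VSCSI_DATA_READ/VSCSI_DATA_WRITE, and
 * completes the command with VSCSI_T2I, which calls vscsi_done().
 *
 * sc_state tracks whether the device node is open: commands are only
 * accepted and probes only succeed while it is VSCSI_S_RUNNING, and
 * vscsiclose() aborts any ccbs still queued with XS_RESET.
 */
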
void
vscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct vscsi_softc		*sc = (struct vscsi_softc *)self;
	struct scsibus_attach_args	saa;

	printf("\n");

	mtx_init(&sc->sc_state_mtx, IPL_BIO);
	sc->sc_state = VSCSI_S_CLOSED;

	TAILQ_INIT(&sc->sc_ccb_i2t);
	TAILQ_INIT(&sc->sc_ccb_t2i);
	mtx_init(&sc->sc_poll_mtx, IPL_BIO);
	mtx_init(&sc->sc_sel_mtx, IPL_BIO);
	rw_init(&sc->sc_ioc_lock, "vscsiioc");
	scsi_iopool_init(&sc->sc_iopool, sc, vscsi_ccb_get, vscsi_ccb_put);

	sc->sc_link.adapter = &vscsi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = 256;
	sc->sc_link.adapter_buswidth = 256;
	sc->sc_link.openings = 16;
	sc->sc_link.pool = &sc->sc_iopool;

	memset(&saa, 0, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}

void
vscsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct vscsi_softc		*sc = link->adapter_softc;
	struct vscsi_ccb		*ccb = xs->io;
	int				polled = ISSET(xs->flags, SCSI_POLL);
	int				running = 0;

	if (ISSET(xs->flags, SCSI_POLL) && ISSET(xs->flags, SCSI_NOSLEEP)) {
		printf("%s: POLL && NOSLEEP for 0x%02x\n", DEVNAME(sc),
		    xs->cmd->opcode);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING) {
		running = 1;
		TAILQ_INSERT_TAIL(&sc->sc_ccb_i2t, ccb, ccb_entry);
	}
	mtx_leave(&sc->sc_state_mtx);

	if (!running) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	selwakeup(&sc->sc_sel);

	if (polled) {
		mtx_enter(&sc->sc_poll_mtx);
		while (ccb->ccb_xs != NULL)
			msleep(ccb, &sc->sc_poll_mtx, PRIBIO, "vscsipoll", 0);
		mtx_leave(&sc->sc_poll_mtx);
		scsi_done(xs);
	}
}

void
vscsi_done(struct vscsi_softc *sc, struct vscsi_ccb *ccb)
{
	struct scsi_xfer		*xs = ccb->ccb_xs;

	if (ISSET(xs->flags, SCSI_POLL)) {
		mtx_enter(&sc->sc_poll_mtx);
		ccb->ccb_xs = NULL;
		wakeup(ccb);
		mtx_leave(&sc->sc_poll_mtx);
	} else
		scsi_done(xs);
}

int
vscsi_probe(struct scsi_link *link)
{
	struct vscsi_softc		*sc = link->adapter_softc;
	int				rv = 0;

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING)
		sc->sc_ref_count++;
	else
		rv = ENXIO;
	mtx_leave(&sc->sc_state_mtx);

	return (rv);
}

void
vscsi_free(struct scsi_link *link)
{
	struct vscsi_softc		*sc = link->adapter_softc;

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_ref_count--;
	if (sc->sc_state != VSCSI_S_RUNNING && sc->sc_ref_count == 0)
		wakeup(&sc->sc_ref_count);
	mtx_leave(&sc->sc_state_mtx);
}

int
vscsiopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	enum vscsi_state		state = VSCSI_S_RUNNING;
	int				rv = 0;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state != VSCSI_S_CLOSED)
		rv = EBUSY;
	else
		sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	if (rv != 0) {
		device_unref(&sc->sc_dev);
		return (rv);
	}

	pool_init(&sc->sc_ccb_pool, sizeof(struct vscsi_ccb), 0, IPL_BIO, 0,
	    "vscsiccb", NULL);

	/* we need to guarantee some ccbs will be available for the iopool */
	rv = pool_prime(&sc->sc_ccb_pool, 8);
	if (rv != 0) {
		pool_destroy(&sc->sc_ccb_pool);
		state = VSCSI_S_CLOSED;
	}

	/* commit changes */
	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = state;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (rv);
}

int
vscsiioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				read = 0;
	int				err = 0;

	if (sc == NULL)
		return (ENXIO);

	rw_enter_write(&sc->sc_ioc_lock);

	switch (cmd) {
	case VSCSI_I2T:
		err = vscsi_i2t(sc, (struct vscsi_ioc_i2t *)addr);
		break;

	case VSCSI_DATA_READ:
		read = 1;
	case VSCSI_DATA_WRITE:
		err = vscsi_data(sc, (struct vscsi_ioc_data *)addr, read);
		break;

	case VSCSI_T2I:
		err = vscsi_t2i(sc, (struct vscsi_ioc_t2i *)addr);
		break;

	case VSCSI_REQPROBE:
	case VSCSI_REQDETACH:
		err = vscsi_devevent(sc, cmd,
		    (struct vscsi_ioc_devevent *)addr);
		break;

	default:
		err = ENOTTY;
		break;
	}

	rw_exit_write(&sc->sc_ioc_lock);

	device_unref(&sc->sc_dev);
	return (err);
}

int
vscsi_i2t(struct vscsi_softc *sc, struct vscsi_ioc_i2t *i2t)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;

	mtx_enter(&sc->sc_state_mtx);
	ccb = TAILQ_FIRST(&sc->sc_ccb_i2t);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
	mtx_leave(&sc->sc_state_mtx);

	if (ccb == NULL)
		return (EAGAIN);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	i2t->tag = ccb->ccb_tag;
	i2t->target = link->target;
	i2t->lun = link->lun;
	memcpy(&i2t->cmd, xs->cmd, xs->cmdlen);
	i2t->cmdlen = xs->cmdlen;
	i2t->datalen = xs->datalen;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		i2t->direction = VSCSI_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		i2t->direction = VSCSI_DIR_WRITE;
		break;
	default:
		i2t->direction = VSCSI_DIR_NONE;
		break;
	}

	TAILQ_INSERT_TAIL(&sc->sc_ccb_t2i, ccb, ccb_entry);

	return (0);
}

int
vscsi_data(struct vscsi_softc *sc, struct vscsi_ioc_data *data, int read)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				xsread;
	u_int8_t			*buf;
	int				rv = EINVAL;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == data->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	xs = ccb->ccb_xs;

	if (data->datalen > xs->datalen - ccb->ccb_datalen)
		return (ENOMEM);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xsread = 1;
		break;
	case SCSI_DATA_OUT:
		xsread = 0;
		break;
	default:
		return (EINVAL);
	}

	if (read != xsread)
		return (EINVAL);

	buf = xs->data;
	buf += ccb->ccb_datalen;

	if (read)
		rv = copyin(data->data, buf, data->datalen);
	else
		rv = copyout(buf, data->data, data->datalen);

	if (rv == 0)
		ccb->ccb_datalen += data->datalen;

	return (rv);
}

int
vscsi_t2i(struct vscsi_softc *sc, struct vscsi_ioc_t2i *t2i)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;
	int				rv = 0;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == t2i->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	xs->resid = xs->datalen - ccb->ccb_datalen;
	xs->status = SCSI_OK;

	switch (t2i->status) {
	case VSCSI_STAT_DONE:
		xs->error = XS_NOERROR;
		break;
	case VSCSI_STAT_SENSE:
		xs->error = XS_SENSE;
		memcpy(&xs->sense, &t2i->sense, sizeof(xs->sense));
		break;
	case VSCSI_STAT_RESET:
		xs->error = XS_RESET;
		break;
	case VSCSI_STAT_ERR:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	vscsi_done(sc, ccb);

	return (rv);
}

struct vscsi_devevent_task {
	struct vscsi_softc		*sc;
	struct task			t;
	struct vscsi_ioc_devevent	de;
	u_long				cmd;
};

int
vscsi_devevent(struct vscsi_softc *sc, u_long cmd,
    struct vscsi_ioc_devevent *de)
{
	struct vscsi_devevent_task *dt;

	dt = malloc(sizeof(*dt), M_TEMP, M_WAITOK | M_CANFAIL);
	if (dt == NULL)
		return (ENOMEM);

	task_set(&dt->t, vscsi_devevent_task, dt);
	dt->sc = sc;
	dt->de = *de;
	dt->cmd = cmd;

	device_ref(&sc->sc_dev);
	task_add(systq, &dt->t);

	return (0);
}

void
vscsi_devevent_task(void *xdt)
{
	struct vscsi_devevent_task *dt = xdt;
	struct vscsi_softc *sc = dt->sc;
	int state;

	mtx_enter(&sc->sc_state_mtx);
	state = sc->sc_state;
	mtx_leave(&sc->sc_state_mtx);

	if (state != VSCSI_S_RUNNING)
		goto gone;

	switch (dt->cmd) {
	case VSCSI_REQPROBE:
		scsi_probe(sc->sc_scsibus, dt->de.target, dt->de.lun);
		break;
	case VSCSI_REQDETACH:
		scsi_detach(sc->sc_scsibus, dt->de.target, dt->de.lun,
		    DETACH_FORCE);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("unexpected vscsi_devevent cmd");
		/* NOTREACHED */
#endif
	}

gone:
	device_unref(&sc->sc_dev);

	free(dt, M_TEMP, sizeof(*dt));
}

int
vscsipoll(dev_t dev, int events, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				revents = 0;

	if (sc == NULL)
		return (ENXIO);

	if (events & (POLLIN | POLLRDNORM)) {
		mtx_enter(&sc->sc_state_mtx);
		if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
			revents |= events & (POLLIN | POLLRDNORM);
		mtx_leave(&sc->sc_state_mtx);
	}

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &sc->sc_sel);
	}

	device_unref(&sc->sc_dev);
	return (revents);
}

int
vscsikqfilter(dev_t dev, struct knote *kn)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct klist			*klist;

	if (sc == NULL)
		return (ENXIO);

	klist = &sc->sc_sel.si_note;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vscsi_filtops;
		break;
	default:
		device_unref(&sc->sc_dev);
		return (EINVAL);
	}

	kn->kn_hook = sc;

	mtx_enter(&sc->sc_sel_mtx);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mtx_leave(&sc->sc_sel_mtx);

	/* device ref is given to the knote in the klist */

	return (0);
}

void
filt_vscsidetach(struct knote *kn)
{
	struct vscsi_softc		*sc = kn->kn_hook;
	struct klist			*klist = &sc->sc_sel.si_note;

	mtx_enter(&sc->sc_sel_mtx);
	SLIST_REMOVE(klist, kn, knote, kn_selnext);
	mtx_leave(&sc->sc_sel_mtx);

	device_unref(&sc->sc_dev);
}

int
filt_vscsiread(struct knote *kn, long hint)
{
	struct vscsi_softc		*sc = kn->kn_hook;
	int				event = 0;

	mtx_enter(&sc->sc_state_mtx);
	if (!TAILQ_EMPTY(&sc->sc_ccb_i2t))
		event = 1;
	mtx_leave(&sc->sc_state_mtx);

	return (event);
}

int
vscsiclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct vscsi_ccb		*ccb;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	KASSERT(sc->sc_state == VSCSI_S_RUNNING);
	sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	scsi_activate(sc->sc_scsibus, -1, -1, DVACT_DEACTIVATE);

	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_t2i)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_i2t)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	scsi_req_detach(sc->sc_scsibus, -1, -1, DETACH_FORCE);

	mtx_enter(&sc->sc_state_mtx);
	while (sc->sc_ref_count > 0) {
		msleep(&sc->sc_ref_count, &sc->sc_state_mtx,
		    PRIBIO, "vscsiref", 0);
	}
	mtx_leave(&sc->sc_state_mtx);

	pool_destroy(&sc->sc_ccb_pool);

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = VSCSI_S_CLOSED;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}

void *
vscsi_ccb_get(void *cookie)
{
	struct vscsi_softc		*sc = cookie;
	struct vscsi_ccb		*ccb = NULL;

	ccb = pool_get(&sc->sc_ccb_pool, PR_NOWAIT);
	if (ccb != NULL) {
		ccb->ccb_tag = sc->sc_ccb_tag++;
		ccb->ccb_datalen = 0;
	}

	return (ccb);
}

void
vscsi_ccb_put(void *cookie, void *io)
{
	struct vscsi_softc		*sc = cookie;
	struct vscsi_ccb		*ccb = io;

	pool_put(&sc->sc_ccb_pool, ccb);
}
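
/*
 * A minimal sketch of the userland half of the ioctl protocol
 * implemented above, assuming the device node is /dev/vscsi0; error
 * handling and the actual target emulation are omitted:
 *
 *	int fd = open("/dev/vscsi0", O_RDWR);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct vscsi_ioc_i2t i2t;
 *	struct vscsi_ioc_data data;
 *	struct vscsi_ioc_t2i t2i;
 *	char buf[65536];
 *
 *	poll(&pfd, 1, INFTIM);
 *	ioctl(fd, VSCSI_I2T, &i2t);
 *	if (i2t.direction == VSCSI_DIR_READ) {
 *		emulate_read(buf, i2t.datalen);
 *		data.tag = i2t.tag;
 *		data.data = buf;
 *		data.datalen = i2t.datalen;
 *		ioctl(fd, VSCSI_DATA_READ, &data);
 *	}
 *	memset(&t2i, 0, sizeof(t2i));
 *	t2i.tag = i2t.tag;
 *	t2i.status = VSCSI_STAT_DONE;
 *	ioctl(fd, VSCSI_T2I, &t2i);
 *
 * emulate_read() is a hypothetical helper standing in for whatever
 * produces the data returned by the emulated target.
 */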