/*	$NetBSD: scsipi_base.c,v 1.181 2019/02/05 11:11:32 mrg Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.181 2019/02/05 11:11:32 mrg Exp $");

#ifdef _KERNEL_OPT
#include "opt_scsi.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>
#include <sys/atomic.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/param.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);

static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);

static struct pool scsipi_xfer_pool;

int scsipi_xs_count = 0;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}

	scsipi_ioctl_init();
}
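
/*
 * Illustrative arithmetic (not from the source): pool_prime() above
 * preallocates one page worth of xfer descriptors.  With a 4 KB
 * PAGE_SIZE and a struct scsipi_xfer of, say, ~250 bytes, that is
 * 4096 / 250 = 16 descriptors reserved up front so that early I/O
 * cannot fail for lack of memory; the real count depends on the
 * platform's page size and the actual sizeof(struct scsipi_xfer).
 */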

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev,
		    "unable to create completion thread for "
		    "channel %d\n", chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return 0;
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	mutex_enter(chan_mtx(chan));
	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	cv_broadcast(chan_cv_complete(chan));

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
	mutex_exit(chan_mtx(chan));
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
}
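
/*
 * For illustration (not from the source): the hash chains the 64-bit
 * target and lun values through hash32_buf() and masks the result
 * with SCSIPI_CHAN_PERIPH_HASHMASK, so a given target/lun pair, e.g.
 * target 2 / lun 0, always selects the same one of the
 * SCSIPI_CHAN_PERIPH_BUCKETS buckets, keeping periph lookup O(1)
 * even on busses with many devices.
 */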

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	mutex_enter(chan_mtx(chan));
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{

	LIST_REMOVE(periph, periph_hash);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
static struct scsipi_periph *
scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
{
	struct scsipi_periph *periph;
	uint32_t hash;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return NULL;

	hash = scsipi_chan_periph_hash(target, lun);

	if (lock)
		mutex_enter(chan_mtx(chan));
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	return periph;
}

struct scsipi_periph *
scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, false);
}

struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, true);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return 1;
		}
		return 0;
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return 1;
	}
	return 0;
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called with channel lock held
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			mutex_exit(chan_mtx(chan));
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			mutex_enter(chan_mtx(chan));
			return scsipi_get_resource(chan);
		}
		/*
		 * Ask the channel thread to do it.  It'll have to
		 * thaw the queue.
		 */
		scsipi_channel_freeze_locked(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		cv_broadcast(chan_cv_complete(chan));
		return 0;
	}

	return 0;
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}
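
/*
 * For illustration (not from the source): when SCSIPI_CHAN_OPENINGS
 * is set, the channel accounts for its own openings in chan_openings;
 * otherwise every channel on the adapter draws from the shared
 * adapt_openings.  E.g. an adapter advertising adapt_openings = 4
 * with two busy channels allows at most 4 commands in flight
 * bus-wide, however they are distributed between the channels.
 */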

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
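
/*
 * Worked example (illustrative, not from the source): tags live in a
 * bitmap of 32-bit words, so tag N maps to word N >> 5, bit N & 0x1f.
 * Tag 37 is therefore word 1, bit 5: scsipi_get_tag() locates a set
 * bit with ffs(), clears it in periph_freetags[1], and reconstructs
 * the ID as (1 << 5) | 5 == 37; scsipi_put_tag() sets the same bit
 * again to return the tag to the pool.
 */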

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 *
 *	When this routine is called with the channel lock held
 *	the flags must include XS_CTL_NOSLEEP.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	bool lock = (flags & XS_CTL_NOSLEEP) == 0;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	if (lock)
		mutex_enter(chan_mtx(periph->periph_channel));
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			KASSERT(!lock);
			return NULL;
		}
		KASSERT(lock);
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		cv_wait(periph_cv_periph(periph),
		    chan_mtx(periph->periph_channel));
	}
	if (lock)
		mutex_exit(chan_mtx(periph->periph_channel));

	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (lock)
			mutex_enter(chan_mtx(periph->periph_channel));
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		if (lock)
			mutex_exit(chan_mtx(periph->periph_channel));
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_enter(chan_mtx(periph->periph_channel));
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_exit(chan_mtx(periph->periph_channel));
	}
	return xs;
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		cv_broadcast(periph_cv_active(periph));
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		cv_broadcast(periph_cv_periph(periph));
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze += count;
	if (lock)
		mutex_exit(chan_mtx(chan));
}

static void
scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
{

	chan->chan_qfreeze += count;
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	/*
	 * Nothing more to do until the channel is running.
	 */
	if (!lock)
		return;

	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
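
/*
 * A typical adapter-side use (hypothetical sketch, not from this
 * file): on a transient resource shortage an HBA driver can stall
 * the queue briefly and let the softclock restart it:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_thaw_ch, hz, scsipi_channel_timed_thaw,
 *	    chan);
 *
 * where sc_thaw_ch is a callout the driver owns.  One second later
 * the callout fires scsipi_channel_timed_thaw(), the freeze count
 * drops back to zero, and scsipi_run_queue() restarts I/O.
 */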

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze += count;
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		cv_broadcast(periph_cv_periph(periph));
}

void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_freeze_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_thaw_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	struct scsipi_periph *periph = arg;
	struct scsipi_channel *chan = periph->periph_channel;

	callout_stop(&periph->periph_callout);

	mutex_enter(chan_mtx(chan));
	scsipi_periph_thaw_locked(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		mutex_exit(chan_mtx(chan));
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's
		 * queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		cv_broadcast(chan_cv_complete(chan));
		mutex_exit(chan_mtx(chan));
	}
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	mutex_enter(chan_mtx(chan));
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
	mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	(*chan->chan_bustype->bustype_kill_pending)(periph);
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debug purposes, error
 *	messages, SCSIVERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++) /* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
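
/*
 * For illustration (not from the source): the top three bits of a
 * SCSI opcode encode its command group, and the group fixes the CDB
 * length.  Opcode 0x28 (READ(10)) is group 1, a 10-byte CDB, so the
 * loop above prints the opcode plus the remaining 9 bytes; a group
 * whose length macro is 0 (unknown) falls back to dumping all of
 * cmd->bytes.
 */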

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
		    SSD_RCODE(sense->response_code),
		    sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    SSD_SENSE_KEY(sense->flags),
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return error;
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return 0;
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return 0;
		/* XXX - display some sort of error here? */
		return EIO;
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return 0;
		return EINVAL;
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return EACCES;

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			error = 0;
			break;
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return 0;
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return error;
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return error;
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return 0;
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return EINVAL;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return EIO;
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return ERESTART;
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return ERESTART;
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return EIO;
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

		/* Print verbose decode if appropriate and possible */
		if ((key == 0) ||
		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
		    (scsipi_print_sense(xs, 0) != 0))
			return error;

		/* Print brief(er) sense information */
		scsipi_printaddr(periph);
		printf("%s", error_mes[key - 1]);
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			switch (key) {
			case SKEY_NOT_READY:
			case SKEY_ILLEGAL_REQUEST:
			case SKEY_UNIT_ATTENTION:
			case SKEY_DATA_PROTECT:
				break;
			case SKEY_BLANK_CHECK:
				printf(", requested size: %d (decimal)",
				    info);
				break;
			case SKEY_ABORTED_COMMAND:
				if (xs->xs_retries)
					printf(", retrying");
				printf(", cmd 0x%x, info 0x%x",
				    xs->cmd->opcode, info);
				break;
			default:
				printf(", info = %d (decimal)", info);
			}
		}
		if (sense->extra_len != 0) {
			int n;
			printf(", data =");
			for (n = 0; n < sense->extra_len; n++)
				printf(" %02x",
				    sense->csi[n]);
		}
		printf("\n");
		return error;

		/*
		 * Some other code, just report it
		 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return EIO;
	}
}
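
/*
 * Worked example (illustrative, not from the source): a removable
 * drive with no disc inserted fails a READ with CHECK CONDITION; the
 * sense data carries response code 0x70, key SKEY_NOT_READY (0x2)
 * and ASC 0x3A ("medium not present").  The interpreter above then
 * clears PERIPH_MEDIA_LOADED and maps the condition to ENODEV,
 * staying quiet if the caller passed XS_CTL_SILENT_NODEV.
 */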

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return 0;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags);
}

static const struct scsipi_inquiry3_pattern {
	const char vendor[8];
	const char product[16];
	const char revision[4];
} scsipi_inquiry3_quirk[] = {
	{ "ES-6600 ", "", "" },
};

static int
scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
{
	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
		const struct scsipi_inquiry3_pattern *q =
		    &scsipi_inquiry3_quirk[i];
#define MATCH(field) \
    (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
			return 0;
	}
	return 1;
}
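
/*
 * For illustration (not from the source): an empty pattern field acts
 * as a wildcard in MATCH(), so the single quirk entry above rejects
 * the SCSI-3 INQUIRY for any device whose vendor field is "ES-6600 ",
 * whatever its product or revision strings say.
 */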

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		if (scsipi_inquiry3_ok(inqbuf)) {
#if 0
			printf("inquire: addlen=%d, retrying\n",
			    inqbuf->additional_length);
#endif
			cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
			error = scsipi_command(periph, (void *)&cmd,
			    sizeof(cmd), (void *)inqbuf,
			    SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
			    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
			printf("inquire: error=%d\n", error);
#endif
		}
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a mode page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
}
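
/*
 * Hypothetical caller sketch (illustrative, not from this file):
 * fetch the caching mode page (0x08) with a 6-byte MODE SENSE into a
 * buffer that begins with the parameter header:
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 hdr;
 *		uint8_t page[24];
 *	} buf;
 *	int error = scsipi_mode_sense(periph, 0, 0x08, &buf.hdr,
 *	    sizeof(buf), 0, SCSIPIRETRIES, 10000);
 *
 * The "big" variants issue the 10-byte CDB for devices (and transfer
 * lengths) the 6-byte form cannot address.
 */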

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	mutex_enter(chan_mtx(chan));
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/* XXX in certain circumstances, such as a device
		 * being detached, an xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach().  Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies.  This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		mutex_exit(chan_mtx(chan));
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze_locked(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL) {
			mutex_exit(chan_mtx(chan));
			return;
		}
		cv_broadcast(xs_cv(xs));
		mutex_exit(chan_mtx(chan));
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		mutex_exit(chan_mtx(chan));
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	cv_broadcast(chan_cv_complete(chan));
	mutex_exit(chan_mtx(chan));

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed with channel lock held to avoid missing
	 * a SCSI bus reset for this command.
	 */
	mutex_enter(chan_mtx(chan));
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw_locked(periph, 1);
			mutex_exit(chan_mtx(chan));
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
				scsipi_print_sense_data((void *)xs->data, 0);
			}
			return EINVAL;
		}
		mutex_exit(chan_mtx(chan)); // XXX allows other commands to queue or run
		scsipi_request_sense(xs);
	} else
		mutex_exit(chan_mtx(chan));

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		mutex_enter(chan_mtx(chan));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw_locked(periph, 1);
		mutex_exit(chan_mtx(chan));
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			mutex_enter(chan_mtx(chan));
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* XXX: quite extreme */
				kpause("xsbusy", false, hz, chan_mtx(chan));
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze_locked(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			mutex_exit(chan_mtx(chan));
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	mutex_enter(chan_mtx(chan));
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw_locked(periph, 1);
			mutex_exit(chan_mtx(chan));
			return ERESTART;
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw_locked(periph, 1);
	mutex_exit(chan_mtx(chan));

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	mutex_enter(chan_mtx(chan));
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	mutex_exit(chan_mtx(chan));

	return error;
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context.
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	KASSERT(mutex_owned(chan_mtx(chan)));
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		xs->error = XS_DRIVER_STUFFUP;
		return EAGAIN;
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw_locked(xs->xs_periph, 1);
	return 0;
}
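
/*
 * Worked example (illustrative, not from the source): suppose the
 * queue holds, for one periph, xfers with requeuecnt 3, 1 and 0 in
 * that order, and an xfer with requeuecnt 2 is re-enqueued.  The scan
 * stops at the first of this periph's xfers whose requeuecnt is
 * smaller (the one with count 1), and the requeued xfer is inserted
 * at that point, so xfers that have been requeued more often stay
 * nearer the head and retries are not starved by fresh I/O.
 */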

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;

	for (;;) {
		mutex_enter(chan_mtx(chan));

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			mutex_exit(chan_mtx(chan));
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		mutex_exit(chan_mtx(chan));
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				mutex_exit(chan_mtx(chan));

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		mutex_exit(chan_mtx(chan));

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error;

	KASSERT(!cold);

	(chan->chan_bustype->bustype_cmd)(xs);

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Simple tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_SIMPLE_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("should have flushed queue?\n");
		goto free_xs;
	}

	mutex_exit(chan_mtx(chan));
 restarted:
	scsipi_run_queue(chan);
	mutex_enter(chan_mtx(chan));

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return 0;

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		cv_wait(xs_cv(xs), chan_mtx(chan));
	}

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	mutex_exit(chan_mtx(chan));
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = 0;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
	mutex_enter(chan_mtx(chan));
 free_xs:
	scsipi_put_xs(xs);
	mutex_exit(chan_mtx(chan));

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	mutex_enter(chan_mtx(chan));
	return error;
}
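
/*
 * For illustration (not from the source): a synchronous caller thus
 * follows enqueue -> scsipi_run_queue() -> cv_wait() until
 * scsipi_done() marks XS_STS_DONE, then runs scsipi_complete()
 * itself; an ERESTART from error handling simply loops back to
 * "restarted" and the command is tried again.  An XS_CTL_ASYNC caller
 * returns as soon as the xfer is queued, and the completion thread
 * does the rest.
 */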

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	mutex_enter(chan_mtx(chan));
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	for (;;) {
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			mutex_exit(chan_mtx(chan));
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
				kpause("scsizzz", FALSE, hz/10, NULL);
			mutex_enter(chan_mtx(chan));
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			mutex_exit(chan_mtx(chan));
			scsipi_run_queue(chan);
			mutex_enter(chan_mtx(chan));
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			mutex_exit(chan_mtx(chan));

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
			mutex_enter(chan_mtx(chan));
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	cv_broadcast(chan_cv_thread(chan));
	mutex_exit(chan_mtx(chan));

	kthread_exit(0);
}
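
/*
 * Editor's illustrative sketch (hypothetical, guarded out of the
 * build): deferring a queue kick to the completion thread instead of
 * calling scsipi_run_queue() directly, e.g. from a context where the
 * caller would rather not issue adapter requests itself.  The thread
 * loop above consumes SCSIPI_CHANT_KICK and runs the queue from its
 * own context.
 */
#ifdef SCSIPI_EXAMPLES
static void
example_kick_from_thread(struct scsipi_channel *chan)
{

	mutex_enter(chan_mtx(chan));
	chan->chan_tflags |= SCSIPI_CHANT_KICK;
	cv_broadcast(chan_cv_complete(chan));
	mutex_exit(chan_mtx(chan));
}
#endif /* SCSIPI_EXAMPLES */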

/*
 * scsipi_thread_call_callback:
 *
 *	Request that a callback be invoked from the completion thread.
 */
int
scsipi_thread_call_callback(struct scsipi_channel *chan,
    void (*callback)(struct scsipi_channel *, void *), void *arg)
{

	mutex_enter(chan_mtx(chan));
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		mutex_exit(chan_mtx(chan));
		return ESRCH;
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		mutex_exit(chan_mtx(chan));
		return EBUSY;
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	cv_broadcast(chan_cv_complete(chan));
	mutex_exit(chan_mtx(chan));
	return 0;
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
    void *arg)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
			chan->chan_bustype->bustype_async_event_xfer_mode(
			    chan, arg);
		}
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
static void
scsipi_async_event_max_openings(struct scsipi_channel *chan,
    struct scsipi_max_openings *mo)
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph_locked(chan, mo->mo_target,
		    minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}
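
/*
 * Editor's illustrative sketch (hypothetical, guarded out of the
 * build): queueing work through scsipi_thread_call_callback().  Note
 * that a successful call leaves the channel frozen by one; the
 * callback is expected to undo that, typically with
 * scsipi_channel_thaw(chan, 1).
 */
#ifdef SCSIPI_EXAMPLES
static void
example_callback(struct scsipi_channel *chan, void *arg)
{

	/* ... thread-context work goes here ... */
	scsipi_channel_thaw(chan, 1);
}

static int
example_defer_work(struct scsipi_channel *chan, void *cookie)
{

	/* ESRCH: no completion thread yet; EBUSY: a callback is pending. */
	return scsipi_thread_call_callback(chan, example_callback, cookie);
}
#endif /* SCSIPI_EXAMPLES */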
2333 */ 2334 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) { 2335 itperiph = scsipi_lookup_periph(chan, target, lun); 2336 if (itperiph != NULL) 2337 break; 2338 } 2339 if (itperiph != NULL) { 2340 xm.xm_mode = itperiph->periph_cap; 2341 /* 2342 * Now issue the request to the adapter. 2343 */ 2344 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm); 2345 /* 2346 * If we want this to happen immediately, issue a dummy 2347 * command, since most adapters can't really negotiate unless 2348 * they're executing a job. 2349 */ 2350 if (immed != 0) { 2351 (void) scsipi_test_unit_ready(itperiph, 2352 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST | 2353 XS_CTL_IGNORE_NOT_READY | 2354 XS_CTL_IGNORE_MEDIA_CHANGE); 2355 } 2356 } 2357 } 2358 2359 /* 2360 * scsipi_channel_reset: 2361 * 2362 * handle scsi bus reset 2363 * called with channel lock held 2364 */ 2365 static void 2366 scsipi_async_event_channel_reset(struct scsipi_channel *chan) 2367 { 2368 struct scsipi_xfer *xs, *xs_next; 2369 struct scsipi_periph *periph; 2370 int target, lun; 2371 2372 /* 2373 * Channel has been reset. Also mark as reset pending REQUEST_SENSE 2374 * commands; as the sense is not available any more. 2375 * can't call scsipi_done() from here, as the command has not been 2376 * sent to the adapter yet (this would corrupt accounting). 2377 */ 2378 2379 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) { 2380 xs_next = TAILQ_NEXT(xs, channel_q); 2381 if (xs->xs_control & XS_CTL_REQSENSE) { 2382 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q); 2383 xs->error = XS_RESET; 2384 if ((xs->xs_control & XS_CTL_ASYNC) != 0) 2385 TAILQ_INSERT_TAIL(&chan->chan_complete, xs, 2386 channel_q); 2387 } 2388 } 2389 cv_broadcast(chan_cv_complete(chan)); 2390 /* Catch xs with pending sense which may not have a REQSENSE xs yet */ 2391 for (target = 0; target < chan->chan_ntargets; target++) { 2392 if (target == chan->chan_id) 2393 continue; 2394 for (lun = 0; lun < chan->chan_nluns; lun++) { 2395 periph = scsipi_lookup_periph_locked(chan, target, lun); 2396 if (periph) { 2397 xs = periph->periph_xscheck; 2398 if (xs) 2399 xs->error = XS_RESET; 2400 } 2401 } 2402 } 2403 } 2404 2405 /* 2406 * scsipi_target_detach: 2407 * 2408 * detach all periph associated with a I_T 2409 * must be called from valid thread context 2410 */ 2411 int 2412 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun, 2413 int flags) 2414 { 2415 struct scsipi_periph *periph; 2416 device_t tdev; 2417 int ctarget, mintarget, maxtarget; 2418 int clun, minlun, maxlun; 2419 int error = 0; 2420 2421 if (target == -1) { 2422 mintarget = 0; 2423 maxtarget = chan->chan_ntargets; 2424 } else { 2425 if (target == chan->chan_id) 2426 return EINVAL; 2427 if (target < 0 || target >= chan->chan_ntargets) 2428 return EINVAL; 2429 mintarget = target; 2430 maxtarget = target + 1; 2431 } 2432 2433 if (lun == -1) { 2434 minlun = 0; 2435 maxlun = chan->chan_nluns; 2436 } else { 2437 if (lun < 0 || lun >= chan->chan_nluns) 2438 return EINVAL; 2439 minlun = lun; 2440 maxlun = lun + 1; 2441 } 2442 2443 /* for config_detach */ 2444 KERNEL_LOCK(1, curlwp); 2445 2446 mutex_enter(chan_mtx(chan)); 2447 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) { 2448 if (ctarget == chan->chan_id) 2449 continue; 2450 2451 for (clun = minlun; clun < maxlun; clun++) { 2452 periph = scsipi_lookup_periph_locked(chan, ctarget, clun); 2453 if (periph == NULL) 2454 continue; 2455 tdev = periph->periph_dev; 2456 mutex_exit(chan_mtx(chan)); 2457 error = 

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(struct scsipi_adapter *adapt)
{
	int error = 0;

	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
	    && adapt->adapt_enable != NULL) {
		scsipi_adapter_lock(adapt);
		error = scsipi_adapter_enable(adapt, 1);
		scsipi_adapter_unlock(adapt);
		if (error)
			atomic_dec_uint(&adapt->adapt_refcnt);
	}
	return error;
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(struct scsipi_adapter *adapt)
{

	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
	    && adapt->adapt_enable != NULL) {
		scsipi_adapter_lock(adapt);
		(void) scsipi_adapter_enable(adapt, 0);
		scsipi_adapter_unlock(adapt);
	}
}

static struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,		625 },	/* FAST-160 (Ultra320) */
	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
};
static const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(int period /* ns * 100 */)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return scsipi_syncparams[i].ss_factor;
	}

	return (period / 100) / 4;
}

int
scsipi_sync_factor_to_period(int factor)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return scsipi_syncparams[i].ss_period;
	}

	return (factor * 4) * 100;
}

/* Returns the sync frequency in kHz. */
int
scsipi_sync_factor_to_freq(int factor)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return 100000000 / scsipi_syncparams[i].ss_period;
	}

	return 10000000 / ((factor * 4) * 10);
}

static inline void
scsipi_adapter_lock(struct scsipi_adapter *adapt)
{

	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);
}

static inline void
scsipi_adapter_unlock(struct scsipi_adapter *adapt)
{

	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
}

void
scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	scsipi_adapter_lock(adapt);
	(adapt->adapt_minphys)(bp);
	scsipi_adapter_unlock(adapt);
}
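
/*
 * Editor's worked example for the sync conversion helpers above
 * (hypothetical, guarded out of the build): factor 0x0c (FAST-20)
 * corresponds to a period of 5000 in ns*100 units, i.e. 50 ns, and to
 * a 20 MHz sync rate; note that scsipi_sync_factor_to_freq() reports
 * the rate in kHz.
 */
#ifdef SCSIPI_EXAMPLES
static void
example_sync_conversions(void)
{

	KASSERT(scsipi_sync_period_to_factor(5000) == 0x0c);
	KASSERT(scsipi_sync_factor_to_period(0x0c) == 5000);
	KASSERT(scsipi_sync_factor_to_freq(0x0c) == 20000);	/* kHz */
}
#endif /* SCSIPI_EXAMPLES */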

void
scsipi_adapter_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	scsipi_adapter_lock(adapt);
	(adapt->adapt_request)(chan, req, arg);
	scsipi_adapter_unlock(adapt);
}

int
scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
    void *data, int flag, struct proc *p)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int error;

	if (adapt->adapt_ioctl == NULL)
		return ENOTTY;

	scsipi_adapter_lock(adapt);
	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
	scsipi_adapter_unlock(adapt);
	return error;
}

int
scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
{
	int error;

	scsipi_adapter_lock(adapt);
	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
	scsipi_adapter_unlock(adapt);
	return error;
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(struct scsipi_xfer *xs)
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(struct scsipi_xfer *xs)
{
	u_char *b = (u_char *)xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, uimin(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(u_char *address, int num)
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */
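
/*
 * Editor's illustrative sketch (hypothetical, guarded out of the
 * build): with SCSIPI_DEBUG compiled in, the dumps above are gated by
 * each periph's debug flags; this enables level-3 tracing (SCSIPI_DB3,
 * checked in scsipi_execute_xs()) for one device.
 */
#ifdef SCSIPI_EXAMPLES
static void
example_enable_xfer_tracing(struct scsipi_periph *periph)
{

#ifdef SCSIPI_DEBUG
	periph->periph_dbflags |= SCSIPI_DB3;
#endif
}
#endif /* SCSIPI_EXAMPLES */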