/*	$NetBSD: scsipi_base.c,v 1.165 2015/08/24 23:13:15 pooka Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.165 2015/08/24 23:13:15 pooka Exp $");

#ifdef _KERNEL_OPT
#include "opt_scsi.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/param.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;
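/*
 * Example (illustrative sketch only, not part of this file): a bus
 * front-end fills in its scsipi_channel before the midlayer calls
 * scsipi_channel_init() below, which in turn calls scsipi_init().
 * The "mydrv_softc" container is hypothetical; the chan_* fields and
 * scsi_bustype are real midlayer interfaces.
 */
#if 0
static void
example_init_channel(struct mydrv_softc *sc)
{
	struct scsipi_channel *chan = &sc->sc_channel;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = &sc->sc_adapter;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = 8;
	chan->chan_nluns = 8;
	chan->chan_id = 7;		/* initiator ID */

	if (scsipi_channel_init(chan) != 0)
		aprint_error_dev(sc->sc_dev, "channel init failed\n");
}
#endif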
/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev,
		    "unable to create completion thread for channel %d\n",
		    chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	KASSERT(cold || KERNEL_LOCKED_P());

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}
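/*
 * Example (illustrative only): resolving an I_T_L nexus to its periph
 * via the hash table above before poking at it.
 */
#if 0
static void
example_poke_periph(struct scsipi_channel *chan)
{
	struct scsipi_periph *periph;

	/* Resolve target 2, LUN 0 on this channel. */
	periph = scsipi_lookup_periph(chan, 2, 0);
	if (periph != NULL)
		scsipi_printaddr(periph);
}
#endif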
/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
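/*
 * The tag bitmap packs 32 tags per word, so tag = (word << 5) | bit.
 * A worked example (illustrative only): tag 37 lives in word 1 at
 * bit 5, and freeing it sets exactly that bit in periph_freetags[].
 */
#if 0
static void
example_tag_layout(void)
{
	int tag = 37;
	int word = tag >> 5;	/* 37 / 32 == 1 */
	int bit = tag & 0x1f;	/* 37 % 32 == 5 */

	KASSERT(((word << 5) | bit) == tag);
}
#endif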
/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}
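/*
 * Example (illustrative only): freeze/thaw counts nest, so a driver
 * that must pause a channel around, say, a chip reset brackets the
 * operation as below.  scsipi_channel_timed_thaw() can be used from a
 * callout for a delayed thaw; the "mydrv" softc and its callout are
 * hypothetical.
 */
#if 0
static void
example_pause_channel(struct mydrv_softc *sc, struct scsipi_channel *chan)
{
	scsipi_channel_freeze(chan, 1);
	/* ... reset the chip, reload firmware, etc. ... */
	scsipi_channel_thaw(chan, 1);	/* runs the queue when count hits 0 */

	/* Or thaw after a one-second cool-down instead: */
	scsipi_channel_freeze(chan, 1);
	callout_reset(&sc->sc_thaw_ch, hz, scsipi_channel_timed_thaw, chan);
}
#endif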
/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's
		 * queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debug purposes, error
 *	messages, SCSIVERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j - 1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
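/*
 * Example (illustrative only): the opcode's group ID encodes the CDB
 * length, so a READ(10) (opcode 0x28, group 1) prints as the opcode
 * byte followed by the remaining CDB_GROUP1 - 1 == 9 bytes.
 */
#if 0
static void
example_print_read10(void)
{
	struct scsipi_generic cdb;

	memset(&cdb, 0, sizeof(cdb));
	cdb.opcode = READ_10;		/* 0x28, CDB_GROUPID_1 */
	scsipi_print_cdb(&cdb);		/* prints "0x28 00 00 ... 00" */
	printf("\n");
}
#endif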
/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
		    SSD_RCODE(sense->response_code),
		    sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    SSD_SENSE_KEY(sense->flags),
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

		/* Print verbose decode if appropriate and possible */
		if ((key == 0) ||
		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
		    (scsipi_print_sense(xs, 0) != 0))
			return (error);

		/* Print brief(er) sense information */
		scsipi_printaddr(periph);
		printf("%s", error_mes[key - 1]);
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			switch (key) {
			case SKEY_NOT_READY:
			case SKEY_ILLEGAL_REQUEST:
			case SKEY_UNIT_ATTENTION:
			case SKEY_DATA_PROTECT:
				break;
			case SKEY_BLANK_CHECK:
				printf(", requested size: %d (decimal)",
				    info);
				break;
			case SKEY_ABORTED_COMMAND:
				if (xs->xs_retries)
					printf(", retrying");
				printf(", cmd 0x%x, info 0x%x",
				    xs->cmd->opcode, info);
				break;
			default:
				printf(", info = %d (decimal)", info);
			}
		}
		if (sense->extra_len != 0) {
			int n;
			printf(", data =");
			for (n = 0; n < sense->extra_len; n++)
				printf(" %02x",
				    sense->csi[n]);
		}
		printf("\n");
		return (error);

		/*
		 * Some other code, just report it
		 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		/* dump the whole sense structure, not just a pointer's worth */
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}
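/*
 * Example (illustrative only): a periph driver can override the
 * default handler above via psw_error in its scsipi_periphsw;
 * returning EJUSTRETURN falls back to the generic processing.
 * The "mydev" names are hypothetical.
 */
#if 0
static int
mydev_error_handler(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;

	/* Treat "medium not present" quietly; defer everything else. */
	if (SSD_SENSE_KEY(sense->flags) == SKEY_NOT_READY &&
	    sense->asc == 0x3A)
		return (ENODEV);
	return (EJUSTRETURN);
}

static const struct scsipi_periphsw mydev_switch = {
	mydev_error_handler,	/* psw_error */
	NULL,			/* psw_start */
	NULL,			/* psw_async */
	NULL,			/* psw_done */
};
#endif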
/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags));
}

static const struct scsipi_inquiry3_pattern {
	const char vendor[8];
	const char product[16];
	const char revision[4];
} scsipi_inquiry3_quirk[] = {
	{ "ES-6600 ", "", "" },
};

static int
scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
{
	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
		const struct scsipi_inquiry3_pattern *q =
		    &scsipi_inquiry3_quirk[i];
#define MATCH(field) \
    (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
			return 0;
	}
	return 1;
}
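/*
 * Example (illustrative only): quirk entries match whole INQUIRY
 * fields, so non-wildcard strings must be padded to the full field
 * width (vendor 8, product 16, revision 4); empty strings match
 * anything.  The entries below are hypothetical.
 */
#if 0
static const struct scsipi_inquiry3_pattern example_quirks[] = {
	{ "ACME    ", "BROKEN DISK     ", "1.0 " },	/* one revision */
	{ "ACME    ", "", "" },				/* any ACME device */
};
#endif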
/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		if (scsipi_inquiry3_ok(inqbuf)) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
			cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
			error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
			    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
			    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
		}
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}
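/*
 * Example (illustrative only): a removable-media driver locks the
 * door while the device is open and unlocks it on close.
 */
#if 0
static void
example_door_lock(struct scsipi_periph *periph)
{
	/* On first open: keep the user from ejecting the medium. */
	(void) scsipi_prevent(periph, SPAMR_PREVENT_DT,
	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);

	/* On last close: allow removal again. */
	(void) scsipi_prevent(periph, SPAMR_ALLOW, 0);
}
#endif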
/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}
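/*
 * Example (illustrative only): fetching the caching mode page (0x08)
 * with a 6-byte MODE SENSE.  The on-stack buffer layout (header
 * followed by page data) is an assumption of this sketch.
 */
#if 0
static int
example_caching_page(struct scsipi_periph *periph)
{
	struct {
		struct scsi_mode_parameter_header_6 header;
		uint8_t page[128];
	} buf;

	memset(&buf, 0, sizeof(buf));
	return scsipi_mode_sense(periph, 0, 0x08, &buf.header, sizeof(buf),
	    0, SCSIPIRETRIES, 10000);
}
#endif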
/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	KASSERT(cold || KERNEL_LOCKED_P());

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/*
		 * XXX In certain circumstances, such as a device
		 * being detached, an xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach().  Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies.  This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		splx(s);
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
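/*
 * Example (illustrative only): an HBA interrupt handler completing a
 * transfer and handing it back to the midlayer.  The "mydrv" softc
 * and hardware-status helpers are hypothetical; the scsipi-facing
 * steps (fill in resid/error, call scsipi_done()) are the point.
 */
#if 0
static int
mydrv_intr(void *arg)
{
	struct mydrv_softc *sc = arg;
	struct scsipi_xfer *xs = mydrv_current_xfer(sc);

	if (mydrv_hw_ok(sc)) {
		xs->resid = 0;
		xs->error = XS_NOERROR;
	} else {
		xs->error = XS_DRIVER_STUFFUP;
	}
	scsipi_done(xs);
	return (1);
}
#endif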
/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
				scsipi_print_sense_data((void *)xs->data, 0);
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* XXX: quite extreme */
				kpause("xsbusy", false, hz, NULL);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
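/*
 * Example (illustrative only): the errno produced by the completion
 * path above is what periph drivers ultimately see from
 * scsipi_command(), so a typical caller only branches on 0/errno;
 * retries, requeues and REQUEST SENSE have already been handled.
 */
#if 0
static void
example_check_ready(struct scsipi_periph *periph)
{
	struct scsi_test_unit_ready cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    NULL, 0, SCSIPIRETRIES, 10000, NULL, 0);
	if (error != 0)
		printf("unit not ready, errno %d\n", error);
}
#endif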
/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */
static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error, s;

	KASSERT(!cold);
	KASSERT(KERNEL_LOCKED_P());

	(chan->chan_bustype->bustype_cmd)(xs);

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adaptor wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("should have flushed queue?\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (0);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = 0;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
				kpause("scsizzz", FALSE, hz/10, NULL);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_thread_call_callback:
 *
 *	request to call a callback from the completion thread
 */
int
scsipi_thread_call_callback(struct scsipi_channel *chan,
    void (*callback)(struct scsipi_channel *, void *), void *arg)
{
	int s;

	s = splbio();
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		splx(s);
		return ESRCH;
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		splx(s);
		return EBUSY;
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}
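/*
 * Example (illustrative only): deferring work that must not run in
 * interrupt context to the completion thread.  The channel stays
 * frozen until the callback runs, so the callback is assumed to thaw
 * it when done.  The "mydrv" names are hypothetical.
 */
#if 0
static void
mydrv_reset_callback(struct scsipi_channel *chan, void *arg)
{
	struct mydrv_softc *sc = arg;

	mydrv_hw_reset(sc);		/* may sleep; thread context */
	scsipi_channel_thaw(chan, 1);	/* undo the implicit freeze */
}

	/* From the interrupt handler: */
	if (scsipi_thread_call_callback(chan, mydrv_reset_callback, sc))
		printf("callback already pending\n");
#endif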
/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
    void *arg)
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
			chan->chan_bustype->bustype_async_event_xfer_mode(
			    chan, arg);
		}
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
static void
scsipi_async_event_max_openings(struct scsipi_channel *chan,
    struct scsipi_max_openings *mo)
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;		/* ignored */
	xm.xm_offset = 0;		/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);

		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}

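#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch, not part of the driver: how an adapter might
 * post ASYNC_EVENT_MAX_OPENINGS, e.g. after a QUEUE FULL status, to
 * clamp the queue depth of every LUN on a target.  The wrapper name
 * example_clamp_openings() is hypothetical.
 */
static void
example_clamp_openings(struct scsipi_channel *chan, int target, int openings)
{
	struct scsipi_max_openings mo;

	mo.mo_target = target;
	mo.mo_lun = -1;		/* wildcard: apply to all LUNs */
	mo.mo_openings = openings;
	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
}
#endif /* SCSIPI_EXAMPLES */
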
/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.  Called at splbio().
 */
static void
scsipi_async_event_channel_reset(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset.  Also mark as reset any pending
	 * REQUEST SENSE commands, since their sense data is no longer
	 * valid.  We can't call scsipi_done() from here, as the commands
	 * have not been sent to the adapter yet (doing so would corrupt
	 * accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);

	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with an I_T Nexus.
 *	Must be called from valid thread context.
 */
int
scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
    int flags)
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return (EINVAL);
		if (target < 0 || target >= chan->chan_ntargets)
			return (EINVAL);
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return (EINVAL);
		minlun = lun;
		maxlun = lun + 1;
	}

	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph(chan, ctarget, clun);
			if (periph == NULL)
				continue;
			error = config_detach(periph->periph_dev, flags);
			if (error)
				return (error);
		}
	}
	return (0);
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(struct scsipi_adapter *adapt)
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

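#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch, not part of the driver: the usual pairing of
 * scsipi_adapter_addref() on open with scsipi_adapter_delref() on
 * failure or close, so a disabled adapter is enabled only while a
 * periph is in use.  The periph_channel member is assumed from
 * scsipiconf.h; the name example_open() is hypothetical.
 */
static int
example_open(struct scsipi_periph *periph)
{
	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
	int error;

	if ((error = scsipi_adapter_addref(adapt)) != 0)
		return (error);

	error = scsipi_test_unit_ready(periph, XS_CTL_IGNORE_MEDIA_CHANGE);
	if (error)
		scsipi_adapter_delref(adapt);	/* drop the ref on failure */
	return (error);
}
#endif /* SCSIPI_EXAMPLES */
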
/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(struct scsipi_adapter *adapt)
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

static struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,	 625 },		/* FAST-160 (Ultra320) */
	{ 0x09,	1250 },		/* FAST-80 (Ultra160) */
	{ 0x0a,	2500 },		/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,	3030 },		/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,	5000 },		/* FAST-20 (Ultra) */
};
static const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(int period /* ns * 100 */)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 100) / 4);
}

int
scsipi_sync_factor_to_period(int factor)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 100);
}

int
scsipi_sync_factor_to_freq(int factor)
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (100000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * show_scsipi_xs:
 *
 *	Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(struct scsipi_xfer *xs)
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(struct scsipi_xfer *xs)
{
	u_char *b = (u_char *)xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(u_char *address, int num)
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */

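#ifdef SCSIPI_EXAMPLES
/*
 * Illustrative sketch, not part of the driver: worked numbers for the
 * sync parameter conversions above.  ss_period is in units of ns * 100
 * and scsipi_sync_factor_to_freq() returns kHz, so for FAST-20 (factor
 * 0x0c, 50.00 ns, 20 MHz):
 *
 *	scsipi_sync_period_to_factor(5000) == 0x0c
 *	scsipi_sync_factor_to_period(0x0c) == 5000
 *	scsipi_sync_factor_to_freq(0x0c)   == 20000
 *
 * Factors outside the table fall back to the classic "factor * 4 ns"
 * rule: factor 0x32 (50) gives 200 ns, i.e. 5000 kHz.
 */
static void
example_sync_check(void)
{

	KASSERT(scsipi_sync_period_to_factor(5000) == 0x0c);
	KASSERT(scsipi_sync_factor_to_period(0x0c) == 5000);
	KASSERT(scsipi_sync_factor_to_freq(0x0c) == 20000);
	KASSERT(scsipi_sync_factor_to_freq(0x32) == 5000);
}
#endif /* SCSIPI_EXAMPLES */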