1 /* $NetBSD: scsipi_base.c,v 1.164 2014/11/18 17:03:41 joerg Exp $ */ 2 3 /*- 4 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace 9 * Simulation Facility, NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.164 2014/11/18 17:03:41 joerg Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/param.h>

/* Forward declarations for file-local helpers defined below. */
static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

/* Pool of scsipi_xfer descriptors, shared by every channel in the system. */
static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	/* Idempotent: only the first caller does the work. */
	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	/*
	 * Pre-allocate roughly one page worth of xfers; failure to prime
	 * is not fatal, pool_get() can still allocate later.
	 */
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 *
 *	Returns 0 on success; panics if the completion thread cannot
 *	be created.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	/* Empty every bucket of the periph hash table. */
	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
		    "channel %d\n", chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.  The thread clears
	 * chan_thread and wakes us when it is gone.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * Hash a (target, lun) pair into a bucket index of the channel's
 * periph table.
 */
static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	/* splbio() protects the hash chain against interrupt-time access. */
	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 *
 *	Returns NULL if (target, lun) is out of range or no matching
 *	periph is attached.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	KASSERT(cold || KERNEL_LOCKED_P());

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	Returns 1 if a resource was taken, 0 if none are available.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	/*
	 * Openings are accounted either per-channel or per-adapter,
	 * depending on SCSIPI_CHAN_OPENINGS.
	 */
	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.
 *	If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			/* No completion thread yet: grow synchronously. */
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	/*
	 * Scan the free-tag bitmap, 32 tags per word, for the first
	 * set bit.  ffs() is 1-based and returns 0 if no bit is set.
	 */
	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	/* Claim the tag: clear its bit and compose the tag number. */
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 *
 *	Returns NULL if XS_CTL_NOSLEEP is set and no opening (or no
 *	pool memory) is available.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		/* Undo the accounting done in the loop above. */
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recovery for\n");
		panic("scsipi_put_xs");
	}
#endif

	/* Mirror of the accounting in scsipi_get_xs(). */
	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here. Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired. This will also
 * 	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	/* Wake any thread stalled in scsipi_get_xs() on this periph. */
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	/* scsipi_put_xs() wakes us when periph_active reaches 0. */
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	/* Let the bus-specific code abort everything in flight... */
	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	/* ...then wait for the periph's active count to drain to zero. */
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debug purpose, error messages,
 *	SCSIVERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	/* The opcode's group ID determines the CDB length. */
	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	/* Unknown group: dump the whole CDB byte array. */
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++) /* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
	u_int32_t info;
	/* Indexed by (sense key - 1); key 0 is never printed. */
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	/*
	 * NOTE(review): this tests periph_flags, while scsipi_done()
	 * tests periph_dbflags for the same SCSIPI_DB1 bit — this one
	 * looks like it should be periph_dbflags too; confirm.
	 */
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
			SSD_RCODE(sense->response_code),
			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
			sense->segment,
			SSD_SENSE_KEY(sense->flags),
			sense->flags & SSD_ILI ? 1 : 0,
			sense->flags & SSD_EOM ? 1 : 0,
			sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
			"extra bytes\n",
			sense->info[0],
			sense->info[1],
			sense->info[2],
			sense->info[3],
			sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
				/* XXX Should reupload any transient state. */
				(periph->periph_flags &
				 PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

		/* Print verbose decode if appropriate and possible */
		if ((key == 0) ||
		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
		    (scsipi_print_sense(xs, 0) != 0))
			return (error);

		/* Print brief(er) sense information */
		scsipi_printaddr(periph);
		printf("%s", error_mes[key - 1]);
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			switch (key) {
			case SKEY_NOT_READY:
			case SKEY_ILLEGAL_REQUEST:
			case SKEY_UNIT_ATTENTION:
			case SKEY_DATA_PROTECT:
				break;
			case SKEY_BLANK_CHECK:
				printf(", requested size: %d (decimal)",
				    info);
				break;
			case SKEY_ABORTED_COMMAND:
				if (xs->xs_retries)
					printf(", retrying");
				printf(", cmd 0x%x, info 0x%x",
				    xs->cmd->opcode, info);
				break;
			default:
				printf(", info = %d (decimal)", info);
			}
		}
		if (sense->extra_len != 0) {
			int n;
			printf(", data =");
			for (n = 0; n < sense->extra_len; n++)
				printf(" %02x",
				    sense->csi[n]);
		}
		printf("\n");
		return (error);

		/*
		 * Some other code, just report it
		 */
	default:
#if    defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		/*
		 * NOTE(review): `sense' is a pointer, so sizeof (sense)
		 * is the pointer size, not the structure size — this
		 * probably should be sizeof (*sense); confirm upstream.
		 */
		for (i = 0; i < sizeof (sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
			SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	/* During discovery, fail fast instead of retrying. */
	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags));
}

/*
 * Devices matching one of these (vendor, product, revision) patterns
 * must not be probed with a SCSI-3 sized INQUIRY; an empty pattern
 * field matches anything.
 */
static const struct scsipi_inquiry3_pattern {
	const char vendor[8];
	const char product[16];
	const char revision[4];
} scsipi_inquiry3_quirk[] = {
	{ "ES-6600 ", "", "" },
};

/* Return 1 if it is safe to issue the longer SCSI-3 INQUIRY, 0 if not. */
static int
scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
{
	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
		const struct scsipi_inquiry3_pattern *q =
		    &scsipi_inquiry3_quirk[i];
#define MATCH(field) \
    (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
			return 0;
	}
	return 1;
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	/* During discovery, fail fast instead of retrying. */
	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failture modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		/* Device claims more data; re-issue unless quirk-listed. */
	    if (scsipi_inquiry3_ok(inqbuf)) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
		    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
	    }
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	/* Some devices cannot lock their door; silently succeed. */
	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	/* Spin-up (SSS_START) can take much longer than stop. */
	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;	/* 6-byte CDB: single-byte length */

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);	/* 10-byte CDB: 2-byte big-endian length */

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

/*
 * scsipi_mode_select, scsipi_mode_select_big:
 *	write a mode page back to a device
 */
int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	KASSERT(cold || KERNEL_LOCKED_P());

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/* XXX in certain circumstances, such as a device
		 * being detached, a xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach(). Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies. This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		splx(s);
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue. Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph. We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer. Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer. This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
				scsipi_print_sense_data((void *)xs->data, 0);
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Bus-type specific sense interpretation (scsi vs atapi). */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue. If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* XXX: quite extreme */
				kpause("xsbusy", false, hz, NULL);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands. Alternatively,
		 * it may have been frozen again and in a timed thaw. In
		 * any case, we thaw the periph once we re-enqueue the
		 * command. Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
		/* Enqueue failed; fall through and report the error. */
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer. Called when the xfer
 * returns with a CHECK_CONDITION status. Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	/* No retries, 1 second timeout; sense is stored in the xfer. */
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		 /* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		 /* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order. That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue. (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	/* Requeue path may ask us to undo one freeze on its behalf. */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			/* Skip periphs that are full, frozen, or running
			 * an untagged command. */
			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			/* While recovering or sensing, only URGENT
			 * (recovery) commands may run. */
			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run. Allocate a resource from
		 * the adapter to run it. If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources. If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					/* NOTE(review): message lacks a
					 * trailing \n. */
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error, s;

	KASSERT(!cold);
	KASSERT(KERNEL_LOCKED_P());

	/* Let the bus type (scsi/atapi) fix up the CDB. */
	(chan->chan_bustype->bustype_cmd)(xs);

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adaptor wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer. If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("should have flushed queue?\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running. If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (0);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete. scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here. It has already been handled
	 */
	if (oasync)
		error = 0;
	/*
	 * Command completed successfully or fatal error occurred. Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread. We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			/* Flag re-set while we ran: back off briefly. */
			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
				kpause("scsizzz", FALSE, hz/10, NULL);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_thread_call_callback:
 *
 * 	request to call a callback from the completion thread
 */
int
scsipi_thread_call_callback(struct scsipi_channel *chan,
    void (*callback)(struct scsipi_channel *, void *), void *arg)
{
	int s;

	s = splbio();
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		splx(s);
		return ESRCH;
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		/* Only one pending callback slot; caller must retry. */
		splx(s);
		return EBUSY;
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return(0);
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
    void *arg)
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
			chan->chan_bustype->bustype_async_event_xfer_mode(
			    chan, arg);
		}
		break;
	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
static void
scsipi_async_event_max_openings(struct scsipi_channel *chan,
    struct scsipi_max_openings *mo)
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		/* Shrink is always allowed; grow only if the periph
		 * opted in with PERIPH_GROW_OPENINGS. */
		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);
		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}

/*
 * scsipi_channel_reset:
 *
 *	handle scsi bus reset
 * called at splbio
 */
static void
scsipi_async_event_channel_reset(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
	 * commands; as the sense is not available any more.
	 * can't call scsipi_done() from here, as the command has not been
	 * sent to the adapter yet (this would corrupt accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun <  chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	detach all periph associated with a I_T
 * 	must be called from valid thread context
 */
int
scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
    int flags)
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error;

	/* target/lun of -1 wildcard the whole channel dimension. */
	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return EINVAL;
		if (target < 0 || target >= chan->chan_ntargets)
			return EINVAL;
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return EINVAL;
		minlun = lun;
		maxlun = lun + 1;
	}

	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph(chan, ctarget, clun);
			if (periph == NULL)
				continue;
			error = config_detach(periph->periph_dev, flags);
			if (error)
				return (error);
		}
	}
	return(0);
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(struct scsipi_adapter *adapt)
{
	int s, error = 0;

	s = splbio();
	/* First reference powers the adapter up, if it supports that. */
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
2406 */ 2407 void 2408 scsipi_adapter_delref(struct scsipi_adapter *adapt) 2409 { 2410 int s; 2411 2412 s = splbio(); 2413 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL) 2414 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0); 2415 splx(s); 2416 } 2417 2418 static struct scsipi_syncparam { 2419 int ss_factor; 2420 int ss_period; /* ns * 100 */ 2421 } scsipi_syncparams[] = { 2422 { 0x08, 625 }, /* FAST-160 (Ultra320) */ 2423 { 0x09, 1250 }, /* FAST-80 (Ultra160) */ 2424 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */ 2425 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */ 2426 { 0x0c, 5000 }, /* FAST-20 (Ultra) */ 2427 }; 2428 static const int scsipi_nsyncparams = 2429 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]); 2430 2431 int 2432 scsipi_sync_period_to_factor(int period /* ns * 100 */) 2433 { 2434 int i; 2435 2436 for (i = 0; i < scsipi_nsyncparams; i++) { 2437 if (period <= scsipi_syncparams[i].ss_period) 2438 return (scsipi_syncparams[i].ss_factor); 2439 } 2440 2441 return ((period / 100) / 4); 2442 } 2443 2444 int 2445 scsipi_sync_factor_to_period(int factor) 2446 { 2447 int i; 2448 2449 for (i = 0; i < scsipi_nsyncparams; i++) { 2450 if (factor == scsipi_syncparams[i].ss_factor) 2451 return (scsipi_syncparams[i].ss_period); 2452 } 2453 2454 return ((factor * 4) * 100); 2455 } 2456 2457 int 2458 scsipi_sync_factor_to_freq(int factor) 2459 { 2460 int i; 2461 2462 for (i = 0; i < scsipi_nsyncparams; i++) { 2463 if (factor == scsipi_syncparams[i].ss_factor) 2464 return (100000000 / scsipi_syncparams[i].ss_period); 2465 } 2466 2467 return (10000000 / ((factor * 4) * 10)); 2468 } 2469 2470 #ifdef SCSIPI_DEBUG 2471 /* 2472 * Given a scsipi_xfer, dump the request, in all its glory 2473 */ 2474 void 2475 show_scsipi_xs(struct scsipi_xfer *xs) 2476 { 2477 2478 printf("xs(%p): ", xs); 2479 printf("xs_control(0x%08x)", xs->xs_control); 2480 printf("xs_status(0x%08x)", xs->xs_status); 2481 printf("periph(%p)", xs->xs_periph); 2482 printf("retr(0x%x)", 
xs->xs_retries); 2483 printf("timo(0x%x)", xs->timeout); 2484 printf("cmd(%p)", xs->cmd); 2485 printf("len(0x%x)", xs->cmdlen); 2486 printf("data(%p)", xs->data); 2487 printf("len(0x%x)", xs->datalen); 2488 printf("res(0x%x)", xs->resid); 2489 printf("err(0x%x)", xs->error); 2490 printf("bp(%p)", xs->bp); 2491 show_scsipi_cmd(xs); 2492 } 2493 2494 void 2495 show_scsipi_cmd(struct scsipi_xfer *xs) 2496 { 2497 u_char *b = (u_char *) xs->cmd; 2498 int i = 0; 2499 2500 scsipi_printaddr(xs->xs_periph); 2501 printf(" command: "); 2502 2503 if ((xs->xs_control & XS_CTL_RESET) == 0) { 2504 while (i < xs->cmdlen) { 2505 if (i) 2506 printf(","); 2507 printf("0x%x", b[i++]); 2508 } 2509 printf("-[%d bytes]\n", xs->datalen); 2510 if (xs->datalen) 2511 show_mem(xs->data, min(64, xs->datalen)); 2512 } else 2513 printf("-RESET-\n"); 2514 } 2515 2516 void 2517 show_mem(u_char *address, int num) 2518 { 2519 int x; 2520 2521 printf("------------------------------"); 2522 for (x = 0; x < num; x++) { 2523 if ((x % 16) == 0) 2524 printf("\n%03d: ", x); 2525 printf("%02x ", *address++); 2526 } 2527 printf("\n------------------------------\n"); 2528 } 2529 #endif /* SCSIPI_DEBUG */ 2530