/*	$NetBSD: scsipi_base.c,v 1.149 2009/04/07 18:10:45 dyoung Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.149 2009/04/07 18:10:45 dyoung Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
		    struct scsipi_xfer_mode *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static struct pool scsipi_xfer_pool;
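
/*
 * Locking note (editorial summary, not from the original header): this
 * file predates fine-grained kernel locking.  Shared state such as the
 * channel and periph queues, freeze counts, and tag bitmaps is protected
 * by blocking block-I/O interrupts with splbio()/splx() around each
 * critical section, and tsleep()/wakeup() is used for blocking waits.
 */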

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev,
		    "unable to create completion thread for channel %d\n",
		    chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}
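
/*
 * Illustrative sketch (editorial, not part of the driver): target/lun
 * pairs hash into one of SCSIPI_CHAN_PERIPH_BUCKETS lists, so a lookup
 * is one hash plus a short list walk, e.g.:
 *
 *	struct scsipi_periph *p;
 *
 *	p = scsipi_lookup_periph(chan, 3, 0);	(target 3, LUN 0)
 *	if (p != NULL)
 *		scsipi_printaddr(p);
 */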

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
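
/*
 * Tag encoding sketch (editorial restatement of the two functions
 * above): tag IDs index a per-periph free-tag bitmap with 32 tags per
 * word, so allocation and release are just:
 *
 *	tag  = (word << 5) | bit;	allocate: clear bit in freetags
 *	word = tag >> 5;		release: set bit in freetags
 *	bit  = tag & 0x1f;
 *
 * ffs(3) returns the 1-based index of the least significant set bit,
 * hence the `bit -= 1' in scsipi_get_tag().
 */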

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}
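
/*
 * Usage sketch (illustrative only; error handling elided): periph
 * drivers normally reach scsipi_get_xs() through scsipi_command(),
 * but the raw allocate/release pairing looks like this:
 *
 *	struct scsipi_xfer *xs;
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;			no opening or no memory
 *	... fill in xs->cmd, xs->cmdlen, xs->data, xs->timeout ...
 *	... hand xs to the adapter; when done, at splbio(): ...
 *	scsipi_put_xs(xs);
 */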

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}
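
/*
 * Freeze/thaw sketch (editorial): freezes are counted, not boolean, so
 * independent freezers compose; a queue runs only at a count of 0:
 *
 *	scsipi_periph_freeze(periph, 1);	0 -> 1, queue held
 *	scsipi_periph_freeze(periph, 1);	1 -> 2, still held
 *	scsipi_periph_thaw(periph, 1);		2 -> 1, still held
 *	scsipi_periph_thaw(periph, 1);		1 -> 0, queue may run
 */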

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debugging purposes, error
 *	messages, SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j - 1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
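
/*
 * CDB length note (editorial, derived from the group codes above): the
 * top three bits of the opcode select the command group, which implies
 * the number of CDB bytes printed, e.g. group 0 is 6 bytes (TEST UNIT
 * READY, 0x00), group 1 is 10 bytes (READ(10), 0x28), group 5 is 12
 * bytes (READ(12), 0xa8).  Unknown groups fall back to dumping
 * sizeof(cmd->bytes).
 */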

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
		    SSD_RCODE(sense->response_code),
		    sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    SSD_SENSE_KEY(sense->flags),
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->response_code & SSD_RCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_DATA_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->csi[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		/* dump the whole sense buffer, not just a pointer's worth */
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
		    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a mode page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}
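
/*
 * Usage sketch (illustrative; the page number and SMS_PF flag here are
 * examples, not taken from this file): reading a mode page and writing
 * it back.  The caller's buffer starts with the parameter header and
 * the page data follows it:
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 hdr;
 *		u_int8_t page[32];
 *	} buf;
 *	int error;
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &buf.hdr, sizeof(buf),
 *	    0, SCSIPIRETRIES, 10000);
 *	if (error == 0)
 *		error = scsipi_mode_select(periph, SMS_PF, &buf.hdr,
 *		    sizeof(buf), 0, SCSIPIRETRIES, 10000);
 */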

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}
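
/*
 * Completion-path note (editorial summary of the code below): adapters
 * call scsipi_done() from interrupt context.  Error-free xfers are
 * finished inline via scsipi_complete(); xfers with errors are queued
 * on chan_complete and handed to the channel's completion thread, which
 * may sleep while running recovery (e.g. REQUEST SENSE) or retries.
 */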

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/* XXX in certain circumstances, such as a device
		 * being detached, an xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach().  Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies.  This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		splx(s);
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsi_request_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(struct scsi_sense_data);

	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}
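
/*
 * Error-mapping note (editorial summary of scsipi_request_sense()
 * above): the original xfer's error code is rewritten from the outcome
 * of the REQUEST SENSE command:
 *
 *	0	-> XS_SENSE		sense data is valid
 *	EINTR	-> XS_RESET		interrupted by a bus reset
 *	other	-> XS_DRIVER_STUFFUP	sense could not be fetched
 */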

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
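
/*
 * Scheduling note (editorial summary of scsipi_run_queue() above): an
 * xfer is started only when all of the following hold, otherwise it
 * stays queued until the next completion, thaw, or explicit kick:
 *
 *	- the channel's freeze count is 0;
 *	- the periph has an opening (periph_sent < periph_openings)
 *	  and is neither frozen nor running an untagged command;
 *	- an adapter opening (`resource') can be allocated or grown.
 */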

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, error, s;

	KASSERT(!cold);

	(chan->chan_bustype->bustype_cmd)(xs);

	if (xs->xs_control & XS_CTL_DATA_ONSTACK) {
#if 1
		if (xs->xs_control & XS_CTL_ASYNC)
			panic("scsipi_execute_xs: on stack and async");
#endif
		/*
		 * If the I/O buffer is allocated on stack, the
		 * process must NOT be swapped out, as the device will
		 * be accessing the stack.
		 */
		uvm_lwp_hold(curlwp);
	}

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("should have flushed queue?\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.
	 * If it's to be completed asynchronously, just return now.
	 */
	if (async)
		return (0);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = 0;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	if (xs->xs_control & XS_CTL_DATA_ONSTACK)
		uvm_lwp_rele(curlwp);

	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
				kpause("scsizzz", FALSE, hz/10, NULL);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
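
/*
 * Thread-request note (editorial summary of the loop above): work is
 * handed to the completion thread either as an xfer on chan_complete
 * or as a bit in chan_tflags, followed by wakeup(&chan->chan_complete):
 *
 *	SCSIPI_CHANT_CALLBACK	run chan_callback in thread context
 *	SCSIPI_CHANT_GROWRES	ask the adapter for more openings
 *	SCSIPI_CHANT_KICK	re-run the channel's queue
 *	SCSIPI_CHANT_SHUTDOWN	exit the thread
 */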
*/ 2124 wakeup(&chan->chan_thread); 2125 2126 kthread_exit(0); 2127 } 2128 /* 2129 * scsipi_thread_call_callback: 2130 * 2131 * request to call a callback from the completion thread 2132 */ 2133 int 2134 scsipi_thread_call_callback(struct scsipi_channel *chan, 2135 void (*callback)(struct scsipi_channel *, void *), void *arg) 2136 { 2137 int s; 2138 2139 s = splbio(); 2140 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 2141 /* kernel thread doesn't exist yet */ 2142 splx(s); 2143 return ESRCH; 2144 } 2145 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) { 2146 splx(s); 2147 return EBUSY; 2148 } 2149 scsipi_channel_freeze(chan, 1); 2150 chan->chan_callback = callback; 2151 chan->chan_callback_arg = arg; 2152 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK; 2153 wakeup(&chan->chan_complete); 2154 splx(s); 2155 return(0); 2156 } 2157 2158 /* 2159 * scsipi_async_event: 2160 * 2161 * Handle an asynchronous event from an adapter. 2162 */ 2163 void 2164 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event, 2165 void *arg) 2166 { 2167 int s; 2168 2169 s = splbio(); 2170 switch (event) { 2171 case ASYNC_EVENT_MAX_OPENINGS: 2172 scsipi_async_event_max_openings(chan, 2173 (struct scsipi_max_openings *)arg); 2174 break; 2175 2176 case ASYNC_EVENT_XFER_MODE: 2177 scsipi_async_event_xfer_mode(chan, 2178 (struct scsipi_xfer_mode *)arg); 2179 break; 2180 case ASYNC_EVENT_RESET: 2181 scsipi_async_event_channel_reset(chan); 2182 break; 2183 } 2184 splx(s); 2185 } 2186 2187 /* 2188 * scsipi_print_xfer_mode: 2189 * 2190 * Print a periph's capabilities. 2191 */ 2192 void 2193 scsipi_print_xfer_mode(struct scsipi_periph *periph) 2194 { 2195 int period, freq, speed, mbs; 2196 2197 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0) 2198 return; 2199 2200 aprint_normal_dev(periph->periph_dev, ""); 2201 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) { 2202 period = scsipi_sync_factor_to_period(periph->periph_period); 2203 aprint_normal("sync (%d.%02dns offset %d)", 2204 period / 100, period % 100, periph->periph_offset); 2205 } else 2206 aprint_normal("async"); 2207 2208 if (periph->periph_mode & PERIPH_CAP_WIDE32) 2209 aprint_normal(", 32-bit"); 2210 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) 2211 aprint_normal(", 16-bit"); 2212 else 2213 aprint_normal(", 8-bit"); 2214 2215 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) { 2216 freq = scsipi_sync_factor_to_freq(periph->periph_period); 2217 speed = freq; 2218 if (periph->periph_mode & PERIPH_CAP_WIDE32) 2219 speed *= 4; 2220 else if (periph->periph_mode & 2221 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) 2222 speed *= 2; 2223 mbs = speed / 1000; 2224 if (mbs > 0) 2225 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000); 2226 else 2227 aprint_normal(" (%dKB/s)", speed % 1000); 2228 } 2229 2230 aprint_normal(" transfers"); 2231 2232 if (periph->periph_mode & PERIPH_CAP_TQING) 2233 aprint_normal(", tagged queueing"); 2234 2235 aprint_normal("\n"); 2236 } 2237 2238 /* 2239 * scsipi_async_event_max_openings: 2240 * 2241 * Update the maximum number of outstanding commands a 2242 * device may have. 2243 */ 2244 static void 2245 scsipi_async_event_max_openings(struct scsipi_channel *chan, 2246 struct scsipi_max_openings *mo) 2247 { 2248 struct scsipi_periph *periph; 2249 int minlun, maxlun; 2250 2251 if (mo->mo_lun == -1) { 2252 /* 2253 * Wildcarded; apply it to all LUNs. 
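	 *
	 * For illustration only (hypothetical values): an adapter that
	 * has run out of tags for target 2 could post
	 *
	 *	struct scsipi_max_openings mo;
	 *
	 *	mo.mo_target = 2;
	 *	mo.mo_lun = -1;		wildcard: every LUN on target 2
	 *	mo.mo_openings = 4;
	 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
	 *
	 * which reaches this routine via scsipi_async_event() above.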
2254 */ 2255 minlun = 0; 2256 maxlun = chan->chan_nluns - 1; 2257 } else 2258 minlun = maxlun = mo->mo_lun; 2259 2260 /* XXX This could really suck with a large LUN space. */ 2261 for (; minlun <= maxlun; minlun++) { 2262 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun); 2263 if (periph == NULL) 2264 continue; 2265 2266 if (mo->mo_openings < periph->periph_openings) 2267 periph->periph_openings = mo->mo_openings; 2268 else if (mo->mo_openings > periph->periph_openings && 2269 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0) 2270 periph->periph_openings = mo->mo_openings; 2271 } 2272 } 2273 2274 /* 2275 * scsipi_async_event_xfer_mode: 2276 * 2277 * Update the xfer mode for all periphs sharing the 2278 * specified I_T Nexus. 2279 */ 2280 static void 2281 scsipi_async_event_xfer_mode(struct scsipi_channel *chan, 2282 struct scsipi_xfer_mode *xm) 2283 { 2284 struct scsipi_periph *periph; 2285 int lun, announce, mode, period, offset; 2286 2287 for (lun = 0; lun < chan->chan_nluns; lun++) { 2288 periph = scsipi_lookup_periph(chan, xm->xm_target, lun); 2289 if (periph == NULL) 2290 continue; 2291 announce = 0; 2292 2293 /* 2294 * Clamp the xfer mode down to this periph's capabilities. 2295 */ 2296 mode = xm->xm_mode & periph->periph_cap; 2297 if (mode & PERIPH_CAP_SYNC) { 2298 period = xm->xm_period; 2299 offset = xm->xm_offset; 2300 } else { 2301 period = 0; 2302 offset = 0; 2303 } 2304 2305 /* 2306 * If we do not have a valid xfer mode yet, or the parameters 2307 * are different, announce them. 2308 */ 2309 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 || 2310 periph->periph_mode != mode || 2311 periph->periph_period != period || 2312 periph->periph_offset != offset) 2313 announce = 1; 2314 2315 periph->periph_mode = mode; 2316 periph->periph_period = period; 2317 periph->periph_offset = offset; 2318 periph->periph_flags |= PERIPH_MODE_VALID; 2319 2320 if (announce) 2321 scsipi_print_xfer_mode(periph); 2322 } 2323 } 2324 2325 /* 2326 * scsipi_set_xfer_mode: 2327 * 2328 * Set the xfer mode for the specified I_T Nexus. 2329 */ 2330 void 2331 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed) 2332 { 2333 struct scsipi_xfer_mode xm; 2334 struct scsipi_periph *itperiph; 2335 int lun, s; 2336 2337 /* 2338 * Go to the minimal xfer mode. 2339 */ 2340 xm.xm_target = target; 2341 xm.xm_mode = 0; 2342 xm.xm_period = 0; /* ignored */ 2343 xm.xm_offset = 0; /* ignored */ 2344 2345 /* 2346 * Find the first LUN we know about on this I_T Nexus. 2347 */ 2348 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) { 2349 itperiph = scsipi_lookup_periph(chan, target, lun); 2350 if (itperiph != NULL) 2351 break; 2352 } 2353 if (itperiph != NULL) { 2354 xm.xm_mode = itperiph->periph_cap; 2355 /* 2356 * Now issue the request to the adapter. 2357 */ 2358 s = splbio(); 2359 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm); 2360 splx(s); 2361 /* 2362 * If we want this to happen immediately, issue a dummy 2363 * command, since most adapters can't really negotiate unless 2364 * they're executing a job. 
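		 *
		 * (Sketch, hypothetical values: once the dummy command
		 * lets the adapter finish the negotiation, the adapter
		 * reports the agreed mode back with something like
		 *
		 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
		 *	xm.xm_period = 0x0c;	sync factor, cf. scsipi_syncparams
		 *	xm.xm_offset = 8;
		 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		 *
		 * which is handled by scsipi_async_event_xfer_mode() above.)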
2365 		 */
2366 		if (immed != 0) {
2367 			(void) scsipi_test_unit_ready(itperiph,
2368 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2369 			    XS_CTL_IGNORE_NOT_READY |
2370 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2371 		}
2372 	}
2373 }
2374 
2375 /*
2376  * scsipi_async_event_channel_reset:
2377  *
2378  *	Handle a SCSI bus reset.
2379  *	Called at splbio.
2380  */
2381 static void
2382 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2383 {
2384 	struct scsipi_xfer *xs, *xs_next;
2385 	struct scsipi_periph *periph;
2386 	int target, lun;
2387 
2388 	/*
2389 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2390 	 * commands with XS_RESET, as their sense data is no longer valid.
2391 	 * We can't call scsipi_done() from here, as those commands have not
2392 	 * been sent to the adapter yet (that would corrupt the accounting).
2393 	 */
2394 
2395 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2396 		xs_next = TAILQ_NEXT(xs, channel_q);
2397 		if (xs->xs_control & XS_CTL_REQSENSE) {
2398 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2399 			xs->error = XS_RESET;
2400 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2401 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2402 				    channel_q);
2403 		}
2404 	}
2405 	wakeup(&chan->chan_complete);
2406 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
2407 	for (target = 0; target < chan->chan_ntargets; target++) {
2408 		if (target == chan->chan_id)
2409 			continue;
2410 		for (lun = 0; lun < chan->chan_nluns; lun++) {
2411 			periph = scsipi_lookup_periph(chan, target, lun);
2412 			if (periph) {
2413 				xs = periph->periph_xscheck;
2414 				if (xs)
2415 					xs->error = XS_RESET;
2416 			}
2417 		}
2418 	}
2419 }
2420 
2421 /*
2422  * scsipi_target_detach:
2423  *
2424  *	Detach all periphs associated with an I_T nexus.
2425  *	Must be called from a valid thread context.
2426  */
2427 int
2428 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2429     int flags)
2430 {
2431 	struct scsipi_periph *periph;
2432 	int ctarget, mintarget, maxtarget;
2433 	int clun, minlun, maxlun;
2434 	int error;
2435 
2436 	if (target == -1) {
2437 		mintarget = 0;
2438 		maxtarget = chan->chan_ntargets;
2439 	} else {
2440 		if (target == chan->chan_id)
2441 			return EINVAL;
2442 		if (target < 0 || target >= chan->chan_ntargets)
2443 			return EINVAL;
2444 		mintarget = target;
2445 		maxtarget = target + 1;
2446 	}
2447 
2448 	if (lun == -1) {
2449 		minlun = 0;
2450 		maxlun = chan->chan_nluns;
2451 	} else {
2452 		if (lun < 0 || lun >= chan->chan_nluns)
2453 			return EINVAL;
2454 		minlun = lun;
2455 		maxlun = lun + 1;
2456 	}
2457 
2458 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2459 		if (ctarget == chan->chan_id)
2460 			continue;
2461 
2462 		for (clun = minlun; clun < maxlun; clun++) {
2463 			periph = scsipi_lookup_periph(chan, ctarget, clun);
2464 			if (periph == NULL)
2465 				continue;
2466 			error = config_detach(periph->periph_dev, flags);
2467 			if (error)
2468 				return (error);
2469 		}
2470 	}
2471 	return (0);
2472 }
2473 
2474 /*
2475  * scsipi_adapter_addref:
2476  *
2477  *	Add a reference to the adapter pointed to by the provided
2478  *	link, enabling the adapter if necessary.
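 *
 *	A typical pairing in a periph driver's open path, sketched with
 *	hypothetical surrounding code:
 *
 *		if ((error = scsipi_adapter_addref(adapt)) != 0)
 *			return (error);
 *		...use the (now enabled) adapter...
 *		scsipi_adapter_delref(adapt);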
2479  */
2480 int
2481 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2482 {
2483 	int s, error = 0;
2484 
2485 	s = splbio();
2486 	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2487 		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2488 		if (error)
2489 			adapt->adapt_refcnt--;
2490 	}
2491 	splx(s);
2492 	return (error);
2493 }
2494 
2495 /*
2496  * scsipi_adapter_delref:
2497  *
2498  *	Delete a reference to the adapter pointed to by the provided
2499  *	link, disabling the adapter if possible.
2500  */
2501 void
2502 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2503 {
2504 	int s;
2505 
2506 	s = splbio();
2507 	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2508 		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2509 	splx(s);
2510 }
2511 
2512 static struct scsipi_syncparam {
2513 	int	ss_factor;
2514 	int	ss_period;	/* ns * 100 */
2515 } scsipi_syncparams[] = {
2516 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2517 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2518 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2519 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2520 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2521 };
2522 static const int scsipi_nsyncparams =
2523     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2524 
2525 int
2526 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2527 {
2528 	int i;
2529 
2530 	for (i = 0; i < scsipi_nsyncparams; i++) {
2531 		if (period <= scsipi_syncparams[i].ss_period)
2532 			return (scsipi_syncparams[i].ss_factor);
2533 	}
2534 
2535 	return ((period / 100) / 4);
2536 }
2537 
2538 int
2539 scsipi_sync_factor_to_period(int factor)	/* returns ns * 100 */
2540 {
2541 	int i;
2542 
2543 	for (i = 0; i < scsipi_nsyncparams; i++) {
2544 		if (factor == scsipi_syncparams[i].ss_factor)
2545 			return (scsipi_syncparams[i].ss_period);
2546 	}
2547 
2548 	return ((factor * 4) * 100);
2549 }
2550 
2551 int
2552 scsipi_sync_factor_to_freq(int factor)	/* returns kHz */
2553 {
2554 	int i;
2555 
2556 	for (i = 0; i < scsipi_nsyncparams; i++) {
2557 		if (factor == scsipi_syncparams[i].ss_factor)
2558 			return (100000000 / scsipi_syncparams[i].ss_period);
2559 	}
2560 
2561 	return (10000000 / ((factor * 4) * 10));
2562 }
2563 
2564 #ifdef SCSIPI_DEBUG
2565 /*
2566  * Given a scsipi_xfer, dump the request, in all its glory.
2567  */
2568 void
2569 show_scsipi_xs(struct scsipi_xfer *xs)
2570 {
2571 
2572 	printf("xs(%p): ", xs);
2573 	printf("xs_control(0x%08x)", xs->xs_control);
2574 	printf("xs_status(0x%08x)", xs->xs_status);
2575 	printf("periph(%p)", xs->xs_periph);
2576 	printf("retr(0x%x)", xs->xs_retries);
2577 	printf("timo(0x%x)", xs->timeout);
2578 	printf("cmd(%p)", xs->cmd);
2579 	printf("len(0x%x)", xs->cmdlen);
2580 	printf("data(%p)", xs->data);
2581 	printf("len(0x%x)", xs->datalen);
2582 	printf("res(0x%x)", xs->resid);
2583 	printf("err(0x%x)", xs->error);
2584 	printf("bp(%p)", xs->bp);
2585 	show_scsipi_cmd(xs);
2586 }
2587 
2588 void
2589 show_scsipi_cmd(struct scsipi_xfer *xs)
2590 {
2591 	u_char *b = (u_char *) xs->cmd;
2592 	int i = 0;
2593 
2594 	scsipi_printaddr(xs->xs_periph);
2595 	printf(" command: ");
2596 
2597 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2598 		while (i < xs->cmdlen) {
2599 			if (i)
2600 				printf(",");
2601 			printf("0x%x", b[i++]);
2602 		}
2603 		printf("-[%d bytes]\n", xs->datalen);
2604 		if (xs->datalen)
2605 			show_mem(xs->data, min(64, xs->datalen));
2606 	} else
2607 		printf("-RESET-\n");
2608 }
2609 
2610 void
2611 show_mem(u_char *address, int num)
2612 {
2613 	int x;
2614 
2615 	printf("------------------------------");
2616 	for (x = 0; x < num; x++) {
2617 		if ((x % 16) == 0)
2618 			printf("\n%03d: 
", x); 2619 printf("%02x ", *address++); 2620 } 2621 printf("\n------------------------------\n"); 2622 } 2623 #endif /* SCSIPI_DEBUG */ 2624