/*	$NetBSD: scsipi_base.c,v 1.105 2004/04/27 18:15:37 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.105 2004/04/27 18:15:37 bouyer Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
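
/*
 * A note on the scheme above (informal): periphs are kept in the
 * channel's chan_periphtab[] hash table, keyed by (target, lun), so a
 * lookup walks a single bucket rather than every periph on the channel.
 * Assuming SCSIPI_CHAN_PERIPH_BUCKETS is a power of two (with
 * SCSIPI_CHAN_PERIPH_HASHMASK being BUCKETS - 1), the two hash32_buf()
 * passes fold the target and LUN into one bucket index.
 */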

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Look up a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}
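
/*
 * Accounting note (informal): each in-flight xfer holds exactly one
 * opening, taken from chan_openings when the adapter manages openings
 * per-channel (SCSIPI_CHAN_OPENINGS set) and from adapt_openings
 * otherwise.  scsipi_done() gives it back via scsipi_put_resource(),
 * so these counters always reflect the openings still free.
 */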

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
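	/*
	 * Worked example of the rules above (informal): with
	 * periph_openings == 4 and periph_active == 4, an ordinary xfer
	 * must wait for an opening, but an URGENT recovery xfer may
	 * still proceed, because it reuses the opening of the failed
	 * command it is recovering for.
	 */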
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
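
/*
 * Hypothetical usage sketch (not from this file): an HBA driver that
 * must quiesce a channel around a bus reset might do
 *
 *	scsipi_channel_freeze(chan, 1);
 *	... reset and reinitialize the hardware ...
 *	scsipi_channel_thaw(chan, 1);
 *
 * or defer the thaw with
 *
 *	callout_reset(&sc->sc_callout, hz, scsipi_channel_timed_thaw, chan);
 *
 * where sc_callout is an adapter softc member invented for this example.
 */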

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debug purposes, error
 *	messages, SCSIPI_VERBOSE, ...)
 */
void
scsipi_print_cdb(cmd)
	struct scsipi_generic *cmd;
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j - 1; i++)	/* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

		/*
		 * Some other code, just report it
		 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}
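
/*
 * Decoding example (informal): an extended sense block with error code
 * 0x70 and a MEDIUM ERROR key (0x3) falls through to the default case
 * above and maps to EIO; in the non-SCSIVERBOSE case the driver prints
 * "medium error" (error_mes[key - 1]) plus the info bytes whenever
 * SSD_ERRCODE_VALID is set.
 */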

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as the 4-byte
	 * big-endian address of the last block; the capacity is that
	 * address plus one.
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	int retries;
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media.
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}
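
/*
 * Illustrative sketch (hypothetical caller): the helpers above all
 * follow the same pattern: zero a CDB template, fill in the opcode and
 * parameters, and pass it to scsipi_command().  Spinning up a disk and
 * then locking its door might look like
 *
 *	if ((error = scsipi_start(periph, SSS_START, 0)) == 0)
 *		error = scsipi_prevent(periph, PR_PREVENT, 0);
 *
 * (PR_PREVENT is the usual PREVENT/ALLOW `prevent' value; treat the
 * exact constant as an assumption of this example.)
 */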

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a mode page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}
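
/*
 * Hypothetical usage sketch: reading a mode page with the 6-byte
 * MODE SENSE helper.  The data buffer must begin with a
 * struct scsipi_mode_header; the page code and buffer layout here are
 * assumptions of the example, not something this file defines.
 *
 *	struct {
 *		struct scsipi_mode_header header;
 *		u_int8_t page[0x20];
 *	} buf;
 *
 *	error = scsipi_mode_sense(periph, 0, 0x08, &buf.header,
 *	    sizeof(buf), XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000);
 */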

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing and let the user work it out.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them.
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 *	Failing that, at the end of the queue.  (We'll end up
	 *	there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
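
/*
 * Informal picture of the flow: scsipi_execute_xs() (below) enqueues an
 * xfer, scsipi_run_queue() pushes queued xfers to the adapter whenever
 * openings, tags and freeze counts allow, and scsipi_done() returns the
 * resources and calls scsipi_run_queue() again, so the queue drains
 * itself as commands retire.
 */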

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
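
/*
 * Summary of the thread's work sources (informal): chan_complete holds
 * failed xfers queued by scsipi_done(), while chan_tflags carries
 * one-shot requests (CALLBACK, GROWRES, KICK, SHUTDOWN) posted from
 * other contexts; both are signalled with a wakeup() on
 * &chan->chan_complete.
 */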

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	Request that a callback be run from the completion thread.
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		splx(s);
		return (ESRCH);
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		splx(s);
		return (EBUSY);
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}
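
/*
 * Illustrative sketch (editor's addition, compiled out; not part of the
 * original source): a typical consumer of scsipi_thread_call_callback()
 * above.  The midlayer freezes the channel before queueing the callback,
 * so the callback conventionally ends with scsipi_channel_thaw(chan, 1).
 * example_requeue_cb() and example_defer_work() are hypothetical names.
 */
#if 0
static void
example_requeue_cb(struct scsipi_channel *chan, void *arg)
{

	/* ... driver-specific recovery work, now in thread context ... */
	scsipi_channel_thaw(chan, 1);	/* undo the freeze taken for us */
}

static int
example_defer_work(struct scsipi_channel *chan, void *sc)
{

	/* ESRCH: no thread yet; EBUSY: a callback is already pending. */
	return (scsipi_thread_call_callback(chan, example_requeue_cb, sc));
}
#endif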

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	aprint_normal("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		aprint_normal("sync (%d.%02dns offset %d)",
		    period / 100, period % 100, periph->periph_offset);
	} else
		aprint_normal("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		aprint_normal(", 32-bit");
	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
		aprint_normal(", 16-bit");
	else
		aprint_normal(", 8-bit");

	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode &
		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			aprint_normal(" (%dKB/s)", speed % 1000);
	}

	aprint_normal(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		aprint_normal(", tagged queueing");

	aprint_normal("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}
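
/*
 * Worked example (editor's annotation, not original):  a FAST-40 (Ultra2)
 * periph reports sync factor 0x0a.  scsipi_sync_factor_to_period() maps
 * that to 2500 (i.e. 25.00 ns), and scsipi_sync_factor_to_freq() to
 * 40000 kHz.  With PERIPH_CAP_WIDE16 the speed doubles to 80000 KB/s
 * (mbs == 80), so scsipi_print_xfer_mode() above would report:
 *
 *	sd0: sync (25.00ns offset 15), 16-bit (80.000MB/s) transfers
 *
 * ("sd0" and the offset are illustrative values.)
 */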

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;	/* ignored */
	xm.xm_offset = 0;	/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;

		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);

		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}
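
/*
 * Illustrative sketch (editor's addition, compiled out; not part of the
 * original source):  how a host adapter driver might force immediate
 * renegotiation after an error, using scsipi_set_xfer_mode() above.  A
 * non-zero "immed" makes the midlayer issue the dummy TEST UNIT READY so
 * the negotiation actually happens right away.  example_renegotiate() is
 * a hypothetical name.
 */
#if 0
static void
example_renegotiate(struct scsipi_channel *chan, int target)
{

	scsipi_set_xfer_mode(chan, target, 1);	/* 1 == immediate */
}
#endif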

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.  Called at splbio.
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * The channel has been reset.  Also mark any pending REQUEST_SENSE
	 * commands as reset, since the sense data is no longer available.
	 * We can't call scsipi_done() from here, as the command has not
	 * been sent to the adapter yet (doing so would corrupt the
	 * accounting).
	 */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);

	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with an I_T Nexus.
 *	Must be called from valid thread context.
 */
int
scsipi_target_detach(chan, target, lun, flags)
	struct scsipi_channel *chan;
	int target, lun;
	int flags;
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return (EINVAL);
		if (target < 0 || target >= chan->chan_ntargets)
			return (EINVAL);
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return (EINVAL);
		minlun = lun;
		maxlun = lun + 1;
	}

	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph(chan, ctarget, clun);
			if (periph == NULL)
				continue;
			error = config_detach(periph->periph_dev, flags);
			if (error)
				return (error);
			scsipi_remove_periph(chan, periph);
			free(periph, M_DEVBUF);
		}
	}
	return (0);
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}
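
/*
 * Illustrative sketch (editor's addition, compiled out; not part of the
 * original source):  the usual pairing of scsipi_adapter_addref() above
 * with scsipi_adapter_delref() below around a device open, so adapters
 * with an adapt_enable hook are powered up only while a periph is in
 * use.  example_open() is a hypothetical helper.
 */
#if 0
static int
example_open(struct scsipi_periph *periph)
{
	struct scsipi_adapter *adapt =
	    periph->periph_channel->chan_adapter;
	int error;

	if ((error = scsipi_adapter_addref(adapt)) != 0)
		return (error);	/* adapter could not be enabled */

	/*
	 * ... use the device; on final close the matching
	 * scsipi_adapter_delref(adapt) drops the reference and may
	 * disable the adapter again.
	 */
	return (0);
}
#endif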

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 100 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 100) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 100);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (100000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */
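
/*
 * Worked example (editor's annotation, not original):  the sync factor
 * conversions above are inverses over the scsipi_syncparams table
 * entries, and otherwise fall back to the SCSI-2 "factor * 4 ns" rule.
 * For instance:
 *
 *	scsipi_sync_period_to_factor(1250) == 0x09	(FAST-80)
 *	scsipi_sync_factor_to_period(0x09) == 1250	(12.50 ns)
 *	scsipi_sync_factor_to_freq(0x09)   == 80000	(kHz)
 *
 *	scsipi_sync_factor_to_period(0x19) == 10000	(25 * 4 ns, as ns*100)
 */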