1 /* $NetBSD: scsipi_base.c,v 1.154 2010/08/23 20:01:16 pooka Exp $ */ 2 3 /*- 4 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace 9 * Simulation Facility, NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.154 2010/08/23 20:01:16 pooka Exp $"); 35 36 #include "opt_scsi.h" 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/kernel.h> 41 #include <sys/buf.h> 42 #include <sys/uio.h> 43 #include <sys/malloc.h> 44 #include <sys/pool.h> 45 #include <sys/errno.h> 46 #include <sys/device.h> 47 #include <sys/proc.h> 48 #include <sys/kthread.h> 49 #include <sys/hash.h> 50 51 #include <uvm/uvm_extern.h> 52 53 #include <dev/scsipi/scsi_spc.h> 54 #include <dev/scsipi/scsipi_all.h> 55 #include <dev/scsipi/scsipi_disk.h> 56 #include <dev/scsipi/scsipiconf.h> 57 #include <dev/scsipi/scsipi_base.h> 58 59 #include <dev/scsipi/scsi_all.h> 60 #include <dev/scsipi/scsi_message.h> 61 62 #include <machine/param.h> 63 64 static int scsipi_complete(struct scsipi_xfer *); 65 static void scsipi_request_sense(struct scsipi_xfer *); 66 static int scsipi_enqueue(struct scsipi_xfer *); 67 static void scsipi_run_queue(struct scsipi_channel *chan); 68 69 static void scsipi_completion_thread(void *); 70 71 static void scsipi_get_tag(struct scsipi_xfer *); 72 static void scsipi_put_tag(struct scsipi_xfer *); 73 74 static int scsipi_get_resource(struct scsipi_channel *); 75 static void scsipi_put_resource(struct scsipi_channel *); 76 77 static void scsipi_async_event_max_openings(struct scsipi_channel *, 78 struct scsipi_max_openings *); 79 static void scsipi_async_event_xfer_mode(struct scsipi_channel *, 80 struct scsipi_xfer_mode *); 81 static void scsipi_async_event_channel_reset(struct scsipi_channel *); 82 83 static struct pool scsipi_xfer_pool; 84 85 /* 86 * scsipi_init: 87 * 88 * Called when a scsibus or atapibus is attached to the system 89 * to initialize shared data structures. 90 */ 91 void 92 scsipi_init(void) 93 { 94 static int scsipi_init_done; 95 96 if (scsipi_init_done) 97 return; 98 scsipi_init_done = 1; 99 100 /* Initialize the scsipi_xfer pool. */ 101 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0, 102 0, 0, "scxspl", NULL, IPL_BIO); 103 if (pool_prime(&scsipi_xfer_pool, 104 PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) { 105 printf("WARNING: not enough memory for scsipi_xfer_pool\n"); 106 } 107 } 108 109 /* 110 * scsipi_channel_init: 111 * 112 * Initialize a scsipi_channel when it is attached. 113 */ 114 int 115 scsipi_channel_init(struct scsipi_channel *chan) 116 { 117 struct scsipi_adapter *adapt = chan->chan_adapter; 118 int i; 119 120 /* Initialize shared data. */ 121 scsipi_init(); 122 123 /* Initialize the queues. */ 124 TAILQ_INIT(&chan->chan_queue); 125 TAILQ_INIT(&chan->chan_complete); 126 127 for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++) 128 LIST_INIT(&chan->chan_periphtab[i]); 129 130 /* 131 * Create the asynchronous completion thread. 132 */ 133 if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan, 134 &chan->chan_thread, "%s", chan->chan_name)) { 135 aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for " 136 "channel %d\n", chan->chan_channel); 137 panic("scsipi_channel_init"); 138 } 139 140 return (0); 141 } 142 143 /* 144 * scsipi_channel_shutdown: 145 * 146 * Shutdown a scsipi_channel. 147 */ 148 void 149 scsipi_channel_shutdown(struct scsipi_channel *chan) 150 { 151 152 /* 153 * Shut down the completion thread. 154 */ 155 chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN; 156 wakeup(&chan->chan_complete); 157 158 /* 159 * Now wait for the thread to exit. 160 */ 161 while (chan->chan_thread != NULL) 162 (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0); 163 } 164 165 static uint32_t 166 scsipi_chan_periph_hash(uint64_t t, uint64_t l) 167 { 168 uint32_t hash; 169 170 hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT); 171 hash = hash32_buf(&l, sizeof(l), hash); 172 173 return (hash & SCSIPI_CHAN_PERIPH_HASHMASK); 174 } 175 176 /* 177 * scsipi_insert_periph: 178 * 179 * Insert a periph into the channel. 180 */ 181 void 182 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph) 183 { 184 uint32_t hash; 185 int s; 186 187 hash = scsipi_chan_periph_hash(periph->periph_target, 188 periph->periph_lun); 189 190 s = splbio(); 191 LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash); 192 splx(s); 193 } 194 195 /* 196 * scsipi_remove_periph: 197 * 198 * Remove a periph from the channel. 199 */ 200 void 201 scsipi_remove_periph(struct scsipi_channel *chan, 202 struct scsipi_periph *periph) 203 { 204 int s; 205 206 s = splbio(); 207 LIST_REMOVE(periph, periph_hash); 208 splx(s); 209 } 210 211 /* 212 * scsipi_lookup_periph: 213 * 214 * Lookup a periph on the specified channel. 215 */ 216 struct scsipi_periph * 217 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun) 218 { 219 struct scsipi_periph *periph; 220 uint32_t hash; 221 int s; 222 223 if (target >= chan->chan_ntargets || 224 lun >= chan->chan_nluns) 225 return (NULL); 226 227 hash = scsipi_chan_periph_hash(target, lun); 228 229 s = splbio(); 230 LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) { 231 if (periph->periph_target == target && 232 periph->periph_lun == lun) 233 break; 234 } 235 splx(s); 236 237 return (periph); 238 } 239 240 /* 241 * scsipi_get_resource: 242 * 243 * Allocate a single xfer `resource' from the channel. 244 * 245 * NOTE: Must be called at splbio(). 246 */ 247 static int 248 scsipi_get_resource(struct scsipi_channel *chan) 249 { 250 struct scsipi_adapter *adapt = chan->chan_adapter; 251 252 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) { 253 if (chan->chan_openings > 0) { 254 chan->chan_openings--; 255 return (1); 256 } 257 return (0); 258 } 259 260 if (adapt->adapt_openings > 0) { 261 adapt->adapt_openings--; 262 return (1); 263 } 264 return (0); 265 } 266 267 /* 268 * scsipi_grow_resources: 269 * 270 * Attempt to grow resources for a channel. If this succeeds, 271 * we allocate one for our caller. 272 * 273 * NOTE: Must be called at splbio(). 274 */ 275 static inline int 276 scsipi_grow_resources(struct scsipi_channel *chan) 277 { 278 279 if (chan->chan_flags & SCSIPI_CHAN_CANGROW) { 280 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 281 scsipi_adapter_request(chan, 282 ADAPTER_REQ_GROW_RESOURCES, NULL); 283 return (scsipi_get_resource(chan)); 284 } 285 /* 286 * ask the channel thread to do it. It'll have to thaw the 287 * queue 288 */ 289 scsipi_channel_freeze(chan, 1); 290 chan->chan_tflags |= SCSIPI_CHANT_GROWRES; 291 wakeup(&chan->chan_complete); 292 return (0); 293 } 294 295 return (0); 296 } 297 298 /* 299 * scsipi_put_resource: 300 * 301 * Free a single xfer `resource' to the channel. 302 * 303 * NOTE: Must be called at splbio(). 304 */ 305 static void 306 scsipi_put_resource(struct scsipi_channel *chan) 307 { 308 struct scsipi_adapter *adapt = chan->chan_adapter; 309 310 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) 311 chan->chan_openings++; 312 else 313 adapt->adapt_openings++; 314 } 315 316 /* 317 * scsipi_get_tag: 318 * 319 * Get a tag ID for the specified xfer. 320 * 321 * NOTE: Must be called at splbio(). 322 */ 323 static void 324 scsipi_get_tag(struct scsipi_xfer *xs) 325 { 326 struct scsipi_periph *periph = xs->xs_periph; 327 int bit, tag; 328 u_int word; 329 330 bit = 0; /* XXX gcc */ 331 for (word = 0; word < PERIPH_NTAGWORDS; word++) { 332 bit = ffs(periph->periph_freetags[word]); 333 if (bit != 0) 334 break; 335 } 336 #ifdef DIAGNOSTIC 337 if (word == PERIPH_NTAGWORDS) { 338 scsipi_printaddr(periph); 339 printf("no free tags\n"); 340 panic("scsipi_get_tag"); 341 } 342 #endif 343 344 bit -= 1; 345 periph->periph_freetags[word] &= ~(1 << bit); 346 tag = (word << 5) | bit; 347 348 /* XXX Should eventually disallow this completely. */ 349 if (tag >= periph->periph_openings) { 350 scsipi_printaddr(periph); 351 printf("WARNING: tag %d greater than available openings %d\n", 352 tag, periph->periph_openings); 353 } 354 355 xs->xs_tag_id = tag; 356 } 357 358 /* 359 * scsipi_put_tag: 360 * 361 * Put the tag ID for the specified xfer back into the pool. 362 * 363 * NOTE: Must be called at splbio(). 364 */ 365 static void 366 scsipi_put_tag(struct scsipi_xfer *xs) 367 { 368 struct scsipi_periph *periph = xs->xs_periph; 369 int word, bit; 370 371 word = xs->xs_tag_id >> 5; 372 bit = xs->xs_tag_id & 0x1f; 373 374 periph->periph_freetags[word] |= (1 << bit); 375 } 376 377 /* 378 * scsipi_get_xs: 379 * 380 * Allocate an xfer descriptor and associate it with the 381 * specified peripherial. If the peripherial has no more 382 * available command openings, we either block waiting for 383 * one to become available, or fail. 384 */ 385 struct scsipi_xfer * 386 scsipi_get_xs(struct scsipi_periph *periph, int flags) 387 { 388 struct scsipi_xfer *xs; 389 int s; 390 391 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n")); 392 393 KASSERT(!cold); 394 395 #ifdef DIAGNOSTIC 396 /* 397 * URGENT commands can never be ASYNC. 398 */ 399 if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) == 400 (XS_CTL_URGENT|XS_CTL_ASYNC)) { 401 scsipi_printaddr(periph); 402 printf("URGENT and ASYNC\n"); 403 panic("scsipi_get_xs"); 404 } 405 #endif 406 407 s = splbio(); 408 /* 409 * Wait for a command opening to become available. Rules: 410 * 411 * - All xfers must wait for an available opening. 412 * Exception: URGENT xfers can proceed when 413 * active == openings, because we use the opening 414 * of the command we're recovering for. 415 * - if the periph has sense pending, only URGENT & REQSENSE 416 * xfers may proceed. 417 * 418 * - If the periph is recovering, only URGENT xfers may 419 * proceed. 420 * 421 * - If the periph is currently executing a recovery 422 * command, URGENT commands must block, because only 423 * one recovery command can execute at a time. 424 */ 425 for (;;) { 426 if (flags & XS_CTL_URGENT) { 427 if (periph->periph_active > periph->periph_openings) 428 goto wait_for_opening; 429 if (periph->periph_flags & PERIPH_SENSE) { 430 if ((flags & XS_CTL_REQSENSE) == 0) 431 goto wait_for_opening; 432 } else { 433 if ((periph->periph_flags & 434 PERIPH_RECOVERY_ACTIVE) != 0) 435 goto wait_for_opening; 436 periph->periph_flags |= PERIPH_RECOVERY_ACTIVE; 437 } 438 break; 439 } 440 if (periph->periph_active >= periph->periph_openings || 441 (periph->periph_flags & PERIPH_RECOVERING) != 0) 442 goto wait_for_opening; 443 periph->periph_active++; 444 break; 445 446 wait_for_opening: 447 if (flags & XS_CTL_NOSLEEP) { 448 splx(s); 449 return (NULL); 450 } 451 SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n")); 452 periph->periph_flags |= PERIPH_WAITING; 453 (void) tsleep(periph, PRIBIO, "getxs", 0); 454 } 455 SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n")); 456 xs = pool_get(&scsipi_xfer_pool, 457 ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK)); 458 if (xs == NULL) { 459 if (flags & XS_CTL_URGENT) { 460 if ((flags & XS_CTL_REQSENSE) == 0) 461 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE; 462 } else 463 periph->periph_active--; 464 scsipi_printaddr(periph); 465 printf("unable to allocate %sscsipi_xfer\n", 466 (flags & XS_CTL_URGENT) ? "URGENT " : ""); 467 } 468 splx(s); 469 470 SC_DEBUG(periph, SCSIPI_DB3, ("returning\n")); 471 472 if (xs != NULL) { 473 memset(xs, 0, sizeof(*xs)); 474 callout_init(&xs->xs_callout, 0); 475 xs->xs_periph = periph; 476 xs->xs_control = flags; 477 xs->xs_status = 0; 478 s = splbio(); 479 TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q); 480 splx(s); 481 } 482 return (xs); 483 } 484 485 /* 486 * scsipi_put_xs: 487 * 488 * Release an xfer descriptor, decreasing the outstanding command 489 * count for the peripherial. If there is a thread waiting for 490 * an opening, wake it up. If not, kick any queued I/O the 491 * peripherial may have. 492 * 493 * NOTE: Must be called at splbio(). 494 */ 495 void 496 scsipi_put_xs(struct scsipi_xfer *xs) 497 { 498 struct scsipi_periph *periph = xs->xs_periph; 499 int flags = xs->xs_control; 500 501 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n")); 502 503 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q); 504 callout_destroy(&xs->xs_callout); 505 pool_put(&scsipi_xfer_pool, xs); 506 507 #ifdef DIAGNOSTIC 508 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 && 509 periph->periph_active == 0) { 510 scsipi_printaddr(periph); 511 printf("recovery without a command to recovery for\n"); 512 panic("scsipi_put_xs"); 513 } 514 #endif 515 516 if (flags & XS_CTL_URGENT) { 517 if ((flags & XS_CTL_REQSENSE) == 0) 518 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE; 519 } else 520 periph->periph_active--; 521 if (periph->periph_active == 0 && 522 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) { 523 periph->periph_flags &= ~PERIPH_WAITDRAIN; 524 wakeup(&periph->periph_active); 525 } 526 527 if (periph->periph_flags & PERIPH_WAITING) { 528 periph->periph_flags &= ~PERIPH_WAITING; 529 wakeup(periph); 530 } else { 531 if (periph->periph_switch->psw_start != NULL && 532 device_is_active(periph->periph_dev)) { 533 SC_DEBUG(periph, SCSIPI_DB2, 534 ("calling private start()\n")); 535 (*periph->periph_switch->psw_start)(periph); 536 } 537 } 538 } 539 540 /* 541 * scsipi_channel_freeze: 542 * 543 * Freeze a channel's xfer queue. 544 */ 545 void 546 scsipi_channel_freeze(struct scsipi_channel *chan, int count) 547 { 548 int s; 549 550 s = splbio(); 551 chan->chan_qfreeze += count; 552 splx(s); 553 } 554 555 /* 556 * scsipi_channel_thaw: 557 * 558 * Thaw a channel's xfer queue. 559 */ 560 void 561 scsipi_channel_thaw(struct scsipi_channel *chan, int count) 562 { 563 int s; 564 565 s = splbio(); 566 chan->chan_qfreeze -= count; 567 /* 568 * Don't let the freeze count go negative. 569 * 570 * Presumably the adapter driver could keep track of this, 571 * but it might just be easier to do this here so as to allow 572 * multiple callers, including those outside the adapter driver. 573 */ 574 if (chan->chan_qfreeze < 0) { 575 chan->chan_qfreeze = 0; 576 } 577 splx(s); 578 /* 579 * Kick the channel's queue here. Note, we may be running in 580 * interrupt context (softclock or HBA's interrupt), so the adapter 581 * driver had better not sleep. 582 */ 583 if (chan->chan_qfreeze == 0) 584 scsipi_run_queue(chan); 585 } 586 587 /* 588 * scsipi_channel_timed_thaw: 589 * 590 * Thaw a channel after some time has expired. This will also 591 * run the channel's queue if the freeze count has reached 0. 592 */ 593 void 594 scsipi_channel_timed_thaw(void *arg) 595 { 596 struct scsipi_channel *chan = arg; 597 598 scsipi_channel_thaw(chan, 1); 599 } 600 601 /* 602 * scsipi_periph_freeze: 603 * 604 * Freeze a device's xfer queue. 605 */ 606 void 607 scsipi_periph_freeze(struct scsipi_periph *periph, int count) 608 { 609 int s; 610 611 s = splbio(); 612 periph->periph_qfreeze += count; 613 splx(s); 614 } 615 616 /* 617 * scsipi_periph_thaw: 618 * 619 * Thaw a device's xfer queue. 620 */ 621 void 622 scsipi_periph_thaw(struct scsipi_periph *periph, int count) 623 { 624 int s; 625 626 s = splbio(); 627 periph->periph_qfreeze -= count; 628 #ifdef DIAGNOSTIC 629 if (periph->periph_qfreeze < 0) { 630 static const char pc[] = "periph freeze count < 0"; 631 scsipi_printaddr(periph); 632 printf("%s\n", pc); 633 panic(pc); 634 } 635 #endif 636 if (periph->periph_qfreeze == 0 && 637 (periph->periph_flags & PERIPH_WAITING) != 0) 638 wakeup(periph); 639 splx(s); 640 } 641 642 /* 643 * scsipi_periph_timed_thaw: 644 * 645 * Thaw a device after some time has expired. 646 */ 647 void 648 scsipi_periph_timed_thaw(void *arg) 649 { 650 int s; 651 struct scsipi_periph *periph = arg; 652 653 callout_stop(&periph->periph_callout); 654 655 s = splbio(); 656 scsipi_periph_thaw(periph, 1); 657 if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 658 /* 659 * Kick the channel's queue here. Note, we're running in 660 * interrupt context (softclock), so the adapter driver 661 * had better not sleep. 662 */ 663 scsipi_run_queue(periph->periph_channel); 664 } else { 665 /* 666 * Tell the completion thread to kick the channel's queue here. 667 */ 668 periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK; 669 wakeup(&periph->periph_channel->chan_complete); 670 } 671 splx(s); 672 } 673 674 /* 675 * scsipi_wait_drain: 676 * 677 * Wait for a periph's pending xfers to drain. 678 */ 679 void 680 scsipi_wait_drain(struct scsipi_periph *periph) 681 { 682 int s; 683 684 s = splbio(); 685 while (periph->periph_active != 0) { 686 periph->periph_flags |= PERIPH_WAITDRAIN; 687 (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0); 688 } 689 splx(s); 690 } 691 692 /* 693 * scsipi_kill_pending: 694 * 695 * Kill off all pending xfers for a periph. 696 * 697 * NOTE: Must be called at splbio(). 698 */ 699 void 700 scsipi_kill_pending(struct scsipi_periph *periph) 701 { 702 703 (*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph); 704 scsipi_wait_drain(periph); 705 } 706 707 /* 708 * scsipi_print_cdb: 709 * prints a command descriptor block (for debug purpose, error messages, 710 * SCSIVERBOSE, ...) 711 */ 712 void 713 scsipi_print_cdb(struct scsipi_generic *cmd) 714 { 715 int i, j; 716 717 printf("0x%02x", cmd->opcode); 718 719 switch (CDB_GROUPID(cmd->opcode)) { 720 case CDB_GROUPID_0: 721 j = CDB_GROUP0; 722 break; 723 case CDB_GROUPID_1: 724 j = CDB_GROUP1; 725 break; 726 case CDB_GROUPID_2: 727 j = CDB_GROUP2; 728 break; 729 case CDB_GROUPID_3: 730 j = CDB_GROUP3; 731 break; 732 case CDB_GROUPID_4: 733 j = CDB_GROUP4; 734 break; 735 case CDB_GROUPID_5: 736 j = CDB_GROUP5; 737 break; 738 case CDB_GROUPID_6: 739 j = CDB_GROUP6; 740 break; 741 case CDB_GROUPID_7: 742 j = CDB_GROUP7; 743 break; 744 default: 745 j = 0; 746 } 747 if (j == 0) 748 j = sizeof (cmd->bytes); 749 for (i = 0; i < j-1; i++) /* already done the opcode */ 750 printf(" %02x", cmd->bytes[i]); 751 } 752 753 /* 754 * scsipi_interpret_sense: 755 * 756 * Look at the returned sense and act on the error, determining 757 * the unix error number to pass back. (0 = report no error) 758 * 759 * NOTE: If we return ERESTART, we are expected to haved 760 * thawed the device! 761 * 762 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES. 763 */ 764 int 765 scsipi_interpret_sense(struct scsipi_xfer *xs) 766 { 767 struct scsi_sense_data *sense; 768 struct scsipi_periph *periph = xs->xs_periph; 769 u_int8_t key; 770 int error; 771 u_int32_t info; 772 static const char *error_mes[] = { 773 "soft error (corrected)", 774 "not ready", "medium error", 775 "non-media hardware failure", "illegal request", 776 "unit attention", "readonly device", 777 "no data found", "vendor unique", 778 "copy aborted", "command aborted", 779 "search returned equal", "volume overflow", 780 "verify miscompare", "unknown error key" 781 }; 782 783 sense = &xs->sense.scsi_sense; 784 #ifdef SCSIPI_DEBUG 785 if (periph->periph_flags & SCSIPI_DB1) { 786 int count; 787 scsipi_printaddr(periph); 788 printf(" sense debug information:\n"); 789 printf("\tcode 0x%x valid %d\n", 790 SSD_RCODE(sense->response_code), 791 sense->response_code & SSD_RCODE_VALID ? 1 : 0); 792 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n", 793 sense->segment, 794 SSD_SENSE_KEY(sense->flags), 795 sense->flags & SSD_ILI ? 1 : 0, 796 sense->flags & SSD_EOM ? 1 : 0, 797 sense->flags & SSD_FILEMARK ? 1 : 0); 798 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d " 799 "extra bytes\n", 800 sense->info[0], 801 sense->info[1], 802 sense->info[2], 803 sense->info[3], 804 sense->extra_len); 805 printf("\textra: "); 806 for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++) 807 printf("0x%x ", sense->csi[count]); 808 printf("\n"); 809 } 810 #endif 811 812 /* 813 * If the periph has it's own error handler, call it first. 814 * If it returns a legit error value, return that, otherwise 815 * it wants us to continue with normal error processing. 816 */ 817 if (periph->periph_switch->psw_error != NULL) { 818 SC_DEBUG(periph, SCSIPI_DB2, 819 ("calling private err_handler()\n")); 820 error = (*periph->periph_switch->psw_error)(xs); 821 if (error != EJUSTRETURN) 822 return (error); 823 } 824 /* otherwise use the default */ 825 switch (SSD_RCODE(sense->response_code)) { 826 827 /* 828 * Old SCSI-1 and SASI devices respond with 829 * codes other than 70. 830 */ 831 case 0x00: /* no error (command completed OK) */ 832 return (0); 833 case 0x04: /* drive not ready after it was selected */ 834 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0) 835 periph->periph_flags &= ~PERIPH_MEDIA_LOADED; 836 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0) 837 return (0); 838 /* XXX - display some sort of error here? */ 839 return (EIO); 840 case 0x20: /* invalid command */ 841 if ((xs->xs_control & 842 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0) 843 return (0); 844 return (EINVAL); 845 case 0x25: /* invalid LUN (Adaptec ACB-4000) */ 846 return (EACCES); 847 848 /* 849 * If it's code 70, use the extended stuff and 850 * interpret the key 851 */ 852 case 0x71: /* delayed error */ 853 scsipi_printaddr(periph); 854 key = SSD_SENSE_KEY(sense->flags); 855 printf(" DEFERRED ERROR, key = 0x%x\n", key); 856 /* FALLTHROUGH */ 857 case 0x70: 858 if ((sense->response_code & SSD_RCODE_VALID) != 0) 859 info = _4btol(sense->info); 860 else 861 info = 0; 862 key = SSD_SENSE_KEY(sense->flags); 863 864 switch (key) { 865 case SKEY_NO_SENSE: 866 case SKEY_RECOVERED_ERROR: 867 if (xs->resid == xs->datalen && xs->datalen) { 868 /* 869 * Why is this here? 870 */ 871 xs->resid = 0; /* not short read */ 872 } 873 case SKEY_EQUAL: 874 error = 0; 875 break; 876 case SKEY_NOT_READY: 877 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0) 878 periph->periph_flags &= ~PERIPH_MEDIA_LOADED; 879 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0) 880 return (0); 881 if (sense->asc == 0x3A) { 882 error = ENODEV; /* Medium not present */ 883 if (xs->xs_control & XS_CTL_SILENT_NODEV) 884 return (error); 885 } else 886 error = EIO; 887 if ((xs->xs_control & XS_CTL_SILENT) != 0) 888 return (error); 889 break; 890 case SKEY_ILLEGAL_REQUEST: 891 if ((xs->xs_control & 892 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0) 893 return (0); 894 /* 895 * Handle the case where a device reports 896 * Logical Unit Not Supported during discovery. 897 */ 898 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 && 899 sense->asc == 0x25 && 900 sense->ascq == 0x00) 901 return (EINVAL); 902 if ((xs->xs_control & XS_CTL_SILENT) != 0) 903 return (EIO); 904 error = EINVAL; 905 break; 906 case SKEY_UNIT_ATTENTION: 907 if (sense->asc == 0x29 && 908 sense->ascq == 0x00) { 909 /* device or bus reset */ 910 return (ERESTART); 911 } 912 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0) 913 periph->periph_flags &= ~PERIPH_MEDIA_LOADED; 914 if ((xs->xs_control & 915 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 || 916 /* XXX Should reupload any transient state. */ 917 (periph->periph_flags & 918 PERIPH_REMOVABLE) == 0) { 919 return (ERESTART); 920 } 921 if ((xs->xs_control & XS_CTL_SILENT) != 0) 922 return (EIO); 923 error = EIO; 924 break; 925 case SKEY_DATA_PROTECT: 926 error = EROFS; 927 break; 928 case SKEY_BLANK_CHECK: 929 error = 0; 930 break; 931 case SKEY_ABORTED_COMMAND: 932 if (xs->xs_retries != 0) { 933 xs->xs_retries--; 934 error = ERESTART; 935 } else 936 error = EIO; 937 break; 938 case SKEY_VOLUME_OVERFLOW: 939 error = ENOSPC; 940 break; 941 default: 942 error = EIO; 943 break; 944 } 945 946 /* Print verbose decode if appropriate and possible */ 947 if ((key == 0) || 948 ((xs->xs_control & XS_CTL_SILENT) != 0) || 949 (scsipi_print_sense(xs, 0) != 0)) 950 return (error); 951 952 /* Print brief(er) sense information */ 953 scsipi_printaddr(periph); 954 printf("%s", error_mes[key - 1]); 955 if ((sense->response_code & SSD_RCODE_VALID) != 0) { 956 switch (key) { 957 case SKEY_NOT_READY: 958 case SKEY_ILLEGAL_REQUEST: 959 case SKEY_UNIT_ATTENTION: 960 case SKEY_DATA_PROTECT: 961 break; 962 case SKEY_BLANK_CHECK: 963 printf(", requested size: %d (decimal)", 964 info); 965 break; 966 case SKEY_ABORTED_COMMAND: 967 if (xs->xs_retries) 968 printf(", retrying"); 969 printf(", cmd 0x%x, info 0x%x", 970 xs->cmd->opcode, info); 971 break; 972 default: 973 printf(", info = %d (decimal)", info); 974 } 975 } 976 if (sense->extra_len != 0) { 977 int n; 978 printf(", data ="); 979 for (n = 0; n < sense->extra_len; n++) 980 printf(" %02x", 981 sense->csi[n]); 982 } 983 printf("\n"); 984 return (error); 985 986 /* 987 * Some other code, just report it 988 */ 989 default: 990 #if defined(SCSIDEBUG) || defined(DEBUG) 991 { 992 static const char *uc = "undecodable sense error"; 993 int i; 994 u_int8_t *cptr = (u_int8_t *) sense; 995 scsipi_printaddr(periph); 996 if (xs->cmd == &xs->cmdstore) { 997 printf("%s for opcode 0x%x, data=", 998 uc, xs->cmdstore.opcode); 999 } else { 1000 printf("%s, data=", uc); 1001 } 1002 for (i = 0; i < sizeof (sense); i++) 1003 printf(" 0x%02x", *(cptr++) & 0xff); 1004 printf("\n"); 1005 } 1006 #else 1007 scsipi_printaddr(periph); 1008 printf("Sense Error Code 0x%x", 1009 SSD_RCODE(sense->response_code)); 1010 if ((sense->response_code & SSD_RCODE_VALID) != 0) { 1011 struct scsi_sense_data_unextended *usense = 1012 (struct scsi_sense_data_unextended *)sense; 1013 printf(" at block no. %d (decimal)", 1014 _3btol(usense->block)); 1015 } 1016 printf("\n"); 1017 #endif 1018 return (EIO); 1019 } 1020 } 1021 1022 /* 1023 * scsipi_test_unit_ready: 1024 * 1025 * Issue a `test unit ready' request. 1026 */ 1027 int 1028 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags) 1029 { 1030 struct scsi_test_unit_ready cmd; 1031 int retries; 1032 1033 /* some ATAPI drives don't support TEST UNIT READY. Sigh */ 1034 if (periph->periph_quirks & PQUIRK_NOTUR) 1035 return (0); 1036 1037 if (flags & XS_CTL_DISCOVERY) 1038 retries = 0; 1039 else 1040 retries = SCSIPIRETRIES; 1041 1042 memset(&cmd, 0, sizeof(cmd)); 1043 cmd.opcode = SCSI_TEST_UNIT_READY; 1044 1045 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0, 1046 retries, 10000, NULL, flags)); 1047 } 1048 1049 /* 1050 * scsipi_inquire: 1051 * 1052 * Ask the device about itself. 1053 */ 1054 int 1055 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf, 1056 int flags) 1057 { 1058 struct scsipi_inquiry cmd; 1059 int error; 1060 int retries; 1061 1062 if (flags & XS_CTL_DISCOVERY) 1063 retries = 0; 1064 else 1065 retries = SCSIPIRETRIES; 1066 1067 /* 1068 * If we request more data than the device can provide, it SHOULD just 1069 * return a short reponse. However, some devices error with an 1070 * ILLEGAL REQUEST sense code, and yet others have even more special 1071 * failture modes (such as the GL641USB flash adapter, which goes loony 1072 * and sends corrupted CRCs). To work around this, and to bring our 1073 * behavior more in line with other OSes, we do a shorter inquiry, 1074 * covering all the SCSI-2 information, first, and then request more 1075 * data iff the "additional length" field indicates there is more. 1076 * - mycroft, 2003/10/16 1077 */ 1078 memset(&cmd, 0, sizeof(cmd)); 1079 cmd.opcode = INQUIRY; 1080 cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2; 1081 error = scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1082 (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries, 1083 10000, NULL, flags | XS_CTL_DATA_IN); 1084 if (!error && 1085 inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) { 1086 #if 0 1087 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length); 1088 #endif 1089 cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3; 1090 error = scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1091 (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries, 1092 10000, NULL, flags | XS_CTL_DATA_IN); 1093 #if 0 1094 printf("inquire: error=%d\n", error); 1095 #endif 1096 } 1097 1098 #ifdef SCSI_OLD_NOINQUIRY 1099 /* 1100 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator. 1101 * This board doesn't support the INQUIRY command at all. 1102 */ 1103 if (error == EINVAL || error == EACCES) { 1104 /* 1105 * Conjure up an INQUIRY response. 1106 */ 1107 inqbuf->device = (error == EINVAL ? 1108 SID_QUAL_LU_PRESENT : 1109 SID_QUAL_LU_NOTPRESENT) | T_DIRECT; 1110 inqbuf->dev_qual2 = 0; 1111 inqbuf->version = 0; 1112 inqbuf->response_format = SID_FORMAT_SCSI1; 1113 inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4; 1114 inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0; 1115 memcpy(inqbuf->vendor, "ADAPTEC ACB-4000 ", 28); 1116 error = 0; 1117 } 1118 1119 /* 1120 * Kludge for the Emulex MT-02 SCSI->QIC translator. 1121 * This board gives an empty response to an INQUIRY command. 1122 */ 1123 else if (error == 0 && 1124 inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) && 1125 inqbuf->dev_qual2 == 0 && 1126 inqbuf->version == 0 && 1127 inqbuf->response_format == SID_FORMAT_SCSI1) { 1128 /* 1129 * Fill out the INQUIRY response. 1130 */ 1131 inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL); 1132 inqbuf->dev_qual2 = SID_REMOVABLE; 1133 inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4; 1134 inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0; 1135 memcpy(inqbuf->vendor, "EMULEX MT-02 QIC ", 28); 1136 } 1137 #endif /* SCSI_OLD_NOINQUIRY */ 1138 1139 return error; 1140 } 1141 1142 /* 1143 * scsipi_prevent: 1144 * 1145 * Prevent or allow the user to remove the media 1146 */ 1147 int 1148 scsipi_prevent(struct scsipi_periph *periph, int type, int flags) 1149 { 1150 struct scsi_prevent_allow_medium_removal cmd; 1151 1152 if (periph->periph_quirks & PQUIRK_NODOORLOCK) 1153 return 0; 1154 1155 memset(&cmd, 0, sizeof(cmd)); 1156 cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL; 1157 cmd.how = type; 1158 1159 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0, 1160 SCSIPIRETRIES, 5000, NULL, flags)); 1161 } 1162 1163 /* 1164 * scsipi_start: 1165 * 1166 * Send a START UNIT. 1167 */ 1168 int 1169 scsipi_start(struct scsipi_periph *periph, int type, int flags) 1170 { 1171 struct scsipi_start_stop cmd; 1172 1173 memset(&cmd, 0, sizeof(cmd)); 1174 cmd.opcode = START_STOP; 1175 cmd.byte2 = 0x00; 1176 cmd.how = type; 1177 1178 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0, 1179 SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags)); 1180 } 1181 1182 /* 1183 * scsipi_mode_sense, scsipi_mode_sense_big: 1184 * get a sense page from a device 1185 */ 1186 1187 int 1188 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page, 1189 struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries, 1190 int timeout) 1191 { 1192 struct scsi_mode_sense_6 cmd; 1193 1194 memset(&cmd, 0, sizeof(cmd)); 1195 cmd.opcode = SCSI_MODE_SENSE_6; 1196 cmd.byte2 = byte2; 1197 cmd.page = page; 1198 cmd.length = len & 0xff; 1199 1200 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1201 (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN)); 1202 } 1203 1204 int 1205 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page, 1206 struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries, 1207 int timeout) 1208 { 1209 struct scsi_mode_sense_10 cmd; 1210 1211 memset(&cmd, 0, sizeof(cmd)); 1212 cmd.opcode = SCSI_MODE_SENSE_10; 1213 cmd.byte2 = byte2; 1214 cmd.page = page; 1215 _lto2b(len, cmd.length); 1216 1217 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1218 (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN)); 1219 } 1220 1221 int 1222 scsipi_mode_select(struct scsipi_periph *periph, int byte2, 1223 struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries, 1224 int timeout) 1225 { 1226 struct scsi_mode_select_6 cmd; 1227 1228 memset(&cmd, 0, sizeof(cmd)); 1229 cmd.opcode = SCSI_MODE_SELECT_6; 1230 cmd.byte2 = byte2; 1231 cmd.length = len & 0xff; 1232 1233 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1234 (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT)); 1235 } 1236 1237 int 1238 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2, 1239 struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries, 1240 int timeout) 1241 { 1242 struct scsi_mode_select_10 cmd; 1243 1244 memset(&cmd, 0, sizeof(cmd)); 1245 cmd.opcode = SCSI_MODE_SELECT_10; 1246 cmd.byte2 = byte2; 1247 _lto2b(len, cmd.length); 1248 1249 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1250 (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT)); 1251 } 1252 1253 /* 1254 * scsipi_done: 1255 * 1256 * This routine is called by an adapter's interrupt handler when 1257 * an xfer is completed. 1258 */ 1259 void 1260 scsipi_done(struct scsipi_xfer *xs) 1261 { 1262 struct scsipi_periph *periph = xs->xs_periph; 1263 struct scsipi_channel *chan = periph->periph_channel; 1264 int s, freezecnt; 1265 1266 SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n")); 1267 #ifdef SCSIPI_DEBUG 1268 if (periph->periph_dbflags & SCSIPI_DB1) 1269 show_scsipi_cmd(xs); 1270 #endif 1271 1272 s = splbio(); 1273 /* 1274 * The resource this command was using is now free. 1275 */ 1276 if (xs->xs_status & XS_STS_DONE) { 1277 /* XXX in certain circumstances, such as a device 1278 * being detached, a xs that has already been 1279 * scsipi_done()'d by the main thread will be done'd 1280 * again by scsibusdetach(). Putting the xs on the 1281 * chan_complete queue causes list corruption and 1282 * everyone dies. This prevents that, but perhaps 1283 * there should be better coordination somewhere such 1284 * that this won't ever happen (and can be turned into 1285 * a KASSERT(). 1286 */ 1287 splx(s); 1288 goto out; 1289 } 1290 scsipi_put_resource(chan); 1291 xs->xs_periph->periph_sent--; 1292 1293 /* 1294 * If the command was tagged, free the tag. 1295 */ 1296 if (XS_CTL_TAGTYPE(xs) != 0) 1297 scsipi_put_tag(xs); 1298 else 1299 periph->periph_flags &= ~PERIPH_UNTAG; 1300 1301 /* Mark the command as `done'. */ 1302 xs->xs_status |= XS_STS_DONE; 1303 1304 #ifdef DIAGNOSTIC 1305 if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) == 1306 (XS_CTL_ASYNC|XS_CTL_POLL)) 1307 panic("scsipi_done: ASYNC and POLL"); 1308 #endif 1309 1310 /* 1311 * If the xfer had an error of any sort, freeze the 1312 * periph's queue. Freeze it again if we were requested 1313 * to do so in the xfer. 1314 */ 1315 freezecnt = 0; 1316 if (xs->error != XS_NOERROR) 1317 freezecnt++; 1318 if (xs->xs_control & XS_CTL_FREEZE_PERIPH) 1319 freezecnt++; 1320 if (freezecnt != 0) 1321 scsipi_periph_freeze(periph, freezecnt); 1322 1323 /* 1324 * record the xfer with a pending sense, in case a SCSI reset is 1325 * received before the thread is waked up. 1326 */ 1327 if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) { 1328 periph->periph_flags |= PERIPH_SENSE; 1329 periph->periph_xscheck = xs; 1330 } 1331 1332 /* 1333 * If this was an xfer that was not to complete asynchronously, 1334 * let the requesting thread perform error checking/handling 1335 * in its context. 1336 */ 1337 if ((xs->xs_control & XS_CTL_ASYNC) == 0) { 1338 splx(s); 1339 /* 1340 * If it's a polling job, just return, to unwind the 1341 * call graph. We don't need to restart the queue, 1342 * because pollings jobs are treated specially, and 1343 * are really only used during crash dumps anyway 1344 * (XXX or during boot-time autconfiguration of 1345 * ATAPI devices). 1346 */ 1347 if (xs->xs_control & XS_CTL_POLL) 1348 return; 1349 wakeup(xs); 1350 goto out; 1351 } 1352 1353 /* 1354 * Catch the extremely common case of I/O completing 1355 * without error; no use in taking a context switch 1356 * if we can handle it in interrupt context. 1357 */ 1358 if (xs->error == XS_NOERROR) { 1359 splx(s); 1360 (void) scsipi_complete(xs); 1361 goto out; 1362 } 1363 1364 /* 1365 * There is an error on this xfer. Put it on the channel's 1366 * completion queue, and wake up the completion thread. 1367 */ 1368 TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q); 1369 splx(s); 1370 wakeup(&chan->chan_complete); 1371 1372 out: 1373 /* 1374 * If there are more xfers on the channel's queue, attempt to 1375 * run them. 1376 */ 1377 scsipi_run_queue(chan); 1378 } 1379 1380 /* 1381 * scsipi_complete: 1382 * 1383 * Completion of a scsipi_xfer. This is the guts of scsipi_done(). 1384 * 1385 * NOTE: This routine MUST be called with valid thread context 1386 * except for the case where the following two conditions are 1387 * true: 1388 * 1389 * xs->error == XS_NOERROR 1390 * XS_CTL_ASYNC is set in xs->xs_control 1391 * 1392 * The semantics of this routine can be tricky, so here is an 1393 * explanation: 1394 * 1395 * 0 Xfer completed successfully. 1396 * 1397 * ERESTART Xfer had an error, but was restarted. 1398 * 1399 * anything else Xfer had an error, return value is Unix 1400 * errno. 1401 * 1402 * If the return value is anything but ERESTART: 1403 * 1404 * - If XS_CTL_ASYNC is set, `xs' has been freed back to 1405 * the pool. 1406 * - If there is a buf associated with the xfer, 1407 * it has been biodone()'d. 1408 */ 1409 static int 1410 scsipi_complete(struct scsipi_xfer *xs) 1411 { 1412 struct scsipi_periph *periph = xs->xs_periph; 1413 struct scsipi_channel *chan = periph->periph_channel; 1414 int error, s; 1415 1416 #ifdef DIAGNOSTIC 1417 if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL) 1418 panic("scsipi_complete: XS_CTL_ASYNC but no buf"); 1419 #endif 1420 /* 1421 * If command terminated with a CHECK CONDITION, we need to issue a 1422 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed 1423 * we'll have the real status. 1424 * Must be processed at splbio() to avoid missing a SCSI bus reset 1425 * for this command. 1426 */ 1427 s = splbio(); 1428 if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) { 1429 /* request sense for a request sense ? */ 1430 if (xs->xs_control & XS_CTL_REQSENSE) { 1431 scsipi_printaddr(periph); 1432 printf("request sense for a request sense ?\n"); 1433 /* XXX maybe we should reset the device ? */ 1434 /* we've been frozen because xs->error != XS_NOERROR */ 1435 scsipi_periph_thaw(periph, 1); 1436 splx(s); 1437 if (xs->resid < xs->datalen) { 1438 printf("we read %d bytes of sense anyway:\n", 1439 xs->datalen - xs->resid); 1440 scsipi_print_sense_data((void *)xs->data, 0); 1441 } 1442 return EINVAL; 1443 } 1444 scsipi_request_sense(xs); 1445 } 1446 splx(s); 1447 1448 /* 1449 * If it's a user level request, bypass all usual completion 1450 * processing, let the user work it out.. 1451 */ 1452 if ((xs->xs_control & XS_CTL_USERCMD) != 0) { 1453 SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n")); 1454 if (xs->error != XS_NOERROR) 1455 scsipi_periph_thaw(periph, 1); 1456 scsipi_user_done(xs); 1457 SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n ")); 1458 return 0; 1459 } 1460 1461 switch (xs->error) { 1462 case XS_NOERROR: 1463 error = 0; 1464 break; 1465 1466 case XS_SENSE: 1467 case XS_SHORTSENSE: 1468 error = (*chan->chan_bustype->bustype_interpret_sense)(xs); 1469 break; 1470 1471 case XS_RESOURCE_SHORTAGE: 1472 /* 1473 * XXX Should freeze channel's queue. 1474 */ 1475 scsipi_printaddr(periph); 1476 printf("adapter resource shortage\n"); 1477 /* FALLTHROUGH */ 1478 1479 case XS_BUSY: 1480 if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) { 1481 struct scsipi_max_openings mo; 1482 1483 /* 1484 * We set the openings to active - 1, assuming that 1485 * the command that got us here is the first one that 1486 * can't fit into the device's queue. If that's not 1487 * the case, I guess we'll find out soon enough. 1488 */ 1489 mo.mo_target = periph->periph_target; 1490 mo.mo_lun = periph->periph_lun; 1491 if (periph->periph_active < periph->periph_openings) 1492 mo.mo_openings = periph->periph_active - 1; 1493 else 1494 mo.mo_openings = periph->periph_openings - 1; 1495 #ifdef DIAGNOSTIC 1496 if (mo.mo_openings < 0) { 1497 scsipi_printaddr(periph); 1498 printf("QUEUE FULL resulted in < 0 openings\n"); 1499 panic("scsipi_done"); 1500 } 1501 #endif 1502 if (mo.mo_openings == 0) { 1503 scsipi_printaddr(periph); 1504 printf("QUEUE FULL resulted in 0 openings\n"); 1505 mo.mo_openings = 1; 1506 } 1507 scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo); 1508 error = ERESTART; 1509 } else if (xs->xs_retries != 0) { 1510 xs->xs_retries--; 1511 /* 1512 * Wait one second, and try again. 1513 */ 1514 if ((xs->xs_control & XS_CTL_POLL) || 1515 (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 1516 /* XXX: quite extreme */ 1517 kpause("xsbusy", false, hz, NULL); 1518 } else if (!callout_pending(&periph->periph_callout)) { 1519 scsipi_periph_freeze(periph, 1); 1520 callout_reset(&periph->periph_callout, 1521 hz, scsipi_periph_timed_thaw, periph); 1522 } 1523 error = ERESTART; 1524 } else 1525 error = EBUSY; 1526 break; 1527 1528 case XS_REQUEUE: 1529 error = ERESTART; 1530 break; 1531 1532 case XS_SELTIMEOUT: 1533 case XS_TIMEOUT: 1534 /* 1535 * If the device hasn't gone away, honor retry counts. 1536 * 1537 * Note that if we're in the middle of probing it, 1538 * it won't be found because it isn't here yet so 1539 * we won't honor the retry count in that case. 1540 */ 1541 if (scsipi_lookup_periph(chan, periph->periph_target, 1542 periph->periph_lun) && xs->xs_retries != 0) { 1543 xs->xs_retries--; 1544 error = ERESTART; 1545 } else 1546 error = EIO; 1547 break; 1548 1549 case XS_RESET: 1550 if (xs->xs_control & XS_CTL_REQSENSE) { 1551 /* 1552 * request sense interrupted by reset: signal it 1553 * with EINTR return code. 1554 */ 1555 error = EINTR; 1556 } else { 1557 if (xs->xs_retries != 0) { 1558 xs->xs_retries--; 1559 error = ERESTART; 1560 } else 1561 error = EIO; 1562 } 1563 break; 1564 1565 case XS_DRIVER_STUFFUP: 1566 scsipi_printaddr(periph); 1567 printf("generic HBA error\n"); 1568 error = EIO; 1569 break; 1570 default: 1571 scsipi_printaddr(periph); 1572 printf("invalid return code from adapter: %d\n", xs->error); 1573 error = EIO; 1574 break; 1575 } 1576 1577 s = splbio(); 1578 if (error == ERESTART) { 1579 /* 1580 * If we get here, the periph has been thawed and frozen 1581 * again if we had to issue recovery commands. Alternatively, 1582 * it may have been frozen again and in a timed thaw. In 1583 * any case, we thaw the periph once we re-enqueue the 1584 * command. Once the periph is fully thawed, it will begin 1585 * operation again. 1586 */ 1587 xs->error = XS_NOERROR; 1588 xs->status = SCSI_OK; 1589 xs->xs_status &= ~XS_STS_DONE; 1590 xs->xs_requeuecnt++; 1591 error = scsipi_enqueue(xs); 1592 if (error == 0) { 1593 scsipi_periph_thaw(periph, 1); 1594 splx(s); 1595 return (ERESTART); 1596 } 1597 } 1598 1599 /* 1600 * scsipi_done() freezes the queue if not XS_NOERROR. 1601 * Thaw it here. 1602 */ 1603 if (xs->error != XS_NOERROR) 1604 scsipi_periph_thaw(periph, 1); 1605 1606 if (periph->periph_switch->psw_done) 1607 periph->periph_switch->psw_done(xs, error); 1608 1609 if (xs->xs_control & XS_CTL_ASYNC) 1610 scsipi_put_xs(xs); 1611 splx(s); 1612 1613 return (error); 1614 } 1615 1616 /* 1617 * Issue a request sense for the given scsipi_xfer. Called when the xfer 1618 * returns with a CHECK_CONDITION status. Must be called in valid thread 1619 * context and at splbio(). 1620 */ 1621 1622 static void 1623 scsipi_request_sense(struct scsipi_xfer *xs) 1624 { 1625 struct scsipi_periph *periph = xs->xs_periph; 1626 int flags, error; 1627 struct scsi_request_sense cmd; 1628 1629 periph->periph_flags |= PERIPH_SENSE; 1630 1631 /* if command was polling, request sense will too */ 1632 flags = xs->xs_control & XS_CTL_POLL; 1633 /* Polling commands can't sleep */ 1634 if (flags) 1635 flags |= XS_CTL_NOSLEEP; 1636 1637 flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN | 1638 XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH; 1639 1640 memset(&cmd, 0, sizeof(cmd)); 1641 cmd.opcode = SCSI_REQUEST_SENSE; 1642 cmd.length = sizeof(struct scsi_sense_data); 1643 1644 error = scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1645 (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data), 1646 0, 1000, NULL, flags); 1647 periph->periph_flags &= ~PERIPH_SENSE; 1648 periph->periph_xscheck = NULL; 1649 switch (error) { 1650 case 0: 1651 /* we have a valid sense */ 1652 xs->error = XS_SENSE; 1653 return; 1654 case EINTR: 1655 /* REQUEST_SENSE interrupted by bus reset. */ 1656 xs->error = XS_RESET; 1657 return; 1658 case EIO: 1659 /* request sense coudn't be performed */ 1660 /* 1661 * XXX this isn't quite right but we don't have anything 1662 * better for now 1663 */ 1664 xs->error = XS_DRIVER_STUFFUP; 1665 return; 1666 default: 1667 /* Notify that request sense failed. */ 1668 xs->error = XS_DRIVER_STUFFUP; 1669 scsipi_printaddr(periph); 1670 printf("request sense failed with error %d\n", error); 1671 return; 1672 } 1673 } 1674 1675 /* 1676 * scsipi_enqueue: 1677 * 1678 * Enqueue an xfer on a channel. 1679 */ 1680 static int 1681 scsipi_enqueue(struct scsipi_xfer *xs) 1682 { 1683 struct scsipi_channel *chan = xs->xs_periph->periph_channel; 1684 struct scsipi_xfer *qxs; 1685 int s; 1686 1687 s = splbio(); 1688 1689 /* 1690 * If the xfer is to be polled, and there are already jobs on 1691 * the queue, we can't proceed. 1692 */ 1693 if ((xs->xs_control & XS_CTL_POLL) != 0 && 1694 TAILQ_FIRST(&chan->chan_queue) != NULL) { 1695 splx(s); 1696 xs->error = XS_DRIVER_STUFFUP; 1697 return (EAGAIN); 1698 } 1699 1700 /* 1701 * If we have an URGENT xfer, it's an error recovery command 1702 * and it should just go on the head of the channel's queue. 1703 */ 1704 if (xs->xs_control & XS_CTL_URGENT) { 1705 TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q); 1706 goto out; 1707 } 1708 1709 /* 1710 * If this xfer has already been on the queue before, we 1711 * need to reinsert it in the correct order. That order is: 1712 * 1713 * Immediately before the first xfer for this periph 1714 * with a requeuecnt less than xs->xs_requeuecnt. 1715 * 1716 * Failing that, at the end of the queue. (We'll end up 1717 * there naturally.) 1718 */ 1719 if (xs->xs_requeuecnt != 0) { 1720 for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL; 1721 qxs = TAILQ_NEXT(qxs, channel_q)) { 1722 if (qxs->xs_periph == xs->xs_periph && 1723 qxs->xs_requeuecnt < xs->xs_requeuecnt) 1724 break; 1725 } 1726 if (qxs != NULL) { 1727 TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs, 1728 channel_q); 1729 goto out; 1730 } 1731 } 1732 TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q); 1733 out: 1734 if (xs->xs_control & XS_CTL_THAW_PERIPH) 1735 scsipi_periph_thaw(xs->xs_periph, 1); 1736 splx(s); 1737 return (0); 1738 } 1739 1740 /* 1741 * scsipi_run_queue: 1742 * 1743 * Start as many xfers as possible running on the channel. 1744 */ 1745 static void 1746 scsipi_run_queue(struct scsipi_channel *chan) 1747 { 1748 struct scsipi_xfer *xs; 1749 struct scsipi_periph *periph; 1750 int s; 1751 1752 for (;;) { 1753 s = splbio(); 1754 1755 /* 1756 * If the channel is frozen, we can't do any work right 1757 * now. 1758 */ 1759 if (chan->chan_qfreeze != 0) { 1760 splx(s); 1761 return; 1762 } 1763 1764 /* 1765 * Look for work to do, and make sure we can do it. 1766 */ 1767 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; 1768 xs = TAILQ_NEXT(xs, channel_q)) { 1769 periph = xs->xs_periph; 1770 1771 if ((periph->periph_sent >= periph->periph_openings) || 1772 periph->periph_qfreeze != 0 || 1773 (periph->periph_flags & PERIPH_UNTAG) != 0) 1774 continue; 1775 1776 if ((periph->periph_flags & 1777 (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 && 1778 (xs->xs_control & XS_CTL_URGENT) == 0) 1779 continue; 1780 1781 /* 1782 * We can issue this xfer! 1783 */ 1784 goto got_one; 1785 } 1786 1787 /* 1788 * Can't find any work to do right now. 1789 */ 1790 splx(s); 1791 return; 1792 1793 got_one: 1794 /* 1795 * Have an xfer to run. Allocate a resource from 1796 * the adapter to run it. If we can't allocate that 1797 * resource, we don't dequeue the xfer. 1798 */ 1799 if (scsipi_get_resource(chan) == 0) { 1800 /* 1801 * Adapter is out of resources. If the adapter 1802 * supports it, attempt to grow them. 1803 */ 1804 if (scsipi_grow_resources(chan) == 0) { 1805 /* 1806 * Wasn't able to grow resources, 1807 * nothing more we can do. 1808 */ 1809 if (xs->xs_control & XS_CTL_POLL) { 1810 scsipi_printaddr(xs->xs_periph); 1811 printf("polling command but no " 1812 "adapter resources"); 1813 /* We'll panic shortly... */ 1814 } 1815 splx(s); 1816 1817 /* 1818 * XXX: We should be able to note that 1819 * XXX: that resources are needed here! 1820 */ 1821 return; 1822 } 1823 /* 1824 * scsipi_grow_resources() allocated the resource 1825 * for us. 1826 */ 1827 } 1828 1829 /* 1830 * We have a resource to run this xfer, do it! 1831 */ 1832 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q); 1833 1834 /* 1835 * If the command is to be tagged, allocate a tag ID 1836 * for it. 1837 */ 1838 if (XS_CTL_TAGTYPE(xs) != 0) 1839 scsipi_get_tag(xs); 1840 else 1841 periph->periph_flags |= PERIPH_UNTAG; 1842 periph->periph_sent++; 1843 splx(s); 1844 1845 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs); 1846 } 1847 #ifdef DIAGNOSTIC 1848 panic("scsipi_run_queue: impossible"); 1849 #endif 1850 } 1851 1852 /* 1853 * scsipi_execute_xs: 1854 * 1855 * Begin execution of an xfer, waiting for it to complete, if necessary. 1856 */ 1857 int 1858 scsipi_execute_xs(struct scsipi_xfer *xs) 1859 { 1860 struct scsipi_periph *periph = xs->xs_periph; 1861 struct scsipi_channel *chan = periph->periph_channel; 1862 int oasync, async, poll, error, s; 1863 1864 KASSERT(!cold); 1865 1866 (chan->chan_bustype->bustype_cmd)(xs); 1867 1868 xs->xs_status &= ~XS_STS_DONE; 1869 xs->error = XS_NOERROR; 1870 xs->resid = xs->datalen; 1871 xs->status = SCSI_OK; 1872 1873 #ifdef SCSIPI_DEBUG 1874 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) { 1875 printf("scsipi_execute_xs: "); 1876 show_scsipi_xs(xs); 1877 printf("\n"); 1878 } 1879 #endif 1880 1881 /* 1882 * Deal with command tagging: 1883 * 1884 * - If the device's current operating mode doesn't 1885 * include tagged queueing, clear the tag mask. 1886 * 1887 * - If the device's current operating mode *does* 1888 * include tagged queueing, set the tag_type in 1889 * the xfer to the appropriate byte for the tag 1890 * message. 1891 */ 1892 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 || 1893 (xs->xs_control & XS_CTL_REQSENSE)) { 1894 xs->xs_control &= ~XS_CTL_TAGMASK; 1895 xs->xs_tag_type = 0; 1896 } else { 1897 /* 1898 * If the request doesn't specify a tag, give Head 1899 * tags to URGENT operations and Ordered tags to 1900 * everything else. 1901 */ 1902 if (XS_CTL_TAGTYPE(xs) == 0) { 1903 if (xs->xs_control & XS_CTL_URGENT) 1904 xs->xs_control |= XS_CTL_HEAD_TAG; 1905 else 1906 xs->xs_control |= XS_CTL_ORDERED_TAG; 1907 } 1908 1909 switch (XS_CTL_TAGTYPE(xs)) { 1910 case XS_CTL_ORDERED_TAG: 1911 xs->xs_tag_type = MSG_ORDERED_Q_TAG; 1912 break; 1913 1914 case XS_CTL_SIMPLE_TAG: 1915 xs->xs_tag_type = MSG_SIMPLE_Q_TAG; 1916 break; 1917 1918 case XS_CTL_HEAD_TAG: 1919 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG; 1920 break; 1921 1922 default: 1923 scsipi_printaddr(periph); 1924 printf("invalid tag mask 0x%08x\n", 1925 XS_CTL_TAGTYPE(xs)); 1926 panic("scsipi_execute_xs"); 1927 } 1928 } 1929 1930 /* If the adaptor wants us to poll, poll. */ 1931 if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY) 1932 xs->xs_control |= XS_CTL_POLL; 1933 1934 /* 1935 * If we don't yet have a completion thread, or we are to poll for 1936 * completion, clear the ASYNC flag. 1937 */ 1938 oasync = (xs->xs_control & XS_CTL_ASYNC); 1939 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0) 1940 xs->xs_control &= ~XS_CTL_ASYNC; 1941 1942 async = (xs->xs_control & XS_CTL_ASYNC); 1943 poll = (xs->xs_control & XS_CTL_POLL); 1944 1945 #ifdef DIAGNOSTIC 1946 if (oasync != 0 && xs->bp == NULL) 1947 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf"); 1948 #endif 1949 1950 /* 1951 * Enqueue the transfer. If we're not polling for completion, this 1952 * should ALWAYS return `no error'. 1953 */ 1954 error = scsipi_enqueue(xs); 1955 if (error) { 1956 if (poll == 0) { 1957 scsipi_printaddr(periph); 1958 printf("not polling, but enqueue failed with %d\n", 1959 error); 1960 panic("scsipi_execute_xs"); 1961 } 1962 1963 scsipi_printaddr(periph); 1964 printf("should have flushed queue?\n"); 1965 goto free_xs; 1966 } 1967 1968 restarted: 1969 scsipi_run_queue(chan); 1970 1971 /* 1972 * The xfer is enqueued, and possibly running. If it's to be 1973 * completed asynchronously, just return now. 1974 */ 1975 if (async) 1976 return (0); 1977 1978 /* 1979 * Not an asynchronous command; wait for it to complete. 1980 */ 1981 s = splbio(); 1982 while ((xs->xs_status & XS_STS_DONE) == 0) { 1983 if (poll) { 1984 scsipi_printaddr(periph); 1985 printf("polling command not done\n"); 1986 panic("scsipi_execute_xs"); 1987 } 1988 (void) tsleep(xs, PRIBIO, "xscmd", 0); 1989 } 1990 splx(s); 1991 1992 /* 1993 * Command is complete. scsipi_done() has awakened us to perform 1994 * the error handling. 1995 */ 1996 error = scsipi_complete(xs); 1997 if (error == ERESTART) 1998 goto restarted; 1999 2000 /* 2001 * If it was meant to run async and we cleared aync ourselve, 2002 * don't return an error here. It has already been handled 2003 */ 2004 if (oasync) 2005 error = 0; 2006 /* 2007 * Command completed successfully or fatal error occurred. Fall 2008 * into.... 2009 */ 2010 free_xs: 2011 s = splbio(); 2012 scsipi_put_xs(xs); 2013 splx(s); 2014 2015 /* 2016 * Kick the queue, keep it running in case it stopped for some 2017 * reason. 2018 */ 2019 scsipi_run_queue(chan); 2020 2021 return (error); 2022 } 2023 2024 /* 2025 * scsipi_completion_thread: 2026 * 2027 * This is the completion thread. We wait for errors on 2028 * asynchronous xfers, and perform the error handling 2029 * function, restarting the command, if necessary. 2030 */ 2031 static void 2032 scsipi_completion_thread(void *arg) 2033 { 2034 struct scsipi_channel *chan = arg; 2035 struct scsipi_xfer *xs; 2036 int s; 2037 2038 if (chan->chan_init_cb) 2039 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg); 2040 2041 s = splbio(); 2042 chan->chan_flags |= SCSIPI_CHAN_TACTIVE; 2043 splx(s); 2044 for (;;) { 2045 s = splbio(); 2046 xs = TAILQ_FIRST(&chan->chan_complete); 2047 if (xs == NULL && chan->chan_tflags == 0) { 2048 /* nothing to do; wait */ 2049 (void) tsleep(&chan->chan_complete, PRIBIO, 2050 "sccomp", 0); 2051 splx(s); 2052 continue; 2053 } 2054 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) { 2055 /* call chan_callback from thread context */ 2056 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK; 2057 chan->chan_callback(chan, chan->chan_callback_arg); 2058 splx(s); 2059 continue; 2060 } 2061 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) { 2062 /* attempt to get more openings for this channel */ 2063 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES; 2064 scsipi_adapter_request(chan, 2065 ADAPTER_REQ_GROW_RESOURCES, NULL); 2066 scsipi_channel_thaw(chan, 1); 2067 splx(s); 2068 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) 2069 kpause("scsizzz", FALSE, hz/10, NULL); 2070 continue; 2071 } 2072 if (chan->chan_tflags & SCSIPI_CHANT_KICK) { 2073 /* explicitly run the queues for this channel */ 2074 chan->chan_tflags &= ~SCSIPI_CHANT_KICK; 2075 scsipi_run_queue(chan); 2076 splx(s); 2077 continue; 2078 } 2079 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) { 2080 splx(s); 2081 break; 2082 } 2083 if (xs) { 2084 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q); 2085 splx(s); 2086 2087 /* 2088 * Have an xfer with an error; process it. 2089 */ 2090 (void) scsipi_complete(xs); 2091 2092 /* 2093 * Kick the queue; keep it running if it was stopped 2094 * for some reason. 2095 */ 2096 scsipi_run_queue(chan); 2097 } else { 2098 splx(s); 2099 } 2100 } 2101 2102 chan->chan_thread = NULL; 2103 2104 /* In case parent is waiting for us to exit. */ 2105 wakeup(&chan->chan_thread); 2106 2107 kthread_exit(0); 2108 } 2109 /* 2110 * scsipi_thread_call_callback: 2111 * 2112 * request to call a callback from the completion thread 2113 */ 2114 int 2115 scsipi_thread_call_callback(struct scsipi_channel *chan, 2116 void (*callback)(struct scsipi_channel *, void *), void *arg) 2117 { 2118 int s; 2119 2120 s = splbio(); 2121 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 2122 /* kernel thread doesn't exist yet */ 2123 splx(s); 2124 return ESRCH; 2125 } 2126 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) { 2127 splx(s); 2128 return EBUSY; 2129 } 2130 scsipi_channel_freeze(chan, 1); 2131 chan->chan_callback = callback; 2132 chan->chan_callback_arg = arg; 2133 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK; 2134 wakeup(&chan->chan_complete); 2135 splx(s); 2136 return(0); 2137 } 2138 2139 /* 2140 * scsipi_async_event: 2141 * 2142 * Handle an asynchronous event from an adapter. 2143 */ 2144 void 2145 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event, 2146 void *arg) 2147 { 2148 int s; 2149 2150 s = splbio(); 2151 switch (event) { 2152 case ASYNC_EVENT_MAX_OPENINGS: 2153 scsipi_async_event_max_openings(chan, 2154 (struct scsipi_max_openings *)arg); 2155 break; 2156 2157 case ASYNC_EVENT_XFER_MODE: 2158 scsipi_async_event_xfer_mode(chan, 2159 (struct scsipi_xfer_mode *)arg); 2160 break; 2161 case ASYNC_EVENT_RESET: 2162 scsipi_async_event_channel_reset(chan); 2163 break; 2164 } 2165 splx(s); 2166 } 2167 2168 /* 2169 * scsipi_print_xfer_mode: 2170 * 2171 * Print a periph's capabilities. 2172 */ 2173 void 2174 scsipi_print_xfer_mode(struct scsipi_periph *periph) 2175 { 2176 int period, freq, speed, mbs; 2177 2178 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0) 2179 return; 2180 2181 aprint_normal_dev(periph->periph_dev, ""); 2182 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) { 2183 period = scsipi_sync_factor_to_period(periph->periph_period); 2184 aprint_normal("sync (%d.%02dns offset %d)", 2185 period / 100, period % 100, periph->periph_offset); 2186 } else 2187 aprint_normal("async"); 2188 2189 if (periph->periph_mode & PERIPH_CAP_WIDE32) 2190 aprint_normal(", 32-bit"); 2191 else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) 2192 aprint_normal(", 16-bit"); 2193 else 2194 aprint_normal(", 8-bit"); 2195 2196 if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) { 2197 freq = scsipi_sync_factor_to_freq(periph->periph_period); 2198 speed = freq; 2199 if (periph->periph_mode & PERIPH_CAP_WIDE32) 2200 speed *= 4; 2201 else if (periph->periph_mode & 2202 (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) 2203 speed *= 2; 2204 mbs = speed / 1000; 2205 if (mbs > 0) 2206 aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000); 2207 else 2208 aprint_normal(" (%dKB/s)", speed % 1000); 2209 } 2210 2211 aprint_normal(" transfers"); 2212 2213 if (periph->periph_mode & PERIPH_CAP_TQING) 2214 aprint_normal(", tagged queueing"); 2215 2216 aprint_normal("\n"); 2217 } 2218 2219 /* 2220 * scsipi_async_event_max_openings: 2221 * 2222 * Update the maximum number of outstanding commands a 2223 * device may have. 2224 */ 2225 static void 2226 scsipi_async_event_max_openings(struct scsipi_channel *chan, 2227 struct scsipi_max_openings *mo) 2228 { 2229 struct scsipi_periph *periph; 2230 int minlun, maxlun; 2231 2232 if (mo->mo_lun == -1) { 2233 /* 2234 * Wildcarded; apply it to all LUNs. 2235 */ 2236 minlun = 0; 2237 maxlun = chan->chan_nluns - 1; 2238 } else 2239 minlun = maxlun = mo->mo_lun; 2240 2241 /* XXX This could really suck with a large LUN space. */ 2242 for (; minlun <= maxlun; minlun++) { 2243 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun); 2244 if (periph == NULL) 2245 continue; 2246 2247 if (mo->mo_openings < periph->periph_openings) 2248 periph->periph_openings = mo->mo_openings; 2249 else if (mo->mo_openings > periph->periph_openings && 2250 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0) 2251 periph->periph_openings = mo->mo_openings; 2252 } 2253 } 2254 2255 /* 2256 * scsipi_async_event_xfer_mode: 2257 * 2258 * Update the xfer mode for all periphs sharing the 2259 * specified I_T Nexus. 2260 */ 2261 static void 2262 scsipi_async_event_xfer_mode(struct scsipi_channel *chan, 2263 struct scsipi_xfer_mode *xm) 2264 { 2265 struct scsipi_periph *periph; 2266 int lun, announce, mode, period, offset; 2267 2268 for (lun = 0; lun < chan->chan_nluns; lun++) { 2269 periph = scsipi_lookup_periph(chan, xm->xm_target, lun); 2270 if (periph == NULL) 2271 continue; 2272 announce = 0; 2273 2274 /* 2275 * Clamp the xfer mode down to this periph's capabilities. 2276 */ 2277 mode = xm->xm_mode & periph->periph_cap; 2278 if (mode & PERIPH_CAP_SYNC) { 2279 period = xm->xm_period; 2280 offset = xm->xm_offset; 2281 } else { 2282 period = 0; 2283 offset = 0; 2284 } 2285 2286 /* 2287 * If we do not have a valid xfer mode yet, or the parameters 2288 * are different, announce them. 2289 */ 2290 if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 || 2291 periph->periph_mode != mode || 2292 periph->periph_period != period || 2293 periph->periph_offset != offset) 2294 announce = 1; 2295 2296 periph->periph_mode = mode; 2297 periph->periph_period = period; 2298 periph->periph_offset = offset; 2299 periph->periph_flags |= PERIPH_MODE_VALID; 2300 2301 if (announce) 2302 scsipi_print_xfer_mode(periph); 2303 } 2304 } 2305 2306 /* 2307 * scsipi_set_xfer_mode: 2308 * 2309 * Set the xfer mode for the specified I_T Nexus. 2310 */ 2311 void 2312 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed) 2313 { 2314 struct scsipi_xfer_mode xm; 2315 struct scsipi_periph *itperiph; 2316 int lun, s; 2317 2318 /* 2319 * Go to the minimal xfer mode. 2320 */ 2321 xm.xm_target = target; 2322 xm.xm_mode = 0; 2323 xm.xm_period = 0; /* ignored */ 2324 xm.xm_offset = 0; /* ignored */ 2325 2326 /* 2327 * Find the first LUN we know about on this I_T Nexus. 2328 */ 2329 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) { 2330 itperiph = scsipi_lookup_periph(chan, target, lun); 2331 if (itperiph != NULL) 2332 break; 2333 } 2334 if (itperiph != NULL) { 2335 xm.xm_mode = itperiph->periph_cap; 2336 /* 2337 * Now issue the request to the adapter. 2338 */ 2339 s = splbio(); 2340 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm); 2341 splx(s); 2342 /* 2343 * If we want this to happen immediately, issue a dummy 2344 * command, since most adapters can't really negotiate unless 2345 * they're executing a job. 2346 */ 2347 if (immed != 0) { 2348 (void) scsipi_test_unit_ready(itperiph, 2349 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST | 2350 XS_CTL_IGNORE_NOT_READY | 2351 XS_CTL_IGNORE_MEDIA_CHANGE); 2352 } 2353 } 2354 } 2355 2356 /* 2357 * scsipi_channel_reset: 2358 * 2359 * handle scsi bus reset 2360 * called at splbio 2361 */ 2362 static void 2363 scsipi_async_event_channel_reset(struct scsipi_channel *chan) 2364 { 2365 struct scsipi_xfer *xs, *xs_next; 2366 struct scsipi_periph *periph; 2367 int target, lun; 2368 2369 /* 2370 * Channel has been reset. Also mark as reset pending REQUEST_SENSE 2371 * commands; as the sense is not available any more. 2372 * can't call scsipi_done() from here, as the command has not been 2373 * sent to the adapter yet (this would corrupt accounting). 2374 */ 2375 2376 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) { 2377 xs_next = TAILQ_NEXT(xs, channel_q); 2378 if (xs->xs_control & XS_CTL_REQSENSE) { 2379 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q); 2380 xs->error = XS_RESET; 2381 if ((xs->xs_control & XS_CTL_ASYNC) != 0) 2382 TAILQ_INSERT_TAIL(&chan->chan_complete, xs, 2383 channel_q); 2384 } 2385 } 2386 wakeup(&chan->chan_complete); 2387 /* Catch xs with pending sense which may not have a REQSENSE xs yet */ 2388 for (target = 0; target < chan->chan_ntargets; target++) { 2389 if (target == chan->chan_id) 2390 continue; 2391 for (lun = 0; lun < chan->chan_nluns; lun++) { 2392 periph = scsipi_lookup_periph(chan, target, lun); 2393 if (periph) { 2394 xs = periph->periph_xscheck; 2395 if (xs) 2396 xs->error = XS_RESET; 2397 } 2398 } 2399 } 2400 } 2401 2402 /* 2403 * scsipi_target_detach: 2404 * 2405 * detach all periph associated with a I_T 2406 * must be called from valid thread context 2407 */ 2408 int 2409 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun, 2410 int flags) 2411 { 2412 struct scsipi_periph *periph; 2413 int ctarget, mintarget, maxtarget; 2414 int clun, minlun, maxlun; 2415 int error; 2416 2417 if (target == -1) { 2418 mintarget = 0; 2419 maxtarget = chan->chan_ntargets; 2420 } else { 2421 if (target == chan->chan_id) 2422 return EINVAL; 2423 if (target < 0 || target >= chan->chan_ntargets) 2424 return EINVAL; 2425 mintarget = target; 2426 maxtarget = target + 1; 2427 } 2428 2429 if (lun == -1) { 2430 minlun = 0; 2431 maxlun = chan->chan_nluns; 2432 } else { 2433 if (lun < 0 || lun >= chan->chan_nluns) 2434 return EINVAL; 2435 minlun = lun; 2436 maxlun = lun + 1; 2437 } 2438 2439 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) { 2440 if (ctarget == chan->chan_id) 2441 continue; 2442 2443 for (clun = minlun; clun < maxlun; clun++) { 2444 periph = scsipi_lookup_periph(chan, ctarget, clun); 2445 if (periph == NULL) 2446 continue; 2447 error = config_detach(periph->periph_dev, flags); 2448 if (error) 2449 return (error); 2450 } 2451 } 2452 return(0); 2453 } 2454 2455 /* 2456 * scsipi_adapter_addref: 2457 * 2458 * Add a reference to the adapter pointed to by the provided 2459 * link, enabling the adapter if necessary. 2460 */ 2461 int 2462 scsipi_adapter_addref(struct scsipi_adapter *adapt) 2463 { 2464 int s, error = 0; 2465 2466 s = splbio(); 2467 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) { 2468 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1); 2469 if (error) 2470 adapt->adapt_refcnt--; 2471 } 2472 splx(s); 2473 return (error); 2474 } 2475 2476 /* 2477 * scsipi_adapter_delref: 2478 * 2479 * Delete a reference to the adapter pointed to by the provided 2480 * link, disabling the adapter if possible. 2481 */ 2482 void 2483 scsipi_adapter_delref(struct scsipi_adapter *adapt) 2484 { 2485 int s; 2486 2487 s = splbio(); 2488 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL) 2489 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0); 2490 splx(s); 2491 } 2492 2493 static struct scsipi_syncparam { 2494 int ss_factor; 2495 int ss_period; /* ns * 100 */ 2496 } scsipi_syncparams[] = { 2497 { 0x08, 625 }, /* FAST-160 (Ultra320) */ 2498 { 0x09, 1250 }, /* FAST-80 (Ultra160) */ 2499 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */ 2500 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */ 2501 { 0x0c, 5000 }, /* FAST-20 (Ultra) */ 2502 }; 2503 static const int scsipi_nsyncparams = 2504 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]); 2505 2506 int 2507 scsipi_sync_period_to_factor(int period /* ns * 100 */) 2508 { 2509 int i; 2510 2511 for (i = 0; i < scsipi_nsyncparams; i++) { 2512 if (period <= scsipi_syncparams[i].ss_period) 2513 return (scsipi_syncparams[i].ss_factor); 2514 } 2515 2516 return ((period / 100) / 4); 2517 } 2518 2519 int 2520 scsipi_sync_factor_to_period(int factor) 2521 { 2522 int i; 2523 2524 for (i = 0; i < scsipi_nsyncparams; i++) { 2525 if (factor == scsipi_syncparams[i].ss_factor) 2526 return (scsipi_syncparams[i].ss_period); 2527 } 2528 2529 return ((factor * 4) * 100); 2530 } 2531 2532 int 2533 scsipi_sync_factor_to_freq(int factor) 2534 { 2535 int i; 2536 2537 for (i = 0; i < scsipi_nsyncparams; i++) { 2538 if (factor == scsipi_syncparams[i].ss_factor) 2539 return (100000000 / scsipi_syncparams[i].ss_period); 2540 } 2541 2542 return (10000000 / ((factor * 4) * 10)); 2543 } 2544 2545 #ifdef SCSIPI_DEBUG 2546 /* 2547 * Given a scsipi_xfer, dump the request, in all it's glory 2548 */ 2549 void 2550 show_scsipi_xs(struct scsipi_xfer *xs) 2551 { 2552 2553 printf("xs(%p): ", xs); 2554 printf("xs_control(0x%08x)", xs->xs_control); 2555 printf("xs_status(0x%08x)", xs->xs_status); 2556 printf("periph(%p)", xs->xs_periph); 2557 printf("retr(0x%x)", xs->xs_retries); 2558 printf("timo(0x%x)", xs->timeout); 2559 printf("cmd(%p)", xs->cmd); 2560 printf("len(0x%x)", xs->cmdlen); 2561 printf("data(%p)", xs->data); 2562 printf("len(0x%x)", xs->datalen); 2563 printf("res(0x%x)", xs->resid); 2564 printf("err(0x%x)", xs->error); 2565 printf("bp(%p)", xs->bp); 2566 show_scsipi_cmd(xs); 2567 } 2568 2569 void 2570 show_scsipi_cmd(struct scsipi_xfer *xs) 2571 { 2572 u_char *b = (u_char *) xs->cmd; 2573 int i = 0; 2574 2575 scsipi_printaddr(xs->xs_periph); 2576 printf(" command: "); 2577 2578 if ((xs->xs_control & XS_CTL_RESET) == 0) { 2579 while (i < xs->cmdlen) { 2580 if (i) 2581 printf(","); 2582 printf("0x%x", b[i++]); 2583 } 2584 printf("-[%d bytes]\n", xs->datalen); 2585 if (xs->datalen) 2586 show_mem(xs->data, min(64, xs->datalen)); 2587 } else 2588 printf("-RESET-\n"); 2589 } 2590 2591 void 2592 show_mem(u_char *address, int num) 2593 { 2594 int x; 2595 2596 printf("------------------------------"); 2597 for (x = 0; x < num; x++) { 2598 if ((x % 16) == 0) 2599 printf("\n%03d: ", x); 2600 printf("%02x ", *address++); 2601 } 2602 printf("\n------------------------------\n"); 2603 } 2604 #endif /* SCSIPI_DEBUG */ 2605