1 /* $NetBSD: scsipi_base.c,v 1.159 2012/04/20 20:23:21 bouyer Exp $ */ 2 3 /*- 4 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace 9 * Simulation Facility, NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.159 2012/04/20 20:23:21 bouyer Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/param.h>

/* Forward declarations of file-local helpers. */
static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void scsipi_async_event_max_openings(struct scsipi_channel *,
	struct scsipi_max_openings *);
static void scsipi_async_event_channel_reset(struct scsipi_channel *);

/* Pool of scsipi_xfer descriptors, shared by all channels/busses. */
static struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	/*
	 * Idempotent: only the first attachment performs the shared
	 * initialization; later callers return immediately.
	 */
	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	/* Pre-allocate one page worth of xfers; failure is non-fatal. */
	if (pool_prime(&scsipi_xfer_pool,
	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
	}
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 *	Returns 0 on success; panics if the completion thread
 *	cannot be created.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	/* Empty the per-channel periph hash table. */
	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
		    "channel %d\n", chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.  The thread clears
	 * chan_thread and wakes us when it is done.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_chan_periph_hash:
 *
 *	Hash a (target, lun) pair into a periph-table bucket index.
 */
static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	/* Block I/O interrupts while touching the hash chain. */
	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 *	Returns NULL if (target, lun) is out of range or not attached.
 */
struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	KASSERT(cold || KERNEL_LOCKED_P());

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	/* periph is NULL here if the loop ran off the end of the chain. */
	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *	Returns 1 if a resource was allocated, 0 if none available.
 *
 *	NOTE: Must be called at splbio().
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	/*
	 * Openings are accounted either per-channel or per-adapter,
	 * depending on SCSIPI_CHAN_OPENINGS.
	 */
	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.
If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			/* No completion thread yet: grow synchronously. */
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	/*
	 * Scan the free-tag bitmap for the first word with a set bit.
	 * ffs() returns a 1-based bit index, or 0 if no bit is set.
	 */
	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	/* Claim the tag: clear its bit; tag id = word * 32 + bit. */
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	/* Inverse of scsipi_get_tag(): set the tag's bit again. */
	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripherial.  If the peripherial has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 *
 *	Returns NULL only if XS_CTL_NOSLEEP was given and no opening
 *	or descriptor was immediately available.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		/* scsipi_put_xs() wakes us when an opening frees up. */
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		/* Undo the opening/recovery accounting done above. */
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripherial.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripherial may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recovery for\n");
		panic("scsipi_put_xs");
	}
#endif

	/* Mirror the accounting done in scsipi_get_xs(). */
	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired. This will also
 * 	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	/* Wake any thread stalled in scsipi_get_xs() on this periph. */
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	int s;

	s = splbio();
	/* scsipi_put_xs() wakes us when periph_active drops to 0. */
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{

	/* Delegate the actual abort to the bus type, then drain. */
	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
	scsipi_wait_drain(periph);
}

/*
 * scsipi_print_cdb:
 *	prints a command descriptor block (for debug purpose, error messages,
 *	SCSIVERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	/* The CDB length is implied by the opcode's group. */
	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	/* Unknown group: dump the whole generic CDB buffer. */
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++) /* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
761 */ 762 int 763 scsipi_interpret_sense(struct scsipi_xfer *xs) 764 { 765 struct scsi_sense_data *sense; 766 struct scsipi_periph *periph = xs->xs_periph; 767 u_int8_t key; 768 int error; 769 u_int32_t info; 770 static const char *error_mes[] = { 771 "soft error (corrected)", 772 "not ready", "medium error", 773 "non-media hardware failure", "illegal request", 774 "unit attention", "readonly device", 775 "no data found", "vendor unique", 776 "copy aborted", "command aborted", 777 "search returned equal", "volume overflow", 778 "verify miscompare", "unknown error key" 779 }; 780 781 sense = &xs->sense.scsi_sense; 782 #ifdef SCSIPI_DEBUG 783 if (periph->periph_flags & SCSIPI_DB1) { 784 int count; 785 scsipi_printaddr(periph); 786 printf(" sense debug information:\n"); 787 printf("\tcode 0x%x valid %d\n", 788 SSD_RCODE(sense->response_code), 789 sense->response_code & SSD_RCODE_VALID ? 1 : 0); 790 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n", 791 sense->segment, 792 SSD_SENSE_KEY(sense->flags), 793 sense->flags & SSD_ILI ? 1 : 0, 794 sense->flags & SSD_EOM ? 1 : 0, 795 sense->flags & SSD_FILEMARK ? 1 : 0); 796 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d " 797 "extra bytes\n", 798 sense->info[0], 799 sense->info[1], 800 sense->info[2], 801 sense->info[3], 802 sense->extra_len); 803 printf("\textra: "); 804 for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++) 805 printf("0x%x ", sense->csi[count]); 806 printf("\n"); 807 } 808 #endif 809 810 /* 811 * If the periph has it's own error handler, call it first. 812 * If it returns a legit error value, return that, otherwise 813 * it wants us to continue with normal error processing. 
814 */ 815 if (periph->periph_switch->psw_error != NULL) { 816 SC_DEBUG(periph, SCSIPI_DB2, 817 ("calling private err_handler()\n")); 818 error = (*periph->periph_switch->psw_error)(xs); 819 if (error != EJUSTRETURN) 820 return (error); 821 } 822 /* otherwise use the default */ 823 switch (SSD_RCODE(sense->response_code)) { 824 825 /* 826 * Old SCSI-1 and SASI devices respond with 827 * codes other than 70. 828 */ 829 case 0x00: /* no error (command completed OK) */ 830 return (0); 831 case 0x04: /* drive not ready after it was selected */ 832 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0) 833 periph->periph_flags &= ~PERIPH_MEDIA_LOADED; 834 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0) 835 return (0); 836 /* XXX - display some sort of error here? */ 837 return (EIO); 838 case 0x20: /* invalid command */ 839 if ((xs->xs_control & 840 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0) 841 return (0); 842 return (EINVAL); 843 case 0x25: /* invalid LUN (Adaptec ACB-4000) */ 844 return (EACCES); 845 846 /* 847 * If it's code 70, use the extended stuff and 848 * interpret the key 849 */ 850 case 0x71: /* delayed error */ 851 scsipi_printaddr(periph); 852 key = SSD_SENSE_KEY(sense->flags); 853 printf(" DEFERRED ERROR, key = 0x%x\n", key); 854 /* FALLTHROUGH */ 855 case 0x70: 856 if ((sense->response_code & SSD_RCODE_VALID) != 0) 857 info = _4btol(sense->info); 858 else 859 info = 0; 860 key = SSD_SENSE_KEY(sense->flags); 861 862 switch (key) { 863 case SKEY_NO_SENSE: 864 case SKEY_RECOVERED_ERROR: 865 if (xs->resid == xs->datalen && xs->datalen) { 866 /* 867 * Why is this here? 
868 */ 869 xs->resid = 0; /* not short read */ 870 } 871 case SKEY_EQUAL: 872 error = 0; 873 break; 874 case SKEY_NOT_READY: 875 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0) 876 periph->periph_flags &= ~PERIPH_MEDIA_LOADED; 877 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0) 878 return (0); 879 if (sense->asc == 0x3A) { 880 error = ENODEV; /* Medium not present */ 881 if (xs->xs_control & XS_CTL_SILENT_NODEV) 882 return (error); 883 } else 884 error = EIO; 885 if ((xs->xs_control & XS_CTL_SILENT) != 0) 886 return (error); 887 break; 888 case SKEY_ILLEGAL_REQUEST: 889 if ((xs->xs_control & 890 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0) 891 return (0); 892 /* 893 * Handle the case where a device reports 894 * Logical Unit Not Supported during discovery. 895 */ 896 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 && 897 sense->asc == 0x25 && 898 sense->ascq == 0x00) 899 return (EINVAL); 900 if ((xs->xs_control & XS_CTL_SILENT) != 0) 901 return (EIO); 902 error = EINVAL; 903 break; 904 case SKEY_UNIT_ATTENTION: 905 if (sense->asc == 0x29 && 906 sense->ascq == 0x00) { 907 /* device or bus reset */ 908 return (ERESTART); 909 } 910 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0) 911 periph->periph_flags &= ~PERIPH_MEDIA_LOADED; 912 if ((xs->xs_control & 913 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 || 914 /* XXX Should reupload any transient state. 
*/ 915 (periph->periph_flags & 916 PERIPH_REMOVABLE) == 0) { 917 return (ERESTART); 918 } 919 if ((xs->xs_control & XS_CTL_SILENT) != 0) 920 return (EIO); 921 error = EIO; 922 break; 923 case SKEY_DATA_PROTECT: 924 error = EROFS; 925 break; 926 case SKEY_BLANK_CHECK: 927 error = 0; 928 break; 929 case SKEY_ABORTED_COMMAND: 930 if (xs->xs_retries != 0) { 931 xs->xs_retries--; 932 error = ERESTART; 933 } else 934 error = EIO; 935 break; 936 case SKEY_VOLUME_OVERFLOW: 937 error = ENOSPC; 938 break; 939 default: 940 error = EIO; 941 break; 942 } 943 944 /* Print verbose decode if appropriate and possible */ 945 if ((key == 0) || 946 ((xs->xs_control & XS_CTL_SILENT) != 0) || 947 (scsipi_print_sense(xs, 0) != 0)) 948 return (error); 949 950 /* Print brief(er) sense information */ 951 scsipi_printaddr(periph); 952 printf("%s", error_mes[key - 1]); 953 if ((sense->response_code & SSD_RCODE_VALID) != 0) { 954 switch (key) { 955 case SKEY_NOT_READY: 956 case SKEY_ILLEGAL_REQUEST: 957 case SKEY_UNIT_ATTENTION: 958 case SKEY_DATA_PROTECT: 959 break; 960 case SKEY_BLANK_CHECK: 961 printf(", requested size: %d (decimal)", 962 info); 963 break; 964 case SKEY_ABORTED_COMMAND: 965 if (xs->xs_retries) 966 printf(", retrying"); 967 printf(", cmd 0x%x, info 0x%x", 968 xs->cmd->opcode, info); 969 break; 970 default: 971 printf(", info = %d (decimal)", info); 972 } 973 } 974 if (sense->extra_len != 0) { 975 int n; 976 printf(", data ="); 977 for (n = 0; n < sense->extra_len; n++) 978 printf(" %02x", 979 sense->csi[n]); 980 } 981 printf("\n"); 982 return (error); 983 984 /* 985 * Some other code, just report it 986 */ 987 default: 988 #if defined(SCSIDEBUG) || defined(DEBUG) 989 { 990 static const char *uc = "undecodable sense error"; 991 int i; 992 u_int8_t *cptr = (u_int8_t *) sense; 993 scsipi_printaddr(periph); 994 if (xs->cmd == &xs->cmdstore) { 995 printf("%s for opcode 0x%x, data=", 996 uc, xs->cmdstore.opcode); 997 } else { 998 printf("%s, data=", uc); 999 } 1000 for (i = 0; i < 
sizeof (sense); i++) 1001 printf(" 0x%02x", *(cptr++) & 0xff); 1002 printf("\n"); 1003 } 1004 #else 1005 scsipi_printaddr(periph); 1006 printf("Sense Error Code 0x%x", 1007 SSD_RCODE(sense->response_code)); 1008 if ((sense->response_code & SSD_RCODE_VALID) != 0) { 1009 struct scsi_sense_data_unextended *usense = 1010 (struct scsi_sense_data_unextended *)sense; 1011 printf(" at block no. %d (decimal)", 1012 _3btol(usense->block)); 1013 } 1014 printf("\n"); 1015 #endif 1016 return (EIO); 1017 } 1018 } 1019 1020 /* 1021 * scsipi_test_unit_ready: 1022 * 1023 * Issue a `test unit ready' request. 1024 */ 1025 int 1026 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags) 1027 { 1028 struct scsi_test_unit_ready cmd; 1029 int retries; 1030 1031 /* some ATAPI drives don't support TEST UNIT READY. Sigh */ 1032 if (periph->periph_quirks & PQUIRK_NOTUR) 1033 return (0); 1034 1035 if (flags & XS_CTL_DISCOVERY) 1036 retries = 0; 1037 else 1038 retries = SCSIPIRETRIES; 1039 1040 memset(&cmd, 0, sizeof(cmd)); 1041 cmd.opcode = SCSI_TEST_UNIT_READY; 1042 1043 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0, 1044 retries, 10000, NULL, flags)); 1045 } 1046 1047 /* 1048 * scsipi_inquire: 1049 * 1050 * Ask the device about itself. 1051 */ 1052 int 1053 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf, 1054 int flags) 1055 { 1056 struct scsipi_inquiry cmd; 1057 int error; 1058 int retries; 1059 1060 if (flags & XS_CTL_DISCOVERY) 1061 retries = 0; 1062 else 1063 retries = SCSIPIRETRIES; 1064 1065 /* 1066 * If we request more data than the device can provide, it SHOULD just 1067 * return a short reponse. However, some devices error with an 1068 * ILLEGAL REQUEST sense code, and yet others have even more special 1069 * failture modes (such as the GL641USB flash adapter, which goes loony 1070 * and sends corrupted CRCs). 
To work around this, and to bring our 1071 * behavior more in line with other OSes, we do a shorter inquiry, 1072 * covering all the SCSI-2 information, first, and then request more 1073 * data iff the "additional length" field indicates there is more. 1074 * - mycroft, 2003/10/16 1075 */ 1076 memset(&cmd, 0, sizeof(cmd)); 1077 cmd.opcode = INQUIRY; 1078 cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2; 1079 error = scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1080 (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries, 1081 10000, NULL, flags | XS_CTL_DATA_IN); 1082 if (!error && 1083 inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) { 1084 #if 0 1085 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length); 1086 #endif 1087 cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3; 1088 error = scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1089 (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries, 1090 10000, NULL, flags | XS_CTL_DATA_IN); 1091 #if 0 1092 printf("inquire: error=%d\n", error); 1093 #endif 1094 } 1095 1096 #ifdef SCSI_OLD_NOINQUIRY 1097 /* 1098 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator. 1099 * This board doesn't support the INQUIRY command at all. 1100 */ 1101 if (error == EINVAL || error == EACCES) { 1102 /* 1103 * Conjure up an INQUIRY response. 1104 */ 1105 inqbuf->device = (error == EINVAL ? 1106 SID_QUAL_LU_PRESENT : 1107 SID_QUAL_LU_NOTPRESENT) | T_DIRECT; 1108 inqbuf->dev_qual2 = 0; 1109 inqbuf->version = 0; 1110 inqbuf->response_format = SID_FORMAT_SCSI1; 1111 inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4; 1112 inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0; 1113 memcpy(inqbuf->vendor, "ADAPTEC ACB-4000 ", 28); 1114 error = 0; 1115 } 1116 1117 /* 1118 * Kludge for the Emulex MT-02 SCSI->QIC translator. 1119 * This board gives an empty response to an INQUIRY command. 
1120 */ 1121 else if (error == 0 && 1122 inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) && 1123 inqbuf->dev_qual2 == 0 && 1124 inqbuf->version == 0 && 1125 inqbuf->response_format == SID_FORMAT_SCSI1) { 1126 /* 1127 * Fill out the INQUIRY response. 1128 */ 1129 inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL); 1130 inqbuf->dev_qual2 = SID_REMOVABLE; 1131 inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4; 1132 inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0; 1133 memcpy(inqbuf->vendor, "EMULEX MT-02 QIC ", 28); 1134 } 1135 #endif /* SCSI_OLD_NOINQUIRY */ 1136 1137 return error; 1138 } 1139 1140 /* 1141 * scsipi_prevent: 1142 * 1143 * Prevent or allow the user to remove the media 1144 */ 1145 int 1146 scsipi_prevent(struct scsipi_periph *periph, int type, int flags) 1147 { 1148 struct scsi_prevent_allow_medium_removal cmd; 1149 1150 if (periph->periph_quirks & PQUIRK_NODOORLOCK) 1151 return 0; 1152 1153 memset(&cmd, 0, sizeof(cmd)); 1154 cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL; 1155 cmd.how = type; 1156 1157 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0, 1158 SCSIPIRETRIES, 5000, NULL, flags)); 1159 } 1160 1161 /* 1162 * scsipi_start: 1163 * 1164 * Send a START UNIT. 1165 */ 1166 int 1167 scsipi_start(struct scsipi_periph *periph, int type, int flags) 1168 { 1169 struct scsipi_start_stop cmd; 1170 1171 memset(&cmd, 0, sizeof(cmd)); 1172 cmd.opcode = START_STOP; 1173 cmd.byte2 = 0x00; 1174 cmd.how = type; 1175 1176 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0, 1177 SCSIPIRETRIES, (type & SSS_START) ? 
	    60000 : 10000, NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 *
 *	The 6-byte variant carries a one-byte allocation length; the
 *	10-byte ("big") variant carries a two-byte big-endian length.
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	/* 6-byte CDB: only a single-byte allocation length field. */
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	/* 10-byte CDB: two-byte big-endian allocation length. */
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
}

/*
 * scsipi_mode_select, scsipi_mode_select_big:
 *	send mode parameters (a MODE SELECT) to a device, using a
 *	6-byte or 10-byte CDB respectively.
 */
int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	/* 6-byte CDB: only a single-byte parameter list length field. */
	cmd.length = len & 0xff;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	/* 10-byte CDB: two-byte big-endian parameter list length. */
	_lto2b(len, cmd.length);

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	KASSERT(cold || KERNEL_LOCKED_P());

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/*
		 * XXX in certain circumstances, such as a device
		 * being detached, an xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach().  Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies.  This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		splx(s);
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
				scsipi_print_sense_data((void *)xs->data, 0);
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Let the bus-specific code interpret the sense data. */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* XXX: quite extreme */
				kpause("xsbusy", false, hz, NULL);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
1620 */ 1621 1622 static void 1623 scsipi_request_sense(struct scsipi_xfer *xs) 1624 { 1625 struct scsipi_periph *periph = xs->xs_periph; 1626 int flags, error; 1627 struct scsi_request_sense cmd; 1628 1629 periph->periph_flags |= PERIPH_SENSE; 1630 1631 /* if command was polling, request sense will too */ 1632 flags = xs->xs_control & XS_CTL_POLL; 1633 /* Polling commands can't sleep */ 1634 if (flags) 1635 flags |= XS_CTL_NOSLEEP; 1636 1637 flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN | 1638 XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH; 1639 1640 memset(&cmd, 0, sizeof(cmd)); 1641 cmd.opcode = SCSI_REQUEST_SENSE; 1642 cmd.length = sizeof(struct scsi_sense_data); 1643 1644 error = scsipi_command(periph, (void *)&cmd, sizeof(cmd), 1645 (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data), 1646 0, 1000, NULL, flags); 1647 periph->periph_flags &= ~PERIPH_SENSE; 1648 periph->periph_xscheck = NULL; 1649 switch (error) { 1650 case 0: 1651 /* we have a valid sense */ 1652 xs->error = XS_SENSE; 1653 return; 1654 case EINTR: 1655 /* REQUEST_SENSE interrupted by bus reset. */ 1656 xs->error = XS_RESET; 1657 return; 1658 case EIO: 1659 /* request sense coudn't be performed */ 1660 /* 1661 * XXX this isn't quite right but we don't have anything 1662 * better for now 1663 */ 1664 xs->error = XS_DRIVER_STUFFUP; 1665 return; 1666 default: 1667 /* Notify that request sense failed. */ 1668 xs->error = XS_DRIVER_STUFFUP; 1669 scsipi_printaddr(periph); 1670 printf("request sense failed with error %d\n", error); 1671 return; 1672 } 1673 } 1674 1675 /* 1676 * scsipi_enqueue: 1677 * 1678 * Enqueue an xfer on a channel. 1679 */ 1680 static int 1681 scsipi_enqueue(struct scsipi_xfer *xs) 1682 { 1683 struct scsipi_channel *chan = xs->xs_periph->periph_channel; 1684 struct scsipi_xfer *qxs; 1685 int s; 1686 1687 s = splbio(); 1688 1689 /* 1690 * If the xfer is to be polled, and there are already jobs on 1691 * the queue, we can't proceed. 
1692 */ 1693 if ((xs->xs_control & XS_CTL_POLL) != 0 && 1694 TAILQ_FIRST(&chan->chan_queue) != NULL) { 1695 splx(s); 1696 xs->error = XS_DRIVER_STUFFUP; 1697 return (EAGAIN); 1698 } 1699 1700 /* 1701 * If we have an URGENT xfer, it's an error recovery command 1702 * and it should just go on the head of the channel's queue. 1703 */ 1704 if (xs->xs_control & XS_CTL_URGENT) { 1705 TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q); 1706 goto out; 1707 } 1708 1709 /* 1710 * If this xfer has already been on the queue before, we 1711 * need to reinsert it in the correct order. That order is: 1712 * 1713 * Immediately before the first xfer for this periph 1714 * with a requeuecnt less than xs->xs_requeuecnt. 1715 * 1716 * Failing that, at the end of the queue. (We'll end up 1717 * there naturally.) 1718 */ 1719 if (xs->xs_requeuecnt != 0) { 1720 for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL; 1721 qxs = TAILQ_NEXT(qxs, channel_q)) { 1722 if (qxs->xs_periph == xs->xs_periph && 1723 qxs->xs_requeuecnt < xs->xs_requeuecnt) 1724 break; 1725 } 1726 if (qxs != NULL) { 1727 TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs, 1728 channel_q); 1729 goto out; 1730 } 1731 } 1732 TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q); 1733 out: 1734 if (xs->xs_control & XS_CTL_THAW_PERIPH) 1735 scsipi_periph_thaw(xs->xs_periph, 1); 1736 splx(s); 1737 return (0); 1738 } 1739 1740 /* 1741 * scsipi_run_queue: 1742 * 1743 * Start as many xfers as possible running on the channel. 1744 */ 1745 static void 1746 scsipi_run_queue(struct scsipi_channel *chan) 1747 { 1748 struct scsipi_xfer *xs; 1749 struct scsipi_periph *periph; 1750 int s; 1751 1752 for (;;) { 1753 s = splbio(); 1754 1755 /* 1756 * If the channel is frozen, we can't do any work right 1757 * now. 1758 */ 1759 if (chan->chan_qfreeze != 0) { 1760 splx(s); 1761 return; 1762 } 1763 1764 /* 1765 * Look for work to do, and make sure we can do it. 
1766 */ 1767 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; 1768 xs = TAILQ_NEXT(xs, channel_q)) { 1769 periph = xs->xs_periph; 1770 1771 if ((periph->periph_sent >= periph->periph_openings) || 1772 periph->periph_qfreeze != 0 || 1773 (periph->periph_flags & PERIPH_UNTAG) != 0) 1774 continue; 1775 1776 if ((periph->periph_flags & 1777 (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 && 1778 (xs->xs_control & XS_CTL_URGENT) == 0) 1779 continue; 1780 1781 /* 1782 * We can issue this xfer! 1783 */ 1784 goto got_one; 1785 } 1786 1787 /* 1788 * Can't find any work to do right now. 1789 */ 1790 splx(s); 1791 return; 1792 1793 got_one: 1794 /* 1795 * Have an xfer to run. Allocate a resource from 1796 * the adapter to run it. If we can't allocate that 1797 * resource, we don't dequeue the xfer. 1798 */ 1799 if (scsipi_get_resource(chan) == 0) { 1800 /* 1801 * Adapter is out of resources. If the adapter 1802 * supports it, attempt to grow them. 1803 */ 1804 if (scsipi_grow_resources(chan) == 0) { 1805 /* 1806 * Wasn't able to grow resources, 1807 * nothing more we can do. 1808 */ 1809 if (xs->xs_control & XS_CTL_POLL) { 1810 scsipi_printaddr(xs->xs_periph); 1811 printf("polling command but no " 1812 "adapter resources"); 1813 /* We'll panic shortly... */ 1814 } 1815 splx(s); 1816 1817 /* 1818 * XXX: We should be able to note that 1819 * XXX: that resources are needed here! 1820 */ 1821 return; 1822 } 1823 /* 1824 * scsipi_grow_resources() allocated the resource 1825 * for us. 1826 */ 1827 } 1828 1829 /* 1830 * We have a resource to run this xfer, do it! 1831 */ 1832 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q); 1833 1834 /* 1835 * If the command is to be tagged, allocate a tag ID 1836 * for it. 
1837 */ 1838 if (XS_CTL_TAGTYPE(xs) != 0) 1839 scsipi_get_tag(xs); 1840 else 1841 periph->periph_flags |= PERIPH_UNTAG; 1842 periph->periph_sent++; 1843 splx(s); 1844 1845 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs); 1846 } 1847 #ifdef DIAGNOSTIC 1848 panic("scsipi_run_queue: impossible"); 1849 #endif 1850 } 1851 1852 /* 1853 * scsipi_execute_xs: 1854 * 1855 * Begin execution of an xfer, waiting for it to complete, if necessary. 1856 */ 1857 int 1858 scsipi_execute_xs(struct scsipi_xfer *xs) 1859 { 1860 struct scsipi_periph *periph = xs->xs_periph; 1861 struct scsipi_channel *chan = periph->periph_channel; 1862 int oasync, async, poll, error, s; 1863 1864 KASSERT(!cold); 1865 KASSERT(KERNEL_LOCKED_P()); 1866 1867 (chan->chan_bustype->bustype_cmd)(xs); 1868 1869 xs->xs_status &= ~XS_STS_DONE; 1870 xs->error = XS_NOERROR; 1871 xs->resid = xs->datalen; 1872 xs->status = SCSI_OK; 1873 1874 #ifdef SCSIPI_DEBUG 1875 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) { 1876 printf("scsipi_execute_xs: "); 1877 show_scsipi_xs(xs); 1878 printf("\n"); 1879 } 1880 #endif 1881 1882 /* 1883 * Deal with command tagging: 1884 * 1885 * - If the device's current operating mode doesn't 1886 * include tagged queueing, clear the tag mask. 1887 * 1888 * - If the device's current operating mode *does* 1889 * include tagged queueing, set the tag_type in 1890 * the xfer to the appropriate byte for the tag 1891 * message. 1892 */ 1893 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 || 1894 (xs->xs_control & XS_CTL_REQSENSE)) { 1895 xs->xs_control &= ~XS_CTL_TAGMASK; 1896 xs->xs_tag_type = 0; 1897 } else { 1898 /* 1899 * If the request doesn't specify a tag, give Head 1900 * tags to URGENT operations and Ordered tags to 1901 * everything else. 
1902 */ 1903 if (XS_CTL_TAGTYPE(xs) == 0) { 1904 if (xs->xs_control & XS_CTL_URGENT) 1905 xs->xs_control |= XS_CTL_HEAD_TAG; 1906 else 1907 xs->xs_control |= XS_CTL_ORDERED_TAG; 1908 } 1909 1910 switch (XS_CTL_TAGTYPE(xs)) { 1911 case XS_CTL_ORDERED_TAG: 1912 xs->xs_tag_type = MSG_ORDERED_Q_TAG; 1913 break; 1914 1915 case XS_CTL_SIMPLE_TAG: 1916 xs->xs_tag_type = MSG_SIMPLE_Q_TAG; 1917 break; 1918 1919 case XS_CTL_HEAD_TAG: 1920 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG; 1921 break; 1922 1923 default: 1924 scsipi_printaddr(periph); 1925 printf("invalid tag mask 0x%08x\n", 1926 XS_CTL_TAGTYPE(xs)); 1927 panic("scsipi_execute_xs"); 1928 } 1929 } 1930 1931 /* If the adaptor wants us to poll, poll. */ 1932 if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY) 1933 xs->xs_control |= XS_CTL_POLL; 1934 1935 /* 1936 * If we don't yet have a completion thread, or we are to poll for 1937 * completion, clear the ASYNC flag. 1938 */ 1939 oasync = (xs->xs_control & XS_CTL_ASYNC); 1940 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0) 1941 xs->xs_control &= ~XS_CTL_ASYNC; 1942 1943 async = (xs->xs_control & XS_CTL_ASYNC); 1944 poll = (xs->xs_control & XS_CTL_POLL); 1945 1946 #ifdef DIAGNOSTIC 1947 if (oasync != 0 && xs->bp == NULL) 1948 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf"); 1949 #endif 1950 1951 /* 1952 * Enqueue the transfer. If we're not polling for completion, this 1953 * should ALWAYS return `no error'. 1954 */ 1955 error = scsipi_enqueue(xs); 1956 if (error) { 1957 if (poll == 0) { 1958 scsipi_printaddr(periph); 1959 printf("not polling, but enqueue failed with %d\n", 1960 error); 1961 panic("scsipi_execute_xs"); 1962 } 1963 1964 scsipi_printaddr(periph); 1965 printf("should have flushed queue?\n"); 1966 goto free_xs; 1967 } 1968 1969 restarted: 1970 scsipi_run_queue(chan); 1971 1972 /* 1973 * The xfer is enqueued, and possibly running. If it's to be 1974 * completed asynchronously, just return now. 
1975 */ 1976 if (async) 1977 return (0); 1978 1979 /* 1980 * Not an asynchronous command; wait for it to complete. 1981 */ 1982 s = splbio(); 1983 while ((xs->xs_status & XS_STS_DONE) == 0) { 1984 if (poll) { 1985 scsipi_printaddr(periph); 1986 printf("polling command not done\n"); 1987 panic("scsipi_execute_xs"); 1988 } 1989 (void) tsleep(xs, PRIBIO, "xscmd", 0); 1990 } 1991 splx(s); 1992 1993 /* 1994 * Command is complete. scsipi_done() has awakened us to perform 1995 * the error handling. 1996 */ 1997 error = scsipi_complete(xs); 1998 if (error == ERESTART) 1999 goto restarted; 2000 2001 /* 2002 * If it was meant to run async and we cleared aync ourselve, 2003 * don't return an error here. It has already been handled 2004 */ 2005 if (oasync) 2006 error = 0; 2007 /* 2008 * Command completed successfully or fatal error occurred. Fall 2009 * into.... 2010 */ 2011 free_xs: 2012 s = splbio(); 2013 scsipi_put_xs(xs); 2014 splx(s); 2015 2016 /* 2017 * Kick the queue, keep it running in case it stopped for some 2018 * reason. 2019 */ 2020 scsipi_run_queue(chan); 2021 2022 return (error); 2023 } 2024 2025 /* 2026 * scsipi_completion_thread: 2027 * 2028 * This is the completion thread. We wait for errors on 2029 * asynchronous xfers, and perform the error handling 2030 * function, restarting the command, if necessary. 
2031 */ 2032 static void 2033 scsipi_completion_thread(void *arg) 2034 { 2035 struct scsipi_channel *chan = arg; 2036 struct scsipi_xfer *xs; 2037 int s; 2038 2039 if (chan->chan_init_cb) 2040 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg); 2041 2042 s = splbio(); 2043 chan->chan_flags |= SCSIPI_CHAN_TACTIVE; 2044 splx(s); 2045 for (;;) { 2046 s = splbio(); 2047 xs = TAILQ_FIRST(&chan->chan_complete); 2048 if (xs == NULL && chan->chan_tflags == 0) { 2049 /* nothing to do; wait */ 2050 (void) tsleep(&chan->chan_complete, PRIBIO, 2051 "sccomp", 0); 2052 splx(s); 2053 continue; 2054 } 2055 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) { 2056 /* call chan_callback from thread context */ 2057 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK; 2058 chan->chan_callback(chan, chan->chan_callback_arg); 2059 splx(s); 2060 continue; 2061 } 2062 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) { 2063 /* attempt to get more openings for this channel */ 2064 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES; 2065 scsipi_adapter_request(chan, 2066 ADAPTER_REQ_GROW_RESOURCES, NULL); 2067 scsipi_channel_thaw(chan, 1); 2068 splx(s); 2069 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) 2070 kpause("scsizzz", FALSE, hz/10, NULL); 2071 continue; 2072 } 2073 if (chan->chan_tflags & SCSIPI_CHANT_KICK) { 2074 /* explicitly run the queues for this channel */ 2075 chan->chan_tflags &= ~SCSIPI_CHANT_KICK; 2076 scsipi_run_queue(chan); 2077 splx(s); 2078 continue; 2079 } 2080 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) { 2081 splx(s); 2082 break; 2083 } 2084 if (xs) { 2085 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q); 2086 splx(s); 2087 2088 /* 2089 * Have an xfer with an error; process it. 2090 */ 2091 (void) scsipi_complete(xs); 2092 2093 /* 2094 * Kick the queue; keep it running if it was stopped 2095 * for some reason. 2096 */ 2097 scsipi_run_queue(chan); 2098 } else { 2099 splx(s); 2100 } 2101 } 2102 2103 chan->chan_thread = NULL; 2104 2105 /* In case parent is waiting for us to exit. 
*/ 2106 wakeup(&chan->chan_thread); 2107 2108 kthread_exit(0); 2109 } 2110 /* 2111 * scsipi_thread_call_callback: 2112 * 2113 * request to call a callback from the completion thread 2114 */ 2115 int 2116 scsipi_thread_call_callback(struct scsipi_channel *chan, 2117 void (*callback)(struct scsipi_channel *, void *), void *arg) 2118 { 2119 int s; 2120 2121 s = splbio(); 2122 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 2123 /* kernel thread doesn't exist yet */ 2124 splx(s); 2125 return ESRCH; 2126 } 2127 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) { 2128 splx(s); 2129 return EBUSY; 2130 } 2131 scsipi_channel_freeze(chan, 1); 2132 chan->chan_callback = callback; 2133 chan->chan_callback_arg = arg; 2134 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK; 2135 wakeup(&chan->chan_complete); 2136 splx(s); 2137 return(0); 2138 } 2139 2140 /* 2141 * scsipi_async_event: 2142 * 2143 * Handle an asynchronous event from an adapter. 2144 */ 2145 void 2146 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event, 2147 void *arg) 2148 { 2149 int s; 2150 2151 s = splbio(); 2152 switch (event) { 2153 case ASYNC_EVENT_MAX_OPENINGS: 2154 scsipi_async_event_max_openings(chan, 2155 (struct scsipi_max_openings *)arg); 2156 break; 2157 2158 case ASYNC_EVENT_XFER_MODE: 2159 if (chan->chan_bustype->bustype_async_event_xfer_mode) { 2160 chan->chan_bustype->bustype_async_event_xfer_mode( 2161 chan, arg); 2162 } 2163 break; 2164 case ASYNC_EVENT_RESET: 2165 scsipi_async_event_channel_reset(chan); 2166 break; 2167 } 2168 splx(s); 2169 } 2170 2171 /* 2172 * scsipi_async_event_max_openings: 2173 * 2174 * Update the maximum number of outstanding commands a 2175 * device may have. 2176 */ 2177 static void 2178 scsipi_async_event_max_openings(struct scsipi_channel *chan, 2179 struct scsipi_max_openings *mo) 2180 { 2181 struct scsipi_periph *periph; 2182 int minlun, maxlun; 2183 2184 if (mo->mo_lun == -1) { 2185 /* 2186 * Wildcarded; apply it to all LUNs. 
2187 */ 2188 minlun = 0; 2189 maxlun = chan->chan_nluns - 1; 2190 } else 2191 minlun = maxlun = mo->mo_lun; 2192 2193 /* XXX This could really suck with a large LUN space. */ 2194 for (; minlun <= maxlun; minlun++) { 2195 periph = scsipi_lookup_periph(chan, mo->mo_target, minlun); 2196 if (periph == NULL) 2197 continue; 2198 2199 if (mo->mo_openings < periph->periph_openings) 2200 periph->periph_openings = mo->mo_openings; 2201 else if (mo->mo_openings > periph->periph_openings && 2202 (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0) 2203 periph->periph_openings = mo->mo_openings; 2204 } 2205 } 2206 2207 /* 2208 * scsipi_set_xfer_mode: 2209 * 2210 * Set the xfer mode for the specified I_T Nexus. 2211 */ 2212 void 2213 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed) 2214 { 2215 struct scsipi_xfer_mode xm; 2216 struct scsipi_periph *itperiph; 2217 int lun, s; 2218 2219 /* 2220 * Go to the minimal xfer mode. 2221 */ 2222 xm.xm_target = target; 2223 xm.xm_mode = 0; 2224 xm.xm_period = 0; /* ignored */ 2225 xm.xm_offset = 0; /* ignored */ 2226 2227 /* 2228 * Find the first LUN we know about on this I_T Nexus. 2229 */ 2230 for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) { 2231 itperiph = scsipi_lookup_periph(chan, target, lun); 2232 if (itperiph != NULL) 2233 break; 2234 } 2235 if (itperiph != NULL) { 2236 xm.xm_mode = itperiph->periph_cap; 2237 /* 2238 * Now issue the request to the adapter. 2239 */ 2240 s = splbio(); 2241 scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm); 2242 splx(s); 2243 /* 2244 * If we want this to happen immediately, issue a dummy 2245 * command, since most adapters can't really negotiate unless 2246 * they're executing a job. 
2247 */ 2248 if (immed != 0) { 2249 (void) scsipi_test_unit_ready(itperiph, 2250 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST | 2251 XS_CTL_IGNORE_NOT_READY | 2252 XS_CTL_IGNORE_MEDIA_CHANGE); 2253 } 2254 } 2255 } 2256 2257 /* 2258 * scsipi_channel_reset: 2259 * 2260 * handle scsi bus reset 2261 * called at splbio 2262 */ 2263 static void 2264 scsipi_async_event_channel_reset(struct scsipi_channel *chan) 2265 { 2266 struct scsipi_xfer *xs, *xs_next; 2267 struct scsipi_periph *periph; 2268 int target, lun; 2269 2270 /* 2271 * Channel has been reset. Also mark as reset pending REQUEST_SENSE 2272 * commands; as the sense is not available any more. 2273 * can't call scsipi_done() from here, as the command has not been 2274 * sent to the adapter yet (this would corrupt accounting). 2275 */ 2276 2277 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) { 2278 xs_next = TAILQ_NEXT(xs, channel_q); 2279 if (xs->xs_control & XS_CTL_REQSENSE) { 2280 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q); 2281 xs->error = XS_RESET; 2282 if ((xs->xs_control & XS_CTL_ASYNC) != 0) 2283 TAILQ_INSERT_TAIL(&chan->chan_complete, xs, 2284 channel_q); 2285 } 2286 } 2287 wakeup(&chan->chan_complete); 2288 /* Catch xs with pending sense which may not have a REQSENSE xs yet */ 2289 for (target = 0; target < chan->chan_ntargets; target++) { 2290 if (target == chan->chan_id) 2291 continue; 2292 for (lun = 0; lun < chan->chan_nluns; lun++) { 2293 periph = scsipi_lookup_periph(chan, target, lun); 2294 if (periph) { 2295 xs = periph->periph_xscheck; 2296 if (xs) 2297 xs->error = XS_RESET; 2298 } 2299 } 2300 } 2301 } 2302 2303 /* 2304 * scsipi_target_detach: 2305 * 2306 * detach all periph associated with a I_T 2307 * must be called from valid thread context 2308 */ 2309 int 2310 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun, 2311 int flags) 2312 { 2313 struct scsipi_periph *periph; 2314 int ctarget, mintarget, maxtarget; 2315 int clun, minlun, maxlun; 2316 
int error; 2317 2318 if (target == -1) { 2319 mintarget = 0; 2320 maxtarget = chan->chan_ntargets; 2321 } else { 2322 if (target == chan->chan_id) 2323 return EINVAL; 2324 if (target < 0 || target >= chan->chan_ntargets) 2325 return EINVAL; 2326 mintarget = target; 2327 maxtarget = target + 1; 2328 } 2329 2330 if (lun == -1) { 2331 minlun = 0; 2332 maxlun = chan->chan_nluns; 2333 } else { 2334 if (lun < 0 || lun >= chan->chan_nluns) 2335 return EINVAL; 2336 minlun = lun; 2337 maxlun = lun + 1; 2338 } 2339 2340 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) { 2341 if (ctarget == chan->chan_id) 2342 continue; 2343 2344 for (clun = minlun; clun < maxlun; clun++) { 2345 periph = scsipi_lookup_periph(chan, ctarget, clun); 2346 if (periph == NULL) 2347 continue; 2348 error = config_detach(periph->periph_dev, flags); 2349 if (error) 2350 return (error); 2351 } 2352 } 2353 return(0); 2354 } 2355 2356 /* 2357 * scsipi_adapter_addref: 2358 * 2359 * Add a reference to the adapter pointed to by the provided 2360 * link, enabling the adapter if necessary. 2361 */ 2362 int 2363 scsipi_adapter_addref(struct scsipi_adapter *adapt) 2364 { 2365 int s, error = 0; 2366 2367 s = splbio(); 2368 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) { 2369 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1); 2370 if (error) 2371 adapt->adapt_refcnt--; 2372 } 2373 splx(s); 2374 return (error); 2375 } 2376 2377 /* 2378 * scsipi_adapter_delref: 2379 * 2380 * Delete a reference to the adapter pointed to by the provided 2381 * link, disabling the adapter if possible. 
2382 */ 2383 void 2384 scsipi_adapter_delref(struct scsipi_adapter *adapt) 2385 { 2386 int s; 2387 2388 s = splbio(); 2389 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL) 2390 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0); 2391 splx(s); 2392 } 2393 2394 static struct scsipi_syncparam { 2395 int ss_factor; 2396 int ss_period; /* ns * 100 */ 2397 } scsipi_syncparams[] = { 2398 { 0x08, 625 }, /* FAST-160 (Ultra320) */ 2399 { 0x09, 1250 }, /* FAST-80 (Ultra160) */ 2400 { 0x0a, 2500 }, /* FAST-40 40MHz (Ultra2) */ 2401 { 0x0b, 3030 }, /* FAST-40 33MHz (Ultra2) */ 2402 { 0x0c, 5000 }, /* FAST-20 (Ultra) */ 2403 }; 2404 static const int scsipi_nsyncparams = 2405 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]); 2406 2407 int 2408 scsipi_sync_period_to_factor(int period /* ns * 100 */) 2409 { 2410 int i; 2411 2412 for (i = 0; i < scsipi_nsyncparams; i++) { 2413 if (period <= scsipi_syncparams[i].ss_period) 2414 return (scsipi_syncparams[i].ss_factor); 2415 } 2416 2417 return ((period / 100) / 4); 2418 } 2419 2420 int 2421 scsipi_sync_factor_to_period(int factor) 2422 { 2423 int i; 2424 2425 for (i = 0; i < scsipi_nsyncparams; i++) { 2426 if (factor == scsipi_syncparams[i].ss_factor) 2427 return (scsipi_syncparams[i].ss_period); 2428 } 2429 2430 return ((factor * 4) * 100); 2431 } 2432 2433 int 2434 scsipi_sync_factor_to_freq(int factor) 2435 { 2436 int i; 2437 2438 for (i = 0; i < scsipi_nsyncparams; i++) { 2439 if (factor == scsipi_syncparams[i].ss_factor) 2440 return (100000000 / scsipi_syncparams[i].ss_period); 2441 } 2442 2443 return (10000000 / ((factor * 4) * 10)); 2444 } 2445 2446 #ifdef SCSIPI_DEBUG 2447 /* 2448 * Given a scsipi_xfer, dump the request, in all it's glory 2449 */ 2450 void 2451 show_scsipi_xs(struct scsipi_xfer *xs) 2452 { 2453 2454 printf("xs(%p): ", xs); 2455 printf("xs_control(0x%08x)", xs->xs_control); 2456 printf("xs_status(0x%08x)", xs->xs_status); 2457 printf("periph(%p)", xs->xs_periph); 2458 printf("retr(0x%x)", 
xs->xs_retries); 2459 printf("timo(0x%x)", xs->timeout); 2460 printf("cmd(%p)", xs->cmd); 2461 printf("len(0x%x)", xs->cmdlen); 2462 printf("data(%p)", xs->data); 2463 printf("len(0x%x)", xs->datalen); 2464 printf("res(0x%x)", xs->resid); 2465 printf("err(0x%x)", xs->error); 2466 printf("bp(%p)", xs->bp); 2467 show_scsipi_cmd(xs); 2468 } 2469 2470 void 2471 show_scsipi_cmd(struct scsipi_xfer *xs) 2472 { 2473 u_char *b = (u_char *) xs->cmd; 2474 int i = 0; 2475 2476 scsipi_printaddr(xs->xs_periph); 2477 printf(" command: "); 2478 2479 if ((xs->xs_control & XS_CTL_RESET) == 0) { 2480 while (i < xs->cmdlen) { 2481 if (i) 2482 printf(","); 2483 printf("0x%x", b[i++]); 2484 } 2485 printf("-[%d bytes]\n", xs->datalen); 2486 if (xs->datalen) 2487 show_mem(xs->data, min(64, xs->datalen)); 2488 } else 2489 printf("-RESET-\n"); 2490 } 2491 2492 void 2493 show_mem(u_char *address, int num) 2494 { 2495 int x; 2496 2497 printf("------------------------------"); 2498 for (x = 0; x < num; x++) { 2499 if ((x % 16) == 0) 2500 printf("\n%03d: ", x); 2501 printf("%02x ", *address++); 2502 } 2503 printf("\n------------------------------\n"); 2504 } 2505 #endif /* SCSIPI_DEBUG */ 2506