/*	$NetBSD: scsipi_base.c,v 1.101 2004/01/03 01:48:38 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.101 2004/01/03 01:48:38 thorpej Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
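
/*
 * Example of how the bucket hash above is used (this is exactly what
 * scsipi_lookup_periph() below does): a periph at target 2, lun 0 is
 * found with
 *
 *	hash = scsipi_chan_periph_hash(2, 0);
 *	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash)
 *		...
 *
 * so a lookup only walks one (usually very short) collision chain
 * instead of every periph on the channel.
 */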

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}
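
/*
 * Worked example of the resource counting above: an adapter that
 * advertises 4 openings for the whole bus (SCSIPI_CHAN_OPENINGS not
 * set) lets scsipi_get_resource() succeed four times, taking
 * adapt_openings from 4 to 0.  A fifth caller gets 0 back and its
 * xfer stays on the channel queue until some completing command
 * returns an opening via scsipi_put_resource().
 */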

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
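
/*
 * Example of the tag encoding used by the two routines above: tag
 * ID 37 lives in freetags word 37 >> 5 == 1 at bit 37 & 0x1f == 5,
 * so it is allocated by clearing bit 5 of periph_freetags[1] and
 * released by setting that bit again.
 */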

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *	- if the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
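
/*
 * Sketch of how a driver pairs the two routines above (error handling
 * omitted; most callers normally go through the higher level
 * scsipi_command() interface instead of doing this by hand):
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;			(no opening available right now)
 *	(fill in xs->cmd, xs->data, xs->timeout, ...)
 *	error = scsipi_execute_xs(xs);
 *
 * scsipi_put_xs() is then called from scsipi_execute_xs() or
 * scsipi_complete() once the xfer is finished, not directly by the
 * driver.
 */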

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
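
/*
 * The usual pattern for a timed thaw, as used by scsipi_complete()
 * below when a device reports BUSY:
 *
 *	scsipi_periph_freeze(periph, 1);
 *	callout_reset(&periph->periph_callout,
 *	    hz, scsipi_periph_timed_thaw, periph);
 *
 * i.e. freeze the device once now and let the callout undo it one
 * second later, with the requeued command held off in the interim.
 */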

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		/* Dump the whole sense structure, not just a pointer. */
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	return (_4btol(rdcap.addr) + 1);
}
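
/*
 * Example: a disk whose last readable block is 0x003fffff returns
 * that value in rdcap.addr, so scsipi_size() above reports
 * _4btol(rdcap.addr) + 1 == 4194304 blocks (2GB at 512 bytes per
 * block).
 */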

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	int retries;
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, retries, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	int retries;
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph,
		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
1140 */ 1141 inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL); 1142 inqbuf->dev_qual2 = SID_REMOVABLE; 1143 inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4; 1144 inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0; 1145 memcpy(inqbuf->vendor, "EMULEX MT-02 QIC ", 28); 1146 } 1147 #endif /* SCSI_OLD_NOINQUIRY */ 1148 1149 return error; 1150 } 1151 1152 /* 1153 * scsipi_prevent: 1154 * 1155 * Prevent or allow the user to remove the media 1156 */ 1157 int 1158 scsipi_prevent(periph, type, flags) 1159 struct scsipi_periph *periph; 1160 int type, flags; 1161 { 1162 struct scsipi_prevent scsipi_cmd; 1163 1164 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1165 scsipi_cmd.opcode = PREVENT_ALLOW; 1166 scsipi_cmd.how = type; 1167 1168 return (scsipi_command(periph, 1169 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd), 1170 0, 0, SCSIPIRETRIES, 5000, NULL, flags)); 1171 } 1172 1173 /* 1174 * scsipi_start: 1175 * 1176 * Send a START UNIT. 1177 */ 1178 int 1179 scsipi_start(periph, type, flags) 1180 struct scsipi_periph *periph; 1181 int type, flags; 1182 { 1183 struct scsipi_start_stop scsipi_cmd; 1184 1185 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1186 scsipi_cmd.opcode = START_STOP; 1187 scsipi_cmd.byte2 = 0x00; 1188 scsipi_cmd.how = type; 1189 1190 return (scsipi_command(periph, 1191 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd), 1192 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, 1193 NULL, flags)); 1194 } 1195 1196 /* 1197 * scsipi_mode_sense, scsipi_mode_sense_big: 1198 * get a sense page from a device 1199 */ 1200 1201 int 1202 scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout) 1203 struct scsipi_periph *periph; 1204 int byte2, page, len, flags, retries, timeout; 1205 struct scsipi_mode_header *data; 1206 { 1207 struct scsipi_mode_sense scsipi_cmd; 1208 int error; 1209 1210 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1211 scsipi_cmd.opcode = MODE_SENSE; 1212 scsipi_cmd.byte2 = byte2; 1213 scsipi_cmd.page = page; 1214 scsipi_cmd.length = len & 0xff; 1215 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd, 1216 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL, 1217 flags | XS_CTL_DATA_IN); 1218 SC_DEBUG(periph, SCSIPI_DB2, 1219 ("scsipi_mode_sense: error=%d\n", error)); 1220 return (error); 1221 } 1222 1223 int 1224 scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout) 1225 struct scsipi_periph *periph; 1226 int byte2, page, len, flags, retries, timeout; 1227 struct scsipi_mode_header_big *data; 1228 { 1229 struct scsipi_mode_sense_big scsipi_cmd; 1230 int error; 1231 1232 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1233 scsipi_cmd.opcode = MODE_SENSE_BIG; 1234 scsipi_cmd.byte2 = byte2; 1235 scsipi_cmd.page = page; 1236 _lto2b(len, scsipi_cmd.length); 1237 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd, 1238 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL, 1239 flags | XS_CTL_DATA_IN); 1240 SC_DEBUG(periph, SCSIPI_DB2, 1241 ("scsipi_mode_sense_big: error=%d\n", error)); 1242 return (error); 1243 } 1244 1245 int 1246 scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout) 1247 struct scsipi_periph *periph; 1248 int byte2, len, flags, retries, timeout; 1249 struct scsipi_mode_header *data; 1250 { 1251 struct scsipi_mode_select scsipi_cmd; 1252 int error; 1253 1254 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1255 scsipi_cmd.opcode = MODE_SELECT; 1256 scsipi_cmd.byte2 = byte2; 1257 scsipi_cmd.length = len & 0xff; 1258 error = 

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
1367 */ 1368 if (xs->xs_control & XS_CTL_POLL) 1369 return; 1370 wakeup(xs); 1371 goto out; 1372 } 1373 1374 /* 1375 * Catch the extremely common case of I/O completing 1376 * without error; no use in taking a context switch 1377 * if we can handle it in interrupt context. 1378 */ 1379 if (xs->error == XS_NOERROR) { 1380 splx(s); 1381 (void) scsipi_complete(xs); 1382 goto out; 1383 } 1384 1385 /* 1386 * There is an error on this xfer. Put it on the channel's 1387 * completion queue, and wake up the completion thread. 1388 */ 1389 TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q); 1390 splx(s); 1391 wakeup(&chan->chan_complete); 1392 1393 out: 1394 /* 1395 * If there are more xfers on the channel's queue, attempt to 1396 * run them. 1397 */ 1398 scsipi_run_queue(chan); 1399 } 1400 1401 /* 1402 * scsipi_complete: 1403 * 1404 * Completion of a scsipi_xfer. This is the guts of scsipi_done(). 1405 * 1406 * NOTE: This routine MUST be called with valid thread context 1407 * except for the case where the following two conditions are 1408 * true: 1409 * 1410 * xs->error == XS_NOERROR 1411 * XS_CTL_ASYNC is set in xs->xs_control 1412 * 1413 * The semantics of this routine can be tricky, so here is an 1414 * explanation: 1415 * 1416 * 0 Xfer completed successfully. 1417 * 1418 * ERESTART Xfer had an error, but was restarted. 1419 * 1420 * anything else Xfer had an error, return value is Unix 1421 * errno. 1422 * 1423 * If the return value is anything but ERESTART: 1424 * 1425 * - If XS_CTL_ASYNC is set, `xs' has been freed back to 1426 * the pool. 1427 * - If there is a buf associated with the xfer, 1428 * it has been biodone()'d. 1429 */ 1430 int 1431 scsipi_complete(xs) 1432 struct scsipi_xfer *xs; 1433 { 1434 struct scsipi_periph *periph = xs->xs_periph; 1435 struct scsipi_channel *chan = periph->periph_channel; 1436 struct buf *bp; 1437 int error, s; 1438 1439 #ifdef DIAGNOSTIC 1440 if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL) 1441 panic("scsipi_complete: XS_CTL_ASYNC but no buf"); 1442 #endif 1443 /* 1444 * If command terminated with a CHECK CONDITION, we need to issue a 1445 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed 1446 * we'll have the real status. 1447 * Must be processed at splbio() to avoid missing a SCSI bus reset 1448 * for this command. 1449 */ 1450 s = splbio(); 1451 if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) { 1452 /* request sense for a request sense ? */ 1453 if (xs->xs_control & XS_CTL_REQSENSE) { 1454 scsipi_printaddr(periph); 1455 printf("request sense for a request sense ?\n"); 1456 /* XXX maybe we should reset the device ? */ 1457 /* we've been frozen because xs->error != XS_NOERROR */ 1458 scsipi_periph_thaw(periph, 1); 1459 splx(s); 1460 if (xs->resid < xs->datalen) { 1461 printf("we read %d bytes of sense anyway:\n", 1462 xs->datalen - xs->resid); 1463 #ifdef SCSIVERBOSE 1464 scsipi_print_sense_data((void *)xs->data, 0); 1465 #endif 1466 } 1467 return EINVAL; 1468 } 1469 scsipi_request_sense(xs); 1470 } 1471 splx(s); 1472 1473 /* 1474 * If it's a user level request, bypass all usual completion 1475 * processing, let the user work it out.. 
1476 */ 1477 if ((xs->xs_control & XS_CTL_USERCMD) != 0) { 1478 SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n")); 1479 if (xs->error != XS_NOERROR) 1480 scsipi_periph_thaw(periph, 1); 1481 scsipi_user_done(xs); 1482 SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n ")); 1483 return 0; 1484 } 1485 1486 switch (xs->error) { 1487 case XS_NOERROR: 1488 error = 0; 1489 break; 1490 1491 case XS_SENSE: 1492 case XS_SHORTSENSE: 1493 error = (*chan->chan_bustype->bustype_interpret_sense)(xs); 1494 break; 1495 1496 case XS_RESOURCE_SHORTAGE: 1497 /* 1498 * XXX Should freeze channel's queue. 1499 */ 1500 scsipi_printaddr(periph); 1501 printf("adapter resource shortage\n"); 1502 /* FALLTHROUGH */ 1503 1504 case XS_BUSY: 1505 if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) { 1506 struct scsipi_max_openings mo; 1507 1508 /* 1509 * We set the openings to active - 1, assuming that 1510 * the command that got us here is the first one that 1511 * can't fit into the device's queue. If that's not 1512 * the case, I guess we'll find out soon enough. 1513 */ 1514 mo.mo_target = periph->periph_target; 1515 mo.mo_lun = periph->periph_lun; 1516 if (periph->periph_active < periph->periph_openings) 1517 mo.mo_openings = periph->periph_active - 1; 1518 else 1519 mo.mo_openings = periph->periph_openings - 1; 1520 #ifdef DIAGNOSTIC 1521 if (mo.mo_openings < 0) { 1522 scsipi_printaddr(periph); 1523 printf("QUEUE FULL resulted in < 0 openings\n"); 1524 panic("scsipi_done"); 1525 } 1526 #endif 1527 if (mo.mo_openings == 0) { 1528 scsipi_printaddr(periph); 1529 printf("QUEUE FULL resulted in 0 openings\n"); 1530 mo.mo_openings = 1; 1531 } 1532 scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo); 1533 error = ERESTART; 1534 } else if (xs->xs_retries != 0) { 1535 xs->xs_retries--; 1536 /* 1537 * Wait one second, and try again. 1538 */ 1539 if ((xs->xs_control & XS_CTL_POLL) || 1540 (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 1541 delay(1000000); 1542 } else if (!callout_pending(&periph->periph_callout)) { 1543 scsipi_periph_freeze(periph, 1); 1544 callout_reset(&periph->periph_callout, 1545 hz, scsipi_periph_timed_thaw, periph); 1546 } 1547 error = ERESTART; 1548 } else 1549 error = EBUSY; 1550 break; 1551 1552 case XS_REQUEUE: 1553 error = ERESTART; 1554 break; 1555 1556 case XS_SELTIMEOUT: 1557 case XS_TIMEOUT: 1558 /* 1559 * If the device hasn't gone away, honor retry counts. 1560 * 1561 * Note that if we're in the middle of probing it, 1562 * it won't be found because it isn't here yet so 1563 * we won't honor the retry count in that case. 1564 */ 1565 if (scsipi_lookup_periph(chan, periph->periph_target, 1566 periph->periph_lun) && xs->xs_retries != 0) { 1567 xs->xs_retries--; 1568 error = ERESTART; 1569 } else 1570 error = EIO; 1571 break; 1572 1573 case XS_RESET: 1574 if (xs->xs_control & XS_CTL_REQSENSE) { 1575 /* 1576 * request sense interrupted by reset: signal it 1577 * with EINTR return code. 
1578 */ 1579 error = EINTR; 1580 } else { 1581 if (xs->xs_retries != 0) { 1582 xs->xs_retries--; 1583 error = ERESTART; 1584 } else 1585 error = EIO; 1586 } 1587 break; 1588 1589 case XS_DRIVER_STUFFUP: 1590 scsipi_printaddr(periph); 1591 printf("generic HBA error\n"); 1592 error = EIO; 1593 break; 1594 default: 1595 scsipi_printaddr(periph); 1596 printf("invalid return code from adapter: %d\n", xs->error); 1597 error = EIO; 1598 break; 1599 } 1600 1601 s = splbio(); 1602 if (error == ERESTART) { 1603 /* 1604 * If we get here, the periph has been thawed and frozen 1605 * again if we had to issue recovery commands. Alternatively, 1606 * it may have been frozen again and in a timed thaw. In 1607 * any case, we thaw the periph once we re-enqueue the 1608 * command. Once the periph is fully thawed, it will begin 1609 * operation again. 1610 */ 1611 xs->error = XS_NOERROR; 1612 xs->status = SCSI_OK; 1613 xs->xs_status &= ~XS_STS_DONE; 1614 xs->xs_requeuecnt++; 1615 error = scsipi_enqueue(xs); 1616 if (error == 0) { 1617 scsipi_periph_thaw(periph, 1); 1618 splx(s); 1619 return (ERESTART); 1620 } 1621 } 1622 1623 /* 1624 * scsipi_done() freezes the queue if not XS_NOERROR. 1625 * Thaw it here. 1626 */ 1627 if (xs->error != XS_NOERROR) 1628 scsipi_periph_thaw(periph, 1); 1629 1630 /* 1631 * Set buffer fields in case the periph 1632 * switch done func uses them 1633 */ 1634 if ((bp = xs->bp) != NULL) { 1635 if (error) { 1636 bp->b_error = error; 1637 bp->b_flags |= B_ERROR; 1638 bp->b_resid = bp->b_bcount; 1639 } else { 1640 bp->b_error = 0; 1641 bp->b_resid = xs->resid; 1642 } 1643 } 1644 1645 if (periph->periph_switch->psw_done) 1646 periph->periph_switch->psw_done(xs); 1647 1648 if (bp) 1649 biodone(bp); 1650 1651 if (xs->xs_control & XS_CTL_ASYNC) 1652 scsipi_put_xs(xs); 1653 splx(s); 1654 1655 return (error); 1656 } 1657 1658 /* 1659 * Issue a request sense for the given scsipi_xfer. Called when the xfer 1660 * returns with a CHECK_CONDITION status. Must be called in valid thread 1661 * context and at splbio(). 1662 */ 1663 1664 void 1665 scsipi_request_sense(xs) 1666 struct scsipi_xfer *xs; 1667 { 1668 struct scsipi_periph *periph = xs->xs_periph; 1669 int flags, error; 1670 struct scsipi_sense cmd; 1671 1672 periph->periph_flags |= PERIPH_SENSE; 1673 1674 /* if command was polling, request sense will too */ 1675 flags = xs->xs_control & XS_CTL_POLL; 1676 /* Polling commands can't sleep */ 1677 if (flags) 1678 flags |= XS_CTL_NOSLEEP; 1679 1680 flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN | 1681 XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH; 1682 1683 memset(&cmd, 0, sizeof(cmd)); 1684 cmd.opcode = REQUEST_SENSE; 1685 cmd.length = sizeof(struct scsipi_sense_data); 1686 1687 error = scsipi_command(periph, 1688 (struct scsipi_generic *) &cmd, sizeof(cmd), 1689 (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data), 1690 0, 1000, NULL, flags); 1691 periph->periph_flags &= ~PERIPH_SENSE; 1692 periph->periph_xscheck = NULL; 1693 switch(error) { 1694 case 0: 1695 /* we have a valid sense */ 1696 xs->error = XS_SENSE; 1697 return; 1698 case EINTR: 1699 /* REQUEST_SENSE interrupted by bus reset. */ 1700 xs->error = XS_RESET; 1701 return; 1702 case EIO: 1703 /* request sense coudn't be performed */ 1704 /* 1705 * XXX this isn't quite right but we don't have anything 1706 * better for now 1707 */ 1708 xs->error = XS_DRIVER_STUFFUP; 1709 return; 1710 default: 1711 /* Notify that request sense failed. 
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
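
/*
 * Example of the reinsertion order implemented above: if the channel
 * queue holds xsA (same periph, never requeued) followed by xfers
 * for other periphs, a command re-enqueued after ERESTART (its
 * xs_requeuecnt now 1) is placed next to xsA near the head of the
 * queue rather than at the tail, so a restarted command is not
 * starved behind I/O that was issued after it.
 */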
1849 */ 1850 if (scsipi_grow_resources(chan) == 0) { 1851 /* 1852 * Wasn't able to grow resources, 1853 * nothing more we can do. 1854 */ 1855 if (xs->xs_control & XS_CTL_POLL) { 1856 scsipi_printaddr(xs->xs_periph); 1857 printf("polling command but no " 1858 "adapter resources"); 1859 /* We'll panic shortly... */ 1860 } 1861 splx(s); 1862 1863 /* 1864 * XXX: We should be able to note that 1865 * XXX: that resources are needed here! 1866 */ 1867 return; 1868 } 1869 /* 1870 * scsipi_grow_resources() allocated the resource 1871 * for us. 1872 */ 1873 } 1874 1875 /* 1876 * We have a resource to run this xfer, do it! 1877 */ 1878 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q); 1879 1880 /* 1881 * If the command is to be tagged, allocate a tag ID 1882 * for it. 1883 */ 1884 if (XS_CTL_TAGTYPE(xs) != 0) 1885 scsipi_get_tag(xs); 1886 else 1887 periph->periph_flags |= PERIPH_UNTAG; 1888 periph->periph_sent++; 1889 splx(s); 1890 1891 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs); 1892 } 1893 #ifdef DIAGNOSTIC 1894 panic("scsipi_run_queue: impossible"); 1895 #endif 1896 } 1897 1898 /* 1899 * scsipi_execute_xs: 1900 * 1901 * Begin execution of an xfer, waiting for it to complete, if necessary. 1902 */ 1903 int 1904 scsipi_execute_xs(xs) 1905 struct scsipi_xfer *xs; 1906 { 1907 struct scsipi_periph *periph = xs->xs_periph; 1908 struct scsipi_channel *chan = periph->periph_channel; 1909 int oasync, async, poll, retries, error, s; 1910 1911 xs->xs_status &= ~XS_STS_DONE; 1912 xs->error = XS_NOERROR; 1913 xs->resid = xs->datalen; 1914 xs->status = SCSI_OK; 1915 1916 #ifdef SCSIPI_DEBUG 1917 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) { 1918 printf("scsipi_execute_xs: "); 1919 show_scsipi_xs(xs); 1920 printf("\n"); 1921 } 1922 #endif 1923 1924 /* 1925 * Deal with command tagging: 1926 * 1927 * - If the device's current operating mode doesn't 1928 * include tagged queueing, clear the tag mask. 1929 * 1930 * - If the device's current operating mode *does* 1931 * include tagged queueing, set the tag_type in 1932 * the xfer to the appropriate byte for the tag 1933 * message. 1934 */ 1935 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 || 1936 (xs->xs_control & XS_CTL_REQSENSE)) { 1937 xs->xs_control &= ~XS_CTL_TAGMASK; 1938 xs->xs_tag_type = 0; 1939 } else { 1940 /* 1941 * If the request doesn't specify a tag, give Head 1942 * tags to URGENT operations and Ordered tags to 1943 * everything else. 1944 */ 1945 if (XS_CTL_TAGTYPE(xs) == 0) { 1946 if (xs->xs_control & XS_CTL_URGENT) 1947 xs->xs_control |= XS_CTL_HEAD_TAG; 1948 else 1949 xs->xs_control |= XS_CTL_ORDERED_TAG; 1950 } 1951 1952 switch (XS_CTL_TAGTYPE(xs)) { 1953 case XS_CTL_ORDERED_TAG: 1954 xs->xs_tag_type = MSG_ORDERED_Q_TAG; 1955 break; 1956 1957 case XS_CTL_SIMPLE_TAG: 1958 xs->xs_tag_type = MSG_SIMPLE_Q_TAG; 1959 break; 1960 1961 case XS_CTL_HEAD_TAG: 1962 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG; 1963 break; 1964 1965 default: 1966 scsipi_printaddr(periph); 1967 printf("invalid tag mask 0x%08x\n", 1968 XS_CTL_TAGTYPE(xs)); 1969 panic("scsipi_execute_xs"); 1970 } 1971 } 1972 1973 /* If the adaptor wants us to poll, poll. */ 1974 if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY) 1975 xs->xs_control |= XS_CTL_POLL; 1976 1977 /* 1978 * If we don't yet have a completion thread, or we are to poll for 1979 * completion, clear the ASYNC flag. 
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
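
/*
 * Life cycle of a synchronous (non-ASYNC) command, for reference:
 * scsipi_execute_xs() enqueues the xfer and kicks the queue; when
 * the adapter calls scsipi_done(), the caller sleeping in the
 * tsleep() loop above is woken and performs the error handling
 * itself via scsipi_complete(), looping on ERESTART until the xfer
 * either succeeds or fails for good, at which point it is released
 * with scsipi_put_xs().
 */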
2082 */ 2083 void 2084 scsipi_completion_thread(arg) 2085 void *arg; 2086 { 2087 struct scsipi_channel *chan = arg; 2088 struct scsipi_xfer *xs; 2089 int s; 2090 2091 if (chan->chan_init_cb) 2092 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg); 2093 2094 s = splbio(); 2095 chan->chan_flags |= SCSIPI_CHAN_TACTIVE; 2096 splx(s); 2097 for (;;) { 2098 s = splbio(); 2099 xs = TAILQ_FIRST(&chan->chan_complete); 2100 if (xs == NULL && chan->chan_tflags == 0) { 2101 /* nothing to do; wait */ 2102 (void) tsleep(&chan->chan_complete, PRIBIO, 2103 "sccomp", 0); 2104 splx(s); 2105 continue; 2106 } 2107 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) { 2108 /* call chan_callback from thread context */ 2109 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK; 2110 chan->chan_callback(chan, chan->chan_callback_arg); 2111 splx(s); 2112 continue; 2113 } 2114 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) { 2115 /* attempt to get more openings for this channel */ 2116 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES; 2117 scsipi_adapter_request(chan, 2118 ADAPTER_REQ_GROW_RESOURCES, NULL); 2119 scsipi_channel_thaw(chan, 1); 2120 splx(s); 2121 continue; 2122 } 2123 if (chan->chan_tflags & SCSIPI_CHANT_KICK) { 2124 /* explicitly run the queues for this channel */ 2125 chan->chan_tflags &= ~SCSIPI_CHANT_KICK; 2126 scsipi_run_queue(chan); 2127 splx(s); 2128 continue; 2129 } 2130 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) { 2131 splx(s); 2132 break; 2133 } 2134 if (xs) { 2135 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q); 2136 splx(s); 2137 2138 /* 2139 * Have an xfer with an error; process it. 2140 */ 2141 (void) scsipi_complete(xs); 2142 2143 /* 2144 * Kick the queue; keep it running if it was stopped 2145 * for some reason. 2146 */ 2147 scsipi_run_queue(chan); 2148 } else { 2149 splx(s); 2150 } 2151 } 2152 2153 chan->chan_thread = NULL; 2154 2155 /* In case parent is waiting for us to exit. */ 2156 wakeup(&chan->chan_thread); 2157 2158 kthread_exit(0); 2159 } 2160 2161 /* 2162 * scsipi_create_completion_thread: 2163 * 2164 * Callback to actually create the completion thread. 2165 */ 2166 void 2167 scsipi_create_completion_thread(arg) 2168 void *arg; 2169 { 2170 struct scsipi_channel *chan = arg; 2171 struct scsipi_adapter *adapt = chan->chan_adapter; 2172 2173 if (kthread_create1(scsipi_completion_thread, chan, 2174 &chan->chan_thread, "%s", chan->chan_name)) { 2175 printf("%s: unable to create completion thread for " 2176 "channel %d\n", adapt->adapt_dev->dv_xname, 2177 chan->chan_channel); 2178 panic("scsipi_create_completion_thread"); 2179 } 2180 } 2181 2182 /* 2183 * scsipi_thread_call_callback: 2184 * 2185 * request to call a callback from the completion thread 2186 */ 2187 int 2188 scsipi_thread_call_callback(chan, callback, arg) 2189 struct scsipi_channel *chan; 2190 void (*callback) __P((struct scsipi_channel *, void *)); 2191 void *arg; 2192 { 2193 int s; 2194 2195 s = splbio(); 2196 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 2197 /* kernel thread doesn't exist yet */ 2198 splx(s); 2199 return ESRCH; 2200 } 2201 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) { 2202 splx(s); 2203 return EBUSY; 2204 } 2205 scsipi_channel_freeze(chan, 1); 2206 chan->chan_callback = callback; 2207 chan->chan_callback_arg = arg; 2208 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK; 2209 wakeup(&chan->chan_complete); 2210 splx(s); 2211 return(0); 2212 } 2213 2214 /* 2215 * scsipi_async_event: 2216 * 2217 * Handle an asynchronous event from an adapter. 
/*
 * scsipi_async_event:
 *
 *      Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
        struct scsipi_channel *chan;
        scsipi_async_event_t event;
        void *arg;
{
        int s;

        s = splbio();
        switch (event) {
        case ASYNC_EVENT_MAX_OPENINGS:
                scsipi_async_event_max_openings(chan,
                    (struct scsipi_max_openings *)arg);
                break;

        case ASYNC_EVENT_XFER_MODE:
                scsipi_async_event_xfer_mode(chan,
                    (struct scsipi_xfer_mode *)arg);
                break;

        case ASYNC_EVENT_RESET:
                scsipi_async_event_channel_reset(chan);
                break;
        }
        splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *      Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
        struct scsipi_periph *periph;
{
        int period, freq, speed, mbs;

        if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
                return;

        aprint_normal("%s: ", periph->periph_dev->dv_xname);
        if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
                period = scsipi_sync_factor_to_period(periph->periph_period);
                aprint_normal("sync (%d.%02dns offset %d)",
                    period / 100, period % 100, periph->periph_offset);
        } else
                aprint_normal("async");

        if (periph->periph_mode & PERIPH_CAP_WIDE32)
                aprint_normal(", 32-bit");
        else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
                aprint_normal(", 16-bit");
        else
                aprint_normal(", 8-bit");

        if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
                freq = scsipi_sync_factor_to_freq(periph->periph_period);
                speed = freq;
                if (periph->periph_mode & PERIPH_CAP_WIDE32)
                        speed *= 4;
                else if (periph->periph_mode &
                    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
                        speed *= 2;
                mbs = speed / 1000;
                if (mbs > 0)
                        aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
                else
                        aprint_normal(" (%dKB/s)", speed % 1000);
        }

        aprint_normal(" transfers");

        if (periph->periph_mode & PERIPH_CAP_TQING)
                aprint_normal(", tagged queueing");

        aprint_normal("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *      Update the maximum number of outstanding commands a
 *      device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
        struct scsipi_channel *chan;
        struct scsipi_max_openings *mo;
{
        struct scsipi_periph *periph;
        int minlun, maxlun;

        if (mo->mo_lun == -1) {
                /*
                 * Wildcarded; apply it to all LUNs.
                 */
                minlun = 0;
                maxlun = chan->chan_nluns - 1;
        } else
                minlun = maxlun = mo->mo_lun;

        /* XXX This could really suck with a large LUN space. */
        for (; minlun <= maxlun; minlun++) {
                periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
                if (periph == NULL)
                        continue;

                if (mo->mo_openings < periph->periph_openings)
                        periph->periph_openings = mo->mo_openings;
                else if (mo->mo_openings > periph->periph_openings &&
                    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
                        periph->periph_openings = mo->mo_openings;
        }
}
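/*
 * Example (hypothetical sketch): how an adapter might report that a
 * target can now accept more tagged commands, applying the new limit
 * to every LUN behind the target.  The opening count is illustrative.
 */
#if 0
static void
example_report_openings(struct scsipi_channel *chan, int target)
{
        struct scsipi_max_openings mo;

        mo.mo_target = target;
        mo.mo_lun = -1;                 /* wildcard: all LUNs */
        mo.mo_openings = 16;            /* illustrative value */

        /* scsipi_async_event() raises splbio() around the update. */
        scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
}
#endif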
/*
 * scsipi_async_event_xfer_mode:
 *
 *      Update the xfer mode for all periphs sharing the
 *      specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
        struct scsipi_channel *chan;
        struct scsipi_xfer_mode *xm;
{
        struct scsipi_periph *periph;
        int lun, announce, mode, period, offset;

        for (lun = 0; lun < chan->chan_nluns; lun++) {
                periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
                if (periph == NULL)
                        continue;
                announce = 0;

                /*
                 * Clamp the xfer mode down to this periph's capabilities.
                 */
                mode = xm->xm_mode & periph->periph_cap;
                if (mode & PERIPH_CAP_SYNC) {
                        period = xm->xm_period;
                        offset = xm->xm_offset;
                } else {
                        period = 0;
                        offset = 0;
                }

                /*
                 * If we do not have a valid xfer mode yet, or the parameters
                 * are different, announce them.
                 */
                if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
                    periph->periph_mode != mode ||
                    periph->periph_period != period ||
                    periph->periph_offset != offset)
                        announce = 1;

                periph->periph_mode = mode;
                periph->periph_period = period;
                periph->periph_offset = offset;
                periph->periph_flags |= PERIPH_MODE_VALID;

                if (announce)
                        scsipi_print_xfer_mode(periph);
        }
}

/*
 * scsipi_set_xfer_mode:
 *
 *      Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
        struct scsipi_channel *chan;
        int target, immed;
{
        struct scsipi_xfer_mode xm;
        struct scsipi_periph *itperiph;
        int lun, s;

        /*
         * Go to the minimal xfer mode.
         */
        xm.xm_target = target;
        xm.xm_mode = 0;
        xm.xm_period = 0;               /* ignored */
        xm.xm_offset = 0;               /* ignored */

        /*
         * Find the first LUN we know about on this I_T Nexus.
         */
        for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
                itperiph = scsipi_lookup_periph(chan, target, lun);
                if (itperiph != NULL)
                        break;
        }
        if (itperiph != NULL) {
                xm.xm_mode = itperiph->periph_cap;

                /*
                 * Now issue the request to the adapter.
                 */
                s = splbio();
                scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
                splx(s);

                /*
                 * If we want this to happen immediately, issue a dummy
                 * command, since most adapters can't really negotiate unless
                 * they're executing a job.
                 */
                if (immed != 0) {
                        (void) scsipi_test_unit_ready(itperiph,
                            XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
                            XS_CTL_IGNORE_NOT_READY |
                            XS_CTL_IGNORE_MEDIA_CHANGE);
                }
        }
}
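/*
 * Example (hypothetical sketch): once negotiation completes, an adapter
 * reports the agreed parameters with ASYNC_EVENT_XFER_MODE; the values
 * below are illustrative.  scsipi_async_event_xfer_mode() clamps them
 * to each periph's capabilities and announces any change.
 */
#if 0
static void
example_report_xfer_mode(struct scsipi_channel *chan, int target)
{
        struct scsipi_xfer_mode xm;

        xm.xm_target = target;
        xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_TQING;
        xm.xm_period = 0x0c;            /* FAST-20 sync factor */
        xm.xm_offset = 15;              /* illustrative offset */

        scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
}
#endif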
/*
 * scsipi_async_event_channel_reset:
 *
 *      Handle a SCSI bus reset.  Called at splbio().
 */
void
scsipi_async_event_channel_reset(chan)
        struct scsipi_channel *chan;
{
        struct scsipi_xfer *xs, *xs_next;
        struct scsipi_periph *periph;
        int target, lun;

        /*
         * The channel has been reset.  Also mark still-queued REQUEST_SENSE
         * commands as reset, since their sense data is no longer available.
         * We can't call scsipi_done() from here, as those commands have not
         * been sent to the adapter yet (doing so would corrupt accounting).
         */
        for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
                xs_next = TAILQ_NEXT(xs, channel_q);
                if (xs->xs_control & XS_CTL_REQSENSE) {
                        TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
                        xs->error = XS_RESET;
                        if ((xs->xs_control & XS_CTL_ASYNC) != 0)
                                TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
                                    channel_q);
                }
        }
        wakeup(&chan->chan_complete);

        /* Catch xs with pending sense which may not have a REQSENSE xs yet. */
        for (target = 0; target < chan->chan_ntargets; target++) {
                if (target == chan->chan_id)
                        continue;
                for (lun = 0; lun < chan->chan_nluns; lun++) {
                        periph = scsipi_lookup_periph(chan, target, lun);
                        if (periph) {
                                xs = periph->periph_xscheck;
                                if (xs)
                                        xs->error = XS_RESET;
                        }
                }
        }
}

/*
 * scsipi_target_detach:
 *
 *      Detach all periphs associated with an I_T Nexus.
 *      Must be called from valid thread context.
 */
int
scsipi_target_detach(chan, target, lun, flags)
        struct scsipi_channel *chan;
        int target, lun;
        int flags;
{
        struct scsipi_periph *periph;
        int ctarget, mintarget, maxtarget;
        int clun, minlun, maxlun;
        int error;

        if (target == -1) {
                mintarget = 0;
                maxtarget = chan->chan_ntargets;
        } else {
                if (target == chan->chan_id)
                        return (EINVAL);
                if (target < 0 || target >= chan->chan_ntargets)
                        return (EINVAL);
                mintarget = target;
                maxtarget = target + 1;
        }

        if (lun == -1) {
                minlun = 0;
                maxlun = chan->chan_nluns;
        } else {
                if (lun < 0 || lun >= chan->chan_nluns)
                        return (EINVAL);
                minlun = lun;
                maxlun = lun + 1;
        }

        for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
                if (ctarget == chan->chan_id)
                        continue;

                for (clun = minlun; clun < maxlun; clun++) {
                        periph = scsipi_lookup_periph(chan, ctarget, clun);
                        if (periph == NULL)
                                continue;
                        error = config_detach(periph->periph_dev, flags);
                        if (error)
                                return (error);
                        scsipi_remove_periph(chan, periph);
                        free(periph, M_DEVBUF);
                }
        }
        return (0);
}

/*
 * scsipi_adapter_addref:
 *
 *      Add a reference to the adapter pointed to by the provided
 *      link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
        struct scsipi_adapter *adapt;
{
        int s, error = 0;

        s = splbio();
        if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
                error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
                if (error)
                        adapt->adapt_refcnt--;
        }
        splx(s);
        return (error);
}
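/*
 * Example (hypothetical sketch): adapter references are taken around
 * use of a device, so that an adapter with an adapt_enable hook is
 * powered up on first use and may be disabled again on last release.
 */
#if 0
static int
example_use_adapter(struct scsipi_periph *periph)
{
        struct scsipi_adapter *adapt =
            periph->periph_channel->chan_adapter;
        int error;

        if ((error = scsipi_adapter_addref(adapt)) != 0)
                return (error); /* adapt_enable failed */
        /* ... issue commands to the device ... */
        scsipi_adapter_delref(adapt);
        return (0);
}
#endif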
/*
 * scsipi_adapter_delref:
 *
 *      Delete a reference to the adapter pointed to by the provided
 *      link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
        struct scsipi_adapter *adapt;
{
        int s;

        s = splbio();
        if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
                (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
        splx(s);
}

struct scsipi_syncparam {
        int     ss_factor;
        int     ss_period;      /* ns * 100 */
} scsipi_syncparams[] = {
        { 0x08,  625 },         /* FAST-160 (Ultra320) */
        { 0x09, 1250 },         /* FAST-80 (Ultra160) */
        { 0x0a, 2500 },         /* FAST-40 40MHz (Ultra2) */
        { 0x0b, 3030 },         /* FAST-40 33MHz (Ultra2) */
        { 0x0c, 5000 },         /* FAST-20 (Ultra) */
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(period)
        int period;     /* ns * 100 */
{
        int i;

        for (i = 0; i < scsipi_nsyncparams; i++) {
                if (period <= scsipi_syncparams[i].ss_period)
                        return (scsipi_syncparams[i].ss_factor);
        }

        return ((period / 100) / 4);
}

int
scsipi_sync_factor_to_period(factor)
        int factor;
{
        int i;

        for (i = 0; i < scsipi_nsyncparams; i++) {
                if (factor == scsipi_syncparams[i].ss_factor)
                        return (scsipi_syncparams[i].ss_period);
        }

        return ((factor * 4) * 100);
}

int
scsipi_sync_factor_to_freq(factor)
        int factor;
{
        int i;

        for (i = 0; i < scsipi_nsyncparams; i++) {
                if (factor == scsipi_syncparams[i].ss_factor)
                        return (100000000 / scsipi_syncparams[i].ss_period);
        }

        return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(xs)
        struct scsipi_xfer *xs;
{

        printf("xs(%p): ", xs);
        printf("xs_control(0x%08x)", xs->xs_control);
        printf("xs_status(0x%08x)", xs->xs_status);
        printf("periph(%p)", xs->xs_periph);
        printf("retr(0x%x)", xs->xs_retries);
        printf("timo(0x%x)", xs->timeout);
        printf("cmd(%p)", xs->cmd);
        printf("len(0x%x)", xs->cmdlen);
        printf("data(%p)", xs->data);
        printf("len(0x%x)", xs->datalen);
        printf("res(0x%x)", xs->resid);
        printf("err(0x%x)", xs->error);
        printf("bp(%p)", xs->bp);
        show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
        struct scsipi_xfer *xs;
{
        u_char *b = (u_char *)xs->cmd;
        int i = 0;

        scsipi_printaddr(xs->xs_periph);
        printf(" command: ");

        if ((xs->xs_control & XS_CTL_RESET) == 0) {
                while (i < xs->cmdlen) {
                        if (i)
                                printf(",");
                        printf("0x%x", b[i++]);
                }
                printf("-[%d bytes]\n", xs->datalen);
                if (xs->datalen)
                        show_mem(xs->data, min(64, xs->datalen));
        } else
                printf("-RESET-\n");
}

void
show_mem(address, num)
        u_char *address;
        int num;
{
        int x;

        printf("------------------------------");
        for (x = 0; x < num; x++) {
                if ((x % 16) == 0)
                        printf("\n%03d: ", x);
                printf("%02x ", *address++);
        }
        printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */
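/*
 * Example (hypothetical sketch): exercising the sync parameter helpers
 * above.  A FAST-20 factor of 0x0c maps to ss_period 5000 (50.00 ns,
 * since periods are stored as ns * 100) and a frequency of 20000 kHz
 * (20 MHz; the freq helper returns kHz).  A factor outside the table,
 * e.g. 50, falls back to the factor * 4 ns rule (200 ns, 5000 kHz).
 */
#if 0
static void
example_sync_math(void)
{
        int period, freq;

        period = scsipi_sync_factor_to_period(0x0c);    /* 5000 */
        freq = scsipi_sync_factor_to_freq(0x0c);        /* 20000 (kHz) */
        printf("FAST-20: %d.%02dns, %dkHz\n",
            period / 100, period % 100, freq);
}
#endif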