/*	$NetBSD: scsipi_base.c,v 1.88 2003/04/19 19:12:59 fvdl Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.88 2003/04/19 19:12:59 fvdl Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
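
/*
 * Usage sketch (illustrative, not part of the original file): an HBA
 * driver fills in its scsipi_channel at attach time and then calls
 * scsipi_channel_init().  The softc name and the field values below
 * are hypothetical; only the scsipi structures and calls are real.
 *
 *	struct foo_softc *sc = ...;	(hypothetical adapter softc)
 *	struct scsipi_channel *chan = &sc->sc_channel;
 *
 *	memset(chan, 0, sizeof(*chan));
 *	chan->chan_adapter = &sc->sc_adapter;
 *	chan->chan_bustype = &scsi_bustype;
 *	chan->chan_channel = 0;
 *	chan->chan_ntargets = 8;
 *	chan->chan_nluns = 8;
 *	chan->chan_id = 7;		(the adapter's own ID)
 *	if (scsipi_channel_init(chan) != 0)
 *		printf("unable to initialize channel\n");
 */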

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
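
/*
 * Worked example (added for illustration): tags are kept in a bitmap
 * of 32-bit words, so a tag ID encodes as (word << 5) | bit.  If
 * periph_freetags[0] is exhausted and bit 5 is the lowest bit set in
 * periph_freetags[1], scsipi_get_tag() yields tag (1 << 5) | 5 == 37;
 * scsipi_put_tag() reverses this with word = 37 >> 5 == 1 and
 * bit = 37 & 0x1f == 5.
 */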

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		/* Zero the xfer before initializing the callout in it. */
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
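
/*
 * Lifecycle sketch (illustrative, not part of the original file):
 * periph drivers do not usually call scsipi_get_xs()/scsipi_put_xs()
 * directly; scsipi_command() allocates the xfer, runs it via
 * scsipi_execute_xs(), and releases it on completion.  A minimal
 * synchronous request looks like:
 *
 *	struct scsipi_test_unit_ready cmd;
 *	int error;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = TEST_UNIT_READY;
 *	error = scsipi_command(periph,
 *	    (struct scsipi_generic *)&cmd, sizeof(cmd),
 *	    0, 0, SCSIPIRETRIES, 10000, NULL, 0);
 *
 * Passing XS_CTL_* flags in the last argument selects polled,
 * asynchronous, or no-sleep variants of the same path.
 */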

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
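
/*
 * Usage note (illustrative, not part of the original file): freezes
 * are counted, so every freeze must eventually be matched by a thaw.
 * The timed-thaw helpers let error recovery back a device off without
 * sleeping; scsipi_complete() uses exactly this pattern to retry a
 * busy device after one second:
 *
 *	scsipi_periph_freeze(periph, 1);
 *	callout_reset(&periph->periph_callout,
 *	    hz, scsipi_periph_timed_thaw, periph);
 */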

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
#ifndef SCSIVERBOSE
	u_int32_t info;
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
#ifndef SCSIVERBOSE
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
#endif
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
	/* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		/* Dump the whole sense structure, not just a pointer's
		   worth of it. */
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}
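
/*
 * Illustrative sketch (not part of the original file): a periph driver
 * can pre-empt the default sense interpretation above by providing
 * psw_error in its scsipi_periphsw; returning EJUSTRETURN hands the
 * xfer back to the default logic.  A hypothetical handler that only
 * swallows recovered errors might look like:
 *
 *	int
 *	foo_error(struct scsipi_xfer *xs)
 *	{
 *
 *		if ((xs->sense.scsi_sense.flags & SSD_KEY) ==
 *		    SKEY_RECOVERED_ERROR)
 *			return (0);
 *		return (EJUSTRETURN);
 *	}
 */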

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_int64_t
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	int retries;
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, retries, 10000, NULL, flags));
}
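
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * commonly retry TEST UNIT READY while a device spins up.  A
 * hypothetical wait loop, silencing the expected failures:
 *
 *	for (tries = 0; tries < 5; tries++) {
 *		if (scsipi_test_unit_ready(periph, XS_CTL_SILENT) == 0)
 *			break;
 *		(void) tsleep(&tries, PRIBIO, "turwait", hz);
 *	}
 */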
1124 */ 1125 inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL); 1126 inqbuf->dev_qual2 = SID_REMOVABLE; 1127 inqbuf->additional_length = 3 + 28; 1128 inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0; 1129 memcpy(inqbuf->vendor, "EMULEX ", sizeof(inqbuf->vendor)); 1130 memcpy(inqbuf->product, "MT-02 QIC ", 1131 sizeof(inqbuf->product)); 1132 memcpy(inqbuf->revision, " ", sizeof(inqbuf->revision)); 1133 } 1134 #endif /* SCSI_OLD_NOINQUIRY */ 1135 1136 return error; 1137 } 1138 1139 /* 1140 * scsipi_prevent: 1141 * 1142 * Prevent or allow the user to remove the media 1143 */ 1144 int 1145 scsipi_prevent(periph, type, flags) 1146 struct scsipi_periph *periph; 1147 int type, flags; 1148 { 1149 struct scsipi_prevent scsipi_cmd; 1150 1151 if (periph->periph_quirks & PQUIRK_NODOORLOCK) 1152 return (0); 1153 1154 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1155 scsipi_cmd.opcode = PREVENT_ALLOW; 1156 scsipi_cmd.how = type; 1157 1158 return (scsipi_command(periph, 1159 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd), 1160 0, 0, SCSIPIRETRIES, 5000, NULL, flags)); 1161 } 1162 1163 /* 1164 * scsipi_start: 1165 * 1166 * Send a START UNIT. 1167 */ 1168 int 1169 scsipi_start(periph, type, flags) 1170 struct scsipi_periph *periph; 1171 int type, flags; 1172 { 1173 struct scsipi_start_stop scsipi_cmd; 1174 1175 if (periph->periph_quirks & PQUIRK_NOSTARTUNIT) 1176 return 0; 1177 1178 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1179 scsipi_cmd.opcode = START_STOP; 1180 scsipi_cmd.byte2 = 0x00; 1181 scsipi_cmd.how = type; 1182 1183 return (scsipi_command(periph, 1184 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd), 1185 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, 1186 NULL, flags)); 1187 } 1188 1189 /* 1190 * scsipi_mode_sense, scsipi_mode_sense_big: 1191 * get a sense page from a device 1192 */ 1193 1194 int 1195 scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout) 1196 struct scsipi_periph *periph; 1197 int byte2, page, len, flags, retries, timeout; 1198 struct scsipi_mode_header *data; 1199 { 1200 struct scsipi_mode_sense scsipi_cmd; 1201 int error; 1202 1203 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1204 scsipi_cmd.opcode = MODE_SENSE; 1205 scsipi_cmd.byte2 = byte2; 1206 scsipi_cmd.page = page; 1207 if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI) 1208 _lto2b(len, scsipi_cmd.u_len.atapi.length); 1209 else 1210 scsipi_cmd.u_len.scsi.length = len & 0xff; 1211 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd, 1212 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL, 1213 flags | XS_CTL_DATA_IN); 1214 SC_DEBUG(periph, SCSIPI_DB2, 1215 ("scsipi_mode_sense: error=%d\n", error)); 1216 return (error); 1217 } 1218 1219 int 1220 scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout) 1221 struct scsipi_periph *periph; 1222 int byte2, page, len, flags, retries, timeout; 1223 struct scsipi_mode_header_big *data; 1224 { 1225 struct scsipi_mode_sense_big scsipi_cmd; 1226 int error; 1227 1228 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1229 scsipi_cmd.opcode = MODE_SENSE_BIG; 1230 scsipi_cmd.byte2 = byte2; 1231 scsipi_cmd.page = page; 1232 _lto2b(len, scsipi_cmd.length); 1233 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd, 1234 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL, 1235 flags | XS_CTL_DATA_IN); 1236 SC_DEBUG(periph, SCSIPI_DB2, 1237 ("scsipi_mode_sense_big: error=%d\n", error)); 1238 return (error); 1239 } 1240 1241 int 1242 

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
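
/*
 * Illustrative sketch (not part of the original file): an HBA driver's
 * interrupt handler completes an xfer by filling in the status fields
 * and handing it back to scsipi_done().  For a successful transfer:
 *
 *	xs->status = SCSI_OK;
 *	xs->error = XS_NOERROR;
 *	xs->resid = 0;
 *	scsipi_done(xs);
 *
 * On failure the driver would instead set, e.g., xs->error = XS_TIMEOUT,
 * or XS_SENSE with xs->sense filled in, before calling scsipi_done().
 */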
1475 */ 1476 if ((xs->xs_control & XS_CTL_USERCMD) != 0) { 1477 SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n")); 1478 if (xs->error != XS_NOERROR) 1479 scsipi_periph_thaw(periph, 1); 1480 scsipi_user_done(xs); 1481 SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n ")); 1482 return 0; 1483 } 1484 1485 switch (xs->error) { 1486 case XS_NOERROR: 1487 error = 0; 1488 break; 1489 1490 case XS_SENSE: 1491 case XS_SHORTSENSE: 1492 error = (*chan->chan_bustype->bustype_interpret_sense)(xs); 1493 break; 1494 1495 case XS_RESOURCE_SHORTAGE: 1496 /* 1497 * XXX Should freeze channel's queue. 1498 */ 1499 scsipi_printaddr(periph); 1500 printf("adapter resource shortage\n"); 1501 /* FALLTHROUGH */ 1502 1503 case XS_BUSY: 1504 if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) { 1505 struct scsipi_max_openings mo; 1506 1507 /* 1508 * We set the openings to active - 1, assuming that 1509 * the command that got us here is the first one that 1510 * can't fit into the device's queue. If that's not 1511 * the case, I guess we'll find out soon enough. 1512 */ 1513 mo.mo_target = periph->periph_target; 1514 mo.mo_lun = periph->periph_lun; 1515 if (periph->periph_active < periph->periph_openings) 1516 mo.mo_openings = periph->periph_active - 1; 1517 else 1518 mo.mo_openings = periph->periph_openings - 1; 1519 #ifdef DIAGNOSTIC 1520 if (mo.mo_openings < 0) { 1521 scsipi_printaddr(periph); 1522 printf("QUEUE FULL resulted in < 0 openings\n"); 1523 panic("scsipi_done"); 1524 } 1525 #endif 1526 if (mo.mo_openings == 0) { 1527 scsipi_printaddr(periph); 1528 printf("QUEUE FULL resulted in 0 openings\n"); 1529 mo.mo_openings = 1; 1530 } 1531 scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo); 1532 error = ERESTART; 1533 } else if (xs->xs_retries != 0) { 1534 xs->xs_retries--; 1535 /* 1536 * Wait one second, and try again. 1537 */ 1538 if ((xs->xs_control & XS_CTL_POLL) || 1539 (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 1540 delay(1000000); 1541 } else if (!callout_pending(&periph->periph_callout)) { 1542 scsipi_periph_freeze(periph, 1); 1543 callout_reset(&periph->periph_callout, 1544 hz, scsipi_periph_timed_thaw, periph); 1545 } 1546 error = ERESTART; 1547 } else 1548 error = EBUSY; 1549 break; 1550 1551 case XS_REQUEUE: 1552 error = ERESTART; 1553 break; 1554 1555 case XS_SELTIMEOUT: 1556 case XS_TIMEOUT: 1557 /* 1558 * If the device hasn't gone away, honor retry counts. 1559 * 1560 * Note that if we're in the middle of probing it, 1561 * it won't be found because it isn't here yet so 1562 * we won't honor the retry count in that case. 1563 */ 1564 if (scsipi_lookup_periph(chan, periph->periph_target, 1565 periph->periph_lun) && xs->xs_retries != 0) { 1566 xs->xs_retries--; 1567 error = ERESTART; 1568 } else 1569 error = EIO; 1570 break; 1571 1572 case XS_RESET: 1573 if (xs->xs_control & XS_CTL_REQSENSE) { 1574 /* 1575 * request sense interrupted by reset: signal it 1576 * with EINTR return code. 
1577 */ 1578 error = EINTR; 1579 } else { 1580 if (xs->xs_retries != 0) { 1581 xs->xs_retries--; 1582 error = ERESTART; 1583 } else 1584 error = EIO; 1585 } 1586 break; 1587 1588 case XS_DRIVER_STUFFUP: 1589 scsipi_printaddr(periph); 1590 printf("generic HBA error\n"); 1591 error = EIO; 1592 break; 1593 default: 1594 scsipi_printaddr(periph); 1595 printf("invalid return code from adapter: %d\n", xs->error); 1596 error = EIO; 1597 break; 1598 } 1599 1600 s = splbio(); 1601 if (error == ERESTART) { 1602 /* 1603 * If we get here, the periph has been thawed and frozen 1604 * again if we had to issue recovery commands. Alternatively, 1605 * it may have been frozen again and in a timed thaw. In 1606 * any case, we thaw the periph once we re-enqueue the 1607 * command. Once the periph is fully thawed, it will begin 1608 * operation again. 1609 */ 1610 xs->error = XS_NOERROR; 1611 xs->status = SCSI_OK; 1612 xs->xs_status &= ~XS_STS_DONE; 1613 xs->xs_requeuecnt++; 1614 error = scsipi_enqueue(xs); 1615 if (error == 0) { 1616 scsipi_periph_thaw(periph, 1); 1617 splx(s); 1618 return (ERESTART); 1619 } 1620 } 1621 1622 /* 1623 * scsipi_done() freezes the queue if not XS_NOERROR. 1624 * Thaw it here. 1625 */ 1626 if (xs->error != XS_NOERROR) 1627 scsipi_periph_thaw(periph, 1); 1628 1629 /* 1630 * Set buffer fields in case the periph 1631 * switch done func uses them 1632 */ 1633 if ((bp = xs->bp) != NULL) { 1634 if (error) { 1635 bp->b_error = error; 1636 bp->b_flags |= B_ERROR; 1637 bp->b_resid = bp->b_bcount; 1638 } else { 1639 bp->b_error = 0; 1640 bp->b_resid = xs->resid; 1641 } 1642 } 1643 1644 if (periph->periph_switch->psw_done) 1645 periph->periph_switch->psw_done(xs); 1646 1647 if (bp) 1648 biodone(bp); 1649 1650 if (xs->xs_control & XS_CTL_ASYNC) 1651 scsipi_put_xs(xs); 1652 splx(s); 1653 1654 return (error); 1655 } 1656 1657 /* 1658 * Issue a request sense for the given scsipi_xfer. Called when the xfer 1659 * returns with a CHECK_CONDITION status. Must be called in valid thread 1660 * context and at splbio(). 1661 */ 1662 1663 void 1664 scsipi_request_sense(xs) 1665 struct scsipi_xfer *xs; 1666 { 1667 struct scsipi_periph *periph = xs->xs_periph; 1668 int flags, error; 1669 struct scsipi_sense cmd; 1670 1671 periph->periph_flags |= PERIPH_SENSE; 1672 1673 /* if command was polling, request sense will too */ 1674 flags = xs->xs_control & XS_CTL_POLL; 1675 /* Polling commands can't sleep */ 1676 if (flags) 1677 flags |= XS_CTL_NOSLEEP; 1678 1679 flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN | 1680 XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH; 1681 1682 memset(&cmd, 0, sizeof(cmd)); 1683 cmd.opcode = REQUEST_SENSE; 1684 cmd.length = sizeof(struct scsipi_sense_data); 1685 1686 error = scsipi_command(periph, 1687 (struct scsipi_generic *) &cmd, sizeof(cmd), 1688 (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data), 1689 0, 1000, NULL, flags); 1690 periph->periph_flags &= ~PERIPH_SENSE; 1691 periph->periph_xscheck = NULL; 1692 switch(error) { 1693 case 0: 1694 /* we have a valid sense */ 1695 xs->error = XS_SENSE; 1696 return; 1697 case EINTR: 1698 /* REQUEST_SENSE interrupted by bus reset. */ 1699 xs->error = XS_RESET; 1700 return; 1701 case EIO: 1702 /* request sense coudn't be performed */ 1703 /* 1704 * XXX this isn't quite right but we don't have anything 1705 * better for now 1706 */ 1707 xs->error = XS_DRIVER_STUFFUP; 1708 return; 1709 default: 1710 /* Notify that request sense failed. 
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
1848 */ 1849 if (scsipi_grow_resources(chan) == 0) { 1850 /* 1851 * Wasn't able to grow resources, 1852 * nothing more we can do. 1853 */ 1854 if (xs->xs_control & XS_CTL_POLL) { 1855 scsipi_printaddr(xs->xs_periph); 1856 printf("polling command but no " 1857 "adapter resources"); 1858 /* We'll panic shortly... */ 1859 } 1860 splx(s); 1861 1862 /* 1863 * XXX: We should be able to note that 1864 * XXX: that resources are needed here! 1865 */ 1866 return; 1867 } 1868 /* 1869 * scsipi_grow_resources() allocated the resource 1870 * for us. 1871 */ 1872 } 1873 1874 /* 1875 * We have a resource to run this xfer, do it! 1876 */ 1877 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q); 1878 1879 /* 1880 * If the command is to be tagged, allocate a tag ID 1881 * for it. 1882 */ 1883 if (XS_CTL_TAGTYPE(xs) != 0) 1884 scsipi_get_tag(xs); 1885 else 1886 periph->periph_flags |= PERIPH_UNTAG; 1887 periph->periph_sent++; 1888 splx(s); 1889 1890 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs); 1891 } 1892 #ifdef DIAGNOSTIC 1893 panic("scsipi_run_queue: impossible"); 1894 #endif 1895 } 1896 1897 /* 1898 * scsipi_execute_xs: 1899 * 1900 * Begin execution of an xfer, waiting for it to complete, if necessary. 1901 */ 1902 int 1903 scsipi_execute_xs(xs) 1904 struct scsipi_xfer *xs; 1905 { 1906 struct scsipi_periph *periph = xs->xs_periph; 1907 struct scsipi_channel *chan = periph->periph_channel; 1908 int oasync, async, poll, retries, error, s; 1909 1910 xs->xs_status &= ~XS_STS_DONE; 1911 xs->error = XS_NOERROR; 1912 xs->resid = xs->datalen; 1913 xs->status = SCSI_OK; 1914 1915 #ifdef SCSIPI_DEBUG 1916 if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) { 1917 printf("scsipi_execute_xs: "); 1918 show_scsipi_xs(xs); 1919 printf("\n"); 1920 } 1921 #endif 1922 1923 /* 1924 * Deal with command tagging: 1925 * 1926 * - If the device's current operating mode doesn't 1927 * include tagged queueing, clear the tag mask. 1928 * 1929 * - If the device's current operating mode *does* 1930 * include tagged queueing, set the tag_type in 1931 * the xfer to the appropriate byte for the tag 1932 * message. 1933 */ 1934 if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 || 1935 (xs->xs_control & XS_CTL_REQSENSE)) { 1936 xs->xs_control &= ~XS_CTL_TAGMASK; 1937 xs->xs_tag_type = 0; 1938 } else { 1939 /* 1940 * If the request doesn't specify a tag, give Head 1941 * tags to URGENT operations and Ordered tags to 1942 * everything else. 1943 */ 1944 if (XS_CTL_TAGTYPE(xs) == 0) { 1945 if (xs->xs_control & XS_CTL_URGENT) 1946 xs->xs_control |= XS_CTL_HEAD_TAG; 1947 else 1948 xs->xs_control |= XS_CTL_ORDERED_TAG; 1949 } 1950 1951 switch (XS_CTL_TAGTYPE(xs)) { 1952 case XS_CTL_ORDERED_TAG: 1953 xs->xs_tag_type = MSG_ORDERED_Q_TAG; 1954 break; 1955 1956 case XS_CTL_SIMPLE_TAG: 1957 xs->xs_tag_type = MSG_SIMPLE_Q_TAG; 1958 break; 1959 1960 case XS_CTL_HEAD_TAG: 1961 xs->xs_tag_type = MSG_HEAD_OF_Q_TAG; 1962 break; 1963 1964 default: 1965 scsipi_printaddr(periph); 1966 printf("invalid tag mask 0x%08x\n", 1967 XS_CTL_TAGTYPE(xs)); 1968 panic("scsipi_execute_xs"); 1969 } 1970 } 1971 1972 /* If the adaptor wants us to poll, poll. */ 1973 if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY) 1974 xs->xs_control |= XS_CTL_POLL; 1975 1976 /* 1977 * If we don't yet have a completion thread, or we are to poll for 1978 * completion, clear the ASYNC flag. 
1979 */ 1980 oasync = (xs->xs_control & XS_CTL_ASYNC); 1981 if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0) 1982 xs->xs_control &= ~XS_CTL_ASYNC; 1983 1984 async = (xs->xs_control & XS_CTL_ASYNC); 1985 poll = (xs->xs_control & XS_CTL_POLL); 1986 retries = xs->xs_retries; /* for polling commands */ 1987 1988 #ifdef DIAGNOSTIC 1989 if (oasync != 0 && xs->bp == NULL) 1990 panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf"); 1991 #endif 1992 1993 /* 1994 * Enqueue the transfer. If we're not polling for completion, this 1995 * should ALWAYS return `no error'. 1996 */ 1997 try_again: 1998 error = scsipi_enqueue(xs); 1999 if (error) { 2000 if (poll == 0) { 2001 scsipi_printaddr(periph); 2002 printf("not polling, but enqueue failed with %d\n", 2003 error); 2004 panic("scsipi_execute_xs"); 2005 } 2006 2007 scsipi_printaddr(periph); 2008 printf("failed to enqueue polling command"); 2009 if (retries != 0) { 2010 printf(", retrying...\n"); 2011 delay(1000000); 2012 retries--; 2013 goto try_again; 2014 } 2015 printf("\n"); 2016 goto free_xs; 2017 } 2018 2019 restarted: 2020 scsipi_run_queue(chan); 2021 2022 /* 2023 * The xfer is enqueued, and possibly running. If it's to be 2024 * completed asynchronously, just return now. 2025 */ 2026 if (async) 2027 return (EJUSTRETURN); 2028 2029 /* 2030 * Not an asynchronous command; wait for it to complete. 2031 */ 2032 s = splbio(); 2033 while ((xs->xs_status & XS_STS_DONE) == 0) { 2034 if (poll) { 2035 scsipi_printaddr(periph); 2036 printf("polling command not done\n"); 2037 panic("scsipi_execute_xs"); 2038 } 2039 (void) tsleep(xs, PRIBIO, "xscmd", 0); 2040 } 2041 splx(s); 2042 2043 /* 2044 * Command is complete. scsipi_done() has awakened us to perform 2045 * the error handling. 2046 */ 2047 error = scsipi_complete(xs); 2048 if (error == ERESTART) 2049 goto restarted; 2050 2051 /* 2052 * If it was meant to run async and we cleared aync ourselve, 2053 * don't return an error here. It has already been handled 2054 */ 2055 if (oasync) 2056 error = EJUSTRETURN; 2057 /* 2058 * Command completed successfully or fatal error occurred. Fall 2059 * into.... 2060 */ 2061 free_xs: 2062 s = splbio(); 2063 scsipi_put_xs(xs); 2064 splx(s); 2065 2066 /* 2067 * Kick the queue, keep it running in case it stopped for some 2068 * reason. 2069 */ 2070 scsipi_run_queue(chan); 2071 2072 return (error); 2073 } 2074 2075 /* 2076 * scsipi_completion_thread: 2077 * 2078 * This is the completion thread. We wait for errors on 2079 * asynchronous xfers, and perform the error handling 2080 * function, restarting the command, if necessary. 
2081 */ 2082 void 2083 scsipi_completion_thread(arg) 2084 void *arg; 2085 { 2086 struct scsipi_channel *chan = arg; 2087 struct scsipi_xfer *xs; 2088 int s; 2089 2090 if (chan->chan_init_cb) 2091 (*chan->chan_init_cb)(chan, chan->chan_init_cb_arg); 2092 2093 s = splbio(); 2094 chan->chan_flags |= SCSIPI_CHAN_TACTIVE; 2095 splx(s); 2096 for (;;) { 2097 s = splbio(); 2098 xs = TAILQ_FIRST(&chan->chan_complete); 2099 if (xs == NULL && chan->chan_tflags == 0) { 2100 /* nothing to do; wait */ 2101 (void) tsleep(&chan->chan_complete, PRIBIO, 2102 "sccomp", 0); 2103 splx(s); 2104 continue; 2105 } 2106 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) { 2107 /* call chan_callback from thread context */ 2108 chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK; 2109 chan->chan_callback(chan, chan->chan_callback_arg); 2110 splx(s); 2111 continue; 2112 } 2113 if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) { 2114 /* attempt to get more openings for this channel */ 2115 chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES; 2116 scsipi_adapter_request(chan, 2117 ADAPTER_REQ_GROW_RESOURCES, NULL); 2118 scsipi_channel_thaw(chan, 1); 2119 splx(s); 2120 continue; 2121 } 2122 if (chan->chan_tflags & SCSIPI_CHANT_KICK) { 2123 /* explicitly run the queues for this channel */ 2124 chan->chan_tflags &= ~SCSIPI_CHANT_KICK; 2125 scsipi_run_queue(chan); 2126 splx(s); 2127 continue; 2128 } 2129 if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) { 2130 splx(s); 2131 break; 2132 } 2133 if (xs) { 2134 TAILQ_REMOVE(&chan->chan_complete, xs, channel_q); 2135 splx(s); 2136 2137 /* 2138 * Have an xfer with an error; process it. 2139 */ 2140 (void) scsipi_complete(xs); 2141 2142 /* 2143 * Kick the queue; keep it running if it was stopped 2144 * for some reason. 2145 */ 2146 scsipi_run_queue(chan); 2147 } else { 2148 splx(s); 2149 } 2150 } 2151 2152 chan->chan_thread = NULL; 2153 2154 /* In case parent is waiting for us to exit. */ 2155 wakeup(&chan->chan_thread); 2156 2157 kthread_exit(0); 2158 } 2159 2160 /* 2161 * scsipi_create_completion_thread: 2162 * 2163 * Callback to actually create the completion thread. 2164 */ 2165 void 2166 scsipi_create_completion_thread(arg) 2167 void *arg; 2168 { 2169 struct scsipi_channel *chan = arg; 2170 struct scsipi_adapter *adapt = chan->chan_adapter; 2171 2172 if (kthread_create1(scsipi_completion_thread, chan, 2173 &chan->chan_thread, "%s", chan->chan_name)) { 2174 printf("%s: unable to create completion thread for " 2175 "channel %d\n", adapt->adapt_dev->dv_xname, 2176 chan->chan_channel); 2177 panic("scsipi_create_completion_thread"); 2178 } 2179 } 2180 2181 /* 2182 * scsipi_thread_call_callback: 2183 * 2184 * request to call a callback from the completion thread 2185 */ 2186 int 2187 scsipi_thread_call_callback(chan, callback, arg) 2188 struct scsipi_channel *chan; 2189 void (*callback) __P((struct scsipi_channel *, void *)); 2190 void *arg; 2191 { 2192 int s; 2193 2194 s = splbio(); 2195 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 2196 /* kernel thread doesn't exist yet */ 2197 splx(s); 2198 return ESRCH; 2199 } 2200 if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) { 2201 splx(s); 2202 return EBUSY; 2203 } 2204 scsipi_channel_freeze(chan, 1); 2205 chan->chan_callback = callback; 2206 chan->chan_callback_arg = arg; 2207 chan->chan_tflags |= SCSIPI_CHANT_CALLBACK; 2208 wakeup(&chan->chan_complete); 2209 splx(s); 2210 return(0); 2211 } 2212 2213 /* 2214 * scsipi_async_event: 2215 * 2216 * Handle an asynchronous event from an adapter. 

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%02dns offset %d)",
		    period / 100, period % 100, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode &
		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}
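
/*
 * Example (illustrative sketch only; the surrounding driver logic is
 * hypothetical): an adapter that receives QUEUE FULL status from a
 * target might throttle that nexus by reporting a lower openings count:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = xs->xs_periph->periph_target;
 *	mo.mo_lun = xs->xs_periph->periph_lun;
 *	mo.mo_openings = n;	(n >= 1, whatever depth the adapter
 *				 now believes the device can accept)
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 *
 * Per the code above, a decrease always takes effect, while an increase
 * is honored only for periphs with PERIPH_GROW_OPENINGS set; a mo_lun
 * of -1 applies the new limit to every LUN on the target.
 */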

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;	/* ignored */
	xm.xm_offset = 0;	/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);
		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}
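
/*
 * Example (illustrative only): a driver that has just recovered from a
 * bus error and wants transfer parameters renegotiated right away could
 * request it for a single target:
 *
 *	scsipi_set_xfer_mode(chan, periph->periph_target, 1);
 *
 * With immed != 0, the TEST UNIT READY above gives the adapter a
 * command in flight during which to negotiate; with immed == 0, the
 * new mode simply takes effect on the next regular command.
 */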

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a channel (SCSI bus) reset.
 *
 *	Called at splbio().
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * The channel has been reset.  Also mark pending REQUEST SENSE
	 * commands with XS_RESET, since the sense data is no longer
	 * available.  We can't call scsipi_done() from here, as the
	 * commands have not been sent to the adapter yet (doing so would
	 * corrupt the accounting).
	 */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);

	/* Catch xs with pending sense which may not have a REQSENSE xs yet. */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with an I_T Nexus.
 *	Must be called from valid thread context.
 */
int
scsipi_target_detach(chan, target, lun, flags)
	struct scsipi_channel *chan;
	int target, lun;
	int flags;
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return (EINVAL);
		if (target < 0 || target >= chan->chan_ntargets)
			return (EINVAL);
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return (EINVAL);
		minlun = lun;
		maxlun = lun + 1;
	}

	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph(chan, ctarget, clun);
			if (periph == NULL)
				continue;
			error = config_detach(periph->periph_dev, flags);
			if (error)
				return (error);
			scsipi_remove_periph(chan, periph);
			free(periph, M_DEVBUF);
		}
	}
	return (0);
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}
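
/*
 * Example (illustrative sketch only; the open/close routines named here
 * are hypothetical): a periph driver typically brackets device use with
 * the addref/delref pair so an adapter that supports enable/disable
 * (e.g. a hot-pluggable controller) is powered only while needed:
 *
 *	int
 *	mydev_open(...)
 *	{
 *		int error;
 *
 *		error = scsipi_adapter_addref(adapt);
 *		if (error)
 *			return (error);
 *		...
 *	}
 *
 *	int
 *	mydev_close(...)
 *	{
 *		...
 *		scsipi_adapter_delref(adapt);
 *		return (0);
 *	}
 */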

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 100 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 100) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 100);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (100000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */
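
/*
 * Worked example for the sync-parameter conversions above (the values
 * follow directly from the table and formulas; nothing here is new):
 *
 *	Factor 0x0c (FAST-20/Ultra) is in scsipi_syncparams[], so
 *	scsipi_sync_factor_to_period(0x0c) returns 5000, i.e. 50.00 ns,
 *	and scsipi_sync_factor_to_freq(0x0c) returns
 *	100000000 / 5000 = 20000 (kHz), i.e. 20 MHz.
 *
 *	Factor 0x32 (= 50) is not in the table, so the generic formulas
 *	apply: period = (50 * 4) * 100 = 20000 (200.00 ns), and
 *	freq = 10000000 / ((50 * 4) * 10) = 5000 (kHz), i.e. 5 MHz.
 *
 * scsipi_print_xfer_mode() then scales that frequency by the bus width
 * (x2 for wide-16, x4 for wide-32) to report a transfer rate in MB/s.
 */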