/*	$NetBSD: scsipi_base.c,v 1.80 2002/10/04 03:41:50 soren Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.80 2002/10/04 03:41:50 soren Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
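
/*
 * Illustrative sketch (not compiled): how the bucket hash above is
 * used.  Insertion and lookup both hash the same (target, lun) pair,
 * so they always agree on the bucket:
 *
 *	hash = scsipi_chan_periph_hash(periph->periph_target,
 *	    periph->periph_lun);
 *	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
 *
 * and later:
 *
 *	hash = scsipi_chan_periph_hash(target, lun);
 *	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash)
 *		...
 *
 * SCSIPI_CHAN_PERIPH_HASHMASK keeps the result inside the
 * SCSIPI_CHAN_PERIPH_BUCKETS-entry table.
 */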

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Look up a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}
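
/*
 * Illustrative sketch (not compiled): the usual pairing of the three
 * resource routines above, as performed by scsipi_run_queue() and
 * scsipi_done().  One opening is taken per xfer handed to the adapter
 * and returned exactly once on completion; whether chan_openings or
 * adapt_openings backs the count is hidden behind SCSIPI_CHAN_OPENINGS:
 *
 *	s = splbio();
 *	if (scsipi_get_resource(chan) == 0 &&
 *	    scsipi_grow_resources(chan) == 0) {
 *		splx(s);
 *		return;		XXX out of openings; retry later
 *	}
 *	splx(s);
 *	...hand the xfer to the adapter...
 *	s = splbio();
 *	scsipi_put_resource(chan);
 *	splx(s);
 */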

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
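
/*
 * Illustrative note: tag IDs are allocated from a bitmap packed 32
 * tags per word, so the two routines above are exact inverses:
 *
 *	tag  = (word << 5) | bit;	scsipi_get_tag()
 *	word = tag >> 5;		scsipi_put_tag()
 *	bit  = tag & 0x1f;
 *
 * For example, tag 37 lives in periph_freetags[1], bit 5.
 */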

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		/* Zero the xfer before initializing the callout in it. */
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
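
/*
 * Illustrative sketch (not compiled): the xfer descriptor life cycle
 * implemented by the pair above, as seen from scsipi_command():
 *
 *	xs = scsipi_get_xs(periph, flags);	may sleep for an opening
 *	if (xs == NULL)
 *		return (ENOMEM);
 *	...fill in xs->cmd, xs->data, xs->timeout...
 *	error = scsipi_execute_xs(xs);
 *
 * For synchronous commands scsipi_execute_xs() calls scsipi_put_xs()
 * itself once the command is complete; for XS_CTL_ASYNC commands the
 * release happens in scsipi_complete().
 */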

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's
		 * queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
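
/*
 * Illustrative note: the freeze/thaw routines above are counters, not
 * flags, so holds nest.  The canonical use is the one-second back-off
 * in scsipi_complete():
 *
 *	scsipi_periph_freeze(periph, 1);
 *	callout_reset(&periph->periph_callout,
 *	    hz, scsipi_periph_timed_thaw, periph);
 *
 * scsipi_periph_timed_thaw() then drops that hold and arranges for the
 * channel queue to be kicked again.
 */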

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:	/* no error (command completed OK) */
		return (0);
	case 0x04:	/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:	/* invalid command */
		if ((xs->xs_control &
		    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:	/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:	/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if    defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		/* dump the whole sense structure, not just a pointer */
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
		return (0);

	return (_4btol(rdcap.addr) + 1);
}
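
/*
 * Illustrative note: READ CAPACITY returns the address of the *last*
 * block, so scsipi_size() adds one to get the block count.  For
 * example, a device whose last block address is 0x003fffff has
 * 0x00400000 (4194304) blocks: 2GB at 512 bytes per block.
 */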

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags);

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "ACB-4000        ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "MT-02 QIC       ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}
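
/*
 * Illustrative sketch (not compiled): fetching a mode page with the
 * helpers above.  A hypothetical caller reading mode page 8 (caching)
 * into a buffer that begins with the small mode header might do:
 *
 *	struct {
 *		struct scsipi_mode_header header;
 *		u_int8_t page[20];
 *	} data;
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &data.header,
 *	    sizeof(data), XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000);
 *
 * The SCSI vs. ATAPI difference in the CDB length field is handled
 * inside scsipi_mode_sense() itself.
 */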

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
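
/*
 * Illustrative sketch (not compiled): the contract scsipi_done() above
 * assumes of an adapter.  When a transfer finishes, the HBA interrupt
 * handler fills in the result fields and hands the xfer back exactly
 * once:
 *
 *	xs->error = XS_NOERROR;		or XS_SENSE, XS_TIMEOUT, ...
 *	xs->status = SCSI_OK;
 *	xs->resid = 0;
 *	scsipi_done(xs);
 *
 * Everything downstream (error recovery, biodone(), releasing the
 * xfer) is driven from scsipi_done()/scsipi_complete().
 */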

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_active(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
1610 */ 1611 if (xs->error != XS_NOERROR) 1612 scsipi_periph_thaw(periph, 1); 1613 1614 /* 1615 * Set buffer fields in case the periph 1616 * switch done func uses them 1617 */ 1618 if ((bp = xs->bp) != NULL) { 1619 if (error) { 1620 bp->b_error = error; 1621 bp->b_flags |= B_ERROR; 1622 bp->b_resid = bp->b_bcount; 1623 } else { 1624 bp->b_error = 0; 1625 bp->b_resid = xs->resid; 1626 } 1627 } 1628 1629 if (periph->periph_switch->psw_done) 1630 periph->periph_switch->psw_done(xs); 1631 1632 if (bp) 1633 biodone(bp); 1634 1635 if (xs->xs_control & XS_CTL_ASYNC) 1636 scsipi_put_xs(xs); 1637 splx(s); 1638 1639 return (error); 1640 } 1641 1642 /* 1643 * Issue a request sense for the given scsipi_xfer. Called when the xfer 1644 * returns with a CHECK_CONDITION status. Must be called in valid thread 1645 * context and at splbio(). 1646 */ 1647 1648 void 1649 scsipi_request_sense(xs) 1650 struct scsipi_xfer *xs; 1651 { 1652 struct scsipi_periph *periph = xs->xs_periph; 1653 int flags, error; 1654 struct scsipi_sense cmd; 1655 1656 periph->periph_flags |= PERIPH_SENSE; 1657 1658 /* if command was polling, request sense will too */ 1659 flags = xs->xs_control & XS_CTL_POLL; 1660 /* Polling commands can't sleep */ 1661 if (flags) 1662 flags |= XS_CTL_NOSLEEP; 1663 1664 flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN | 1665 XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH; 1666 1667 memset(&cmd, 0, sizeof(cmd)); 1668 cmd.opcode = REQUEST_SENSE; 1669 cmd.length = sizeof(struct scsipi_sense_data); 1670 1671 error = scsipi_command(periph, 1672 (struct scsipi_generic *) &cmd, sizeof(cmd), 1673 (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data), 1674 0, 1000, NULL, flags); 1675 periph->periph_flags &= ~PERIPH_SENSE; 1676 periph->periph_xscheck = NULL; 1677 switch(error) { 1678 case 0: 1679 /* we have a valid sense */ 1680 xs->error = XS_SENSE; 1681 return; 1682 case EINTR: 1683 /* REQUEST_SENSE interrupted by bus reset. */ 1684 xs->error = XS_RESET; 1685 return; 1686 case EIO: 1687 /* request sense coudn't be performed */ 1688 /* 1689 * XXX this isn't quite right but we don't have anything 1690 * better for now 1691 */ 1692 xs->error = XS_DRIVER_STUFFUP; 1693 return; 1694 default: 1695 /* Notify that request sense failed. */ 1696 xs->error = XS_DRIVER_STUFFUP; 1697 scsipi_printaddr(periph); 1698 printf("request sense failed with error %d\n", error); 1699 return; 1700 } 1701 } 1702 1703 /* 1704 * scsipi_enqueue: 1705 * 1706 * Enqueue an xfer on a channel. 1707 */ 1708 int 1709 scsipi_enqueue(xs) 1710 struct scsipi_xfer *xs; 1711 { 1712 struct scsipi_channel *chan = xs->xs_periph->periph_channel; 1713 struct scsipi_xfer *qxs; 1714 int s; 1715 1716 s = splbio(); 1717 1718 /* 1719 * If the xfer is to be polled, and there are already jobs on 1720 * the queue, we can't proceed. 1721 */ 1722 if ((xs->xs_control & XS_CTL_POLL) != 0 && 1723 TAILQ_FIRST(&chan->chan_queue) != NULL) { 1724 splx(s); 1725 xs->error = XS_DRIVER_STUFFUP; 1726 return (EAGAIN); 1727 } 1728 1729 /* 1730 * If we have an URGENT xfer, it's an error recovery command 1731 * and it should just go on the head of the channel's queue. 1732 */ 1733 if (xs->xs_control & XS_CTL_URGENT) { 1734 TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q); 1735 goto out; 1736 } 1737 1738 /* 1739 * If this xfer has already been on the queue before, we 1740 * need to reinsert it in the correct order. That order is: 1741 * 1742 * Immediately before the first xfer for this periph 1743 * with a requeuecnt less than xs->xs_requeuecnt. 
1744 * 1745 * Failing that, at the end of the queue. (We'll end up 1746 * there naturally.) 1747 */ 1748 if (xs->xs_requeuecnt != 0) { 1749 for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL; 1750 qxs = TAILQ_NEXT(qxs, channel_q)) { 1751 if (qxs->xs_periph == xs->xs_periph && 1752 qxs->xs_requeuecnt < xs->xs_requeuecnt) 1753 break; 1754 } 1755 if (qxs != NULL) { 1756 TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs, 1757 channel_q); 1758 goto out; 1759 } 1760 } 1761 TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q); 1762 out: 1763 if (xs->xs_control & XS_CTL_THAW_PERIPH) 1764 scsipi_periph_thaw(xs->xs_periph, 1); 1765 splx(s); 1766 return (0); 1767 } 1768 1769 /* 1770 * scsipi_run_queue: 1771 * 1772 * Start as many xfers as possible running on the channel. 1773 */ 1774 void 1775 scsipi_run_queue(chan) 1776 struct scsipi_channel *chan; 1777 { 1778 struct scsipi_xfer *xs; 1779 struct scsipi_periph *periph; 1780 int s; 1781 1782 for (;;) { 1783 s = splbio(); 1784 1785 /* 1786 * If the channel is frozen, we can't do any work right 1787 * now. 1788 */ 1789 if (chan->chan_qfreeze != 0) { 1790 splx(s); 1791 return; 1792 } 1793 1794 /* 1795 * Look for work to do, and make sure we can do it. 1796 */ 1797 for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; 1798 xs = TAILQ_NEXT(xs, channel_q)) { 1799 periph = xs->xs_periph; 1800 1801 if ((periph->periph_sent >= periph->periph_openings) || 1802 periph->periph_qfreeze != 0 || 1803 (periph->periph_flags & PERIPH_UNTAG) != 0) 1804 continue; 1805 1806 if ((periph->periph_flags & 1807 (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 && 1808 (xs->xs_control & XS_CTL_URGENT) == 0) 1809 continue; 1810 1811 /* 1812 * We can issue this xfer! 1813 */ 1814 goto got_one; 1815 } 1816 1817 /* 1818 * Can't find any work to do right now. 1819 */ 1820 splx(s); 1821 return; 1822 1823 got_one: 1824 /* 1825 * Have an xfer to run. Allocate a resource from 1826 * the adapter to run it. If we can't allocate that 1827 * resource, we don't dequeue the xfer. 1828 */ 1829 if (scsipi_get_resource(chan) == 0) { 1830 /* 1831 * Adapter is out of resources. If the adapter 1832 * supports it, attempt to grow them. 1833 */ 1834 if (scsipi_grow_resources(chan) == 0) { 1835 /* 1836 * Wasn't able to grow resources, 1837 * nothing more we can do. 1838 */ 1839 if (xs->xs_control & XS_CTL_POLL) { 1840 scsipi_printaddr(xs->xs_periph); 1841 printf("polling command but no " 1842 "adapter resources"); 1843 /* We'll panic shortly... */ 1844 } 1845 splx(s); 1846 1847 /* 1848 * XXX: We should be able to note that 1849 * XXX: that resources are needed here! 1850 */ 1851 return; 1852 } 1853 /* 1854 * scsipi_grow_resources() allocated the resource 1855 * for us. 1856 */ 1857 } 1858 1859 /* 1860 * We have a resource to run this xfer, do it! 1861 */ 1862 TAILQ_REMOVE(&chan->chan_queue, xs, channel_q); 1863 1864 /* 1865 * If the command is to be tagged, allocate a tag ID 1866 * for it. 1867 */ 1868 if (XS_CTL_TAGTYPE(xs) != 0) 1869 scsipi_get_tag(xs); 1870 else 1871 periph->periph_flags |= PERIPH_UNTAG; 1872 periph->periph_sent++; 1873 splx(s); 1874 1875 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs); 1876 } 1877 #ifdef DIAGNOSTIC 1878 panic("scsipi_run_queue: impossible"); 1879 #endif 1880 } 1881 1882 /* 1883 * scsipi_execute_xs: 1884 * 1885 * Begin execution of an xfer, waiting for it to complete, if necessary. 

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	Request that a callback be run from the completion thread.
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		splx(s);
		return (ESRCH);
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		splx(s);
		return (EBUSY);
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}
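/*
 * Example (hypothetical adapter code): an adapter that has run out
 * of tags for a target could use the MAX_OPENINGS event to throttle
 * all of its LUNs; `target' and `new_openings' stand in for values
 * the adapter would supply:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = target;
 *	mo.mo_lun = -1;
 *	mo.mo_openings = new_openings;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 *
 * The wildcard LUN of -1 makes scsipi_async_event_max_openings()
 * below apply the new limit to every LUN on the target.
 */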
/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode &
		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}
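/*
 * Example (hypothetical adapter code): after negotiating wide
 * synchronous transfers, an adapter reports the result through the
 * same event mechanism; `target', `factor' and `offset' stand in
 * for the negotiated values:
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = target;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
 *	xm.xm_period = factor;
 *	xm.xm_offset = offset;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 *
 * Note that xm_period carries the sync negotiation factor, not a
 * time; scsipi_print_xfer_mode() converts it with the
 * scsipi_sync_factor_to_*() helpers defined later in this file.
 */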
/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;

		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);

		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.  Called at splbio.
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset.  Also mark pending REQUEST_SENSE
	 * commands as reset, since the sense data is no longer available.
	 * We can't call scsipi_done() from here, as the command has not
	 * been sent to the adapter yet (doing so would corrupt the
	 * accounting).
	 */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);

	/* Catch xs with pending sense which may not have a REQSENSE xs yet. */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}
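/*
 * Illustrative note: a bus reset normally reaches the function
 * above from an adapter's interrupt handler via
 *
 *	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
 *
 * scsipi_async_event() raises to splbio before dispatching, which
 * is why this handler can assume it is called at splbio.
 */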
/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with an I_T Nexus.
 *	Must be called from valid thread context.
 */
int
scsipi_target_detach(chan, target, lun, flags)
	struct scsipi_channel *chan;
	int target, lun;
	int flags;
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return (EINVAL);
		if (target < 0 || target >= chan->chan_ntargets)
			return (EINVAL);
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return (EINVAL);
		minlun = lun;
		maxlun = lun + 1;
	}

	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph(chan, ctarget, clun);
			if (periph == NULL)
				continue;
			error = config_detach(periph->periph_dev, flags);
			if (error)
				return (error);
			scsipi_remove_periph(chan, periph);
			free(periph, M_DEVBUF);
		}
	}
	return (0);
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}
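/*
 * Example (illustrative): code that needs the adapter powered up
 * for the duration of an operation brackets it with a reference
 * pair:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	... issue requests ...
 *	scsipi_adapter_delref(adapt);
 *
 * The first reference enables the adapter through adapt_enable;
 * dropping the last one disables it again (see below).
 */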
/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

/*
 * Table mapping SCSI sync negotiation factors to transfer periods.
 * Periods are stored in units of 0.1ns so that fractional periods
 * (e.g. factor 0x0b's 30.3ns) can be represented exactly; factors
 * not listed here convert linearly as period_ns = factor * 4.
 */
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x09,	 125 },
	{ 0x0a,	 250 },
	{ 0x0b,	 303 },
	{ 0x0c,	 500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

/*
 * scsipi_sync_period_to_factor:
 *
 *	Convert a sync period (in 0.1ns units) to a negotiation
 *	factor, picking the first table entry whose period is at
 *	least as long as the one requested.
 */
int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 10) / 4);
}

/*
 * scsipi_sync_factor_to_period:
 *
 *	Convert a negotiation factor back to a period in 0.1ns units.
 */
int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 10);
}

/*
 * scsipi_sync_factor_to_freq:
 *
 *	Convert a negotiation factor to a sync frequency in kHz
 *	(e.g. factor 0x0c -> 50.0ns period -> 10000000 / 500 =
 *	20000kHz, i.e. 20MHz).
 */
int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */