/*	$NetBSD: scsipi_base.c,v 1.75 2002/05/17 18:56:05 mjacob Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.75 2002/05/17 18:56:05 mjacob Exp $");

#include "opt_scsi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
}
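
/*
 * Usage sketch (illustrative, not part of the original file): an
 * adapter front-end typically fills in its channel and calls
 * scsipi_channel_init() at attach time and scsipi_channel_shutdown()
 * at detach time.  The "sc" softc below is hypothetical; scsi_bustype
 * is assumed to be the SCSI bustype descriptor from scsi_base.c.
 *
 *	sc->sc_channel.chan_adapter = &sc->sc_adapter;
 *	sc->sc_channel.chan_bustype = &scsi_bustype;
 *	sc->sc_channel.chan_channel = 0;
 *	sc->sc_channel.chan_ntargets = 8;
 *	sc->sc_channel.chan_nluns = 8;
 *	if (scsipi_channel_init(&sc->sc_channel) != 0)
 *		... handle attach failure ...
 *	...
 *	scsipi_channel_shutdown(&sc->sc_channel);	(at detach)
 */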
/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	uint32_t hash;
	int s;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	s = splbio();
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}
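
/*
 * Illustrative note (a sketch, not original code): a channel resource
 * is a permission to hand one xfer to the adapter.  scsipi_run_queue()
 * takes one before dequeueing an xfer and scsipi_done() returns it, so
 * every successful scsipi_get_resource() is balanced by exactly one
 * scsipi_put_resource(), both at splbio():
 *
 *	s = splbio();
 *	if (scsipi_get_resource(chan) == 0 &&
 *	    scsipi_grow_resources(chan) == 0) {
 *		splx(s);
 *		return;			(out of resources; try later)
 *	}
 *	... hand the xfer to the adapter; on completion,
 *	    scsipi_done() calls scsipi_put_resource(chan) ...
 *	splx(s);
 */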
/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
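
/*
 * Worked example (illustration only): tags are kept as a bitmap, one
 * bit per tag, 32 tags per periph_freetags[] word, and the tag ID is
 * encoded as (word << 5) | bit.  Tag 37 therefore lives in word 1,
 * bit 5:
 *
 *	word = 37 >> 5;			-> 1
 *	bit  = 37 & 0x1f;		-> 5
 *	periph_freetags[1] |= (1 << 5);	   (scsipi_put_tag() frees it)
 *
 * scsipi_get_tag() finds a set bit with ffs(), clears it, and hands
 * the encoded ID back in xs->xs_tag_id.
 */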
/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		callout_init(&xs->xs_callout);
		memset(xs, 0, sizeof(*xs));
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
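
/*
 * Usage sketch (hedged; simplified from how scsipi_command() and the
 * periph drivers use these internally): allocate an xfer without
 * sleeping and back off if none is available, relying on psw_start
 * being called again when an opening frees up:
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP | XS_CTL_ASYNC);
 *	if (xs == NULL)
 *		return;
 *	... fill in xs->cmd, xs->cmdlen, xs->data, xs->datalen ...
 *
 * The matching scsipi_put_xs() happens in scsipi_complete() for ASYNC
 * xfers, or at the end of scsipi_execute_xs() for synchronous ones.
 */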
/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
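
/*
 * Note on freeze counts (illustration): freezes nest, so each freeze
 * must be matched by exactly one thaw, and the queue only runs again
 * once the count returns to zero:
 *
 *	scsipi_periph_freeze(periph, 1);
 *	scsipi_periph_freeze(periph, 1);
 *	scsipi_periph_thaw(periph, 1);	   (still frozen, count == 1)
 *	scsipi_periph_thaw(periph, 1);	   (count == 0, waiters woken)
 *
 * scsipi_channel_timed_thaw() and scsipi_periph_timed_thaw() are the
 * callout-friendly forms that drop a single reference later.
 */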
/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}
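
/*
 * Usage sketch (hedged): a periph driver's detach path can discard
 * everything still queued for the device and wait for in-flight
 * xfers to finish before freeing its state:
 *
 *	s = splbio();
 *	scsipi_kill_pending(periph);	(kills queued xfers, then drains)
 *	splx(s);
 *
 * scsipi_wait_drain() may also be used on its own when pending xfers
 * should be allowed to complete normally instead of being killed.
 */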
/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef	SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return (0);
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return (0);
		/* XXX - display some sort of error here? */
		return (EIO);
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return (0);
		return (EINVAL);
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return (EACCES);

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Some other code, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

	return (_4btol(rdcap.addr) + 1);
}
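
/*
 * Example (illustration only): scsipi_size() returns a block count
 * (last LBA + 1), not a byte count.  A disk driver combines it with a
 * separately discovered block size ("blksize" below is assumed):
 *
 *	u_long nblks;
 *
 *	nblks = scsipi_size(periph, 0);
 *	if (nblks == 0)
 *		... capacity unknown; fail or use a default ...
 *	bytes = (u_int64_t)nblks * blksize;
 */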
/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags);

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "ACB-4000        ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "MT-02 QIC       ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}
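
/*
 * Usage sketch (hedged; PR_PREVENT/PR_ALLOW as used by the disk and
 * CD drivers of this era): removable-media drivers bracket first open
 * and last close with PREVENT/ALLOW MEDIUM REMOVAL, ignoring errors
 * from devices that do not implement it:
 *
 *	scsipi_prevent(periph, PR_PREVENT,
 *	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
 *	...
 *	scsipi_prevent(periph, PR_ALLOW,
 *	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
 *
 * PQUIRK_NODOORLOCK devices are handled above by returning success.
 */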
/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}
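
/*
 * Example (a sketch; the buffer layout is the caller's business):
 * fetching mode page 8 (caching) with the 6-byte MODE SENSE might
 * look like this, the ATAPI/SCSI length-field difference being
 * hidden by the helper:
 *
 *	struct {
 *		struct scsipi_mode_header header;
 *		u_int8_t page[20];
 *	} data;
 *	int error;
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &data.header,
 *	    sizeof(data), XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000);
 */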
int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else if (!callout_active(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * Request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
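
/*
 * Note (illustration): callers dispatch on the three-way return value
 * documented above, exactly as scsipi_execute_xs() does:
 *
 *	error = scsipi_complete(xs);
 *	if (error == ERESTART)
 *		goto restarted;		(re-enqueued; kick the queue)
 *	... error == 0 is success, anything else is a Unix errno,
 *	    and for ASYNC xfers the xs has already been freed ...
 */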
/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}
/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
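
/*
 * Note (hedged): scsipi_run_queue() is the channel's single restart
 * point.  It is called after completions, thaws and enqueues alike,
 * simply returns when the channel is frozen or out of resources, and
 * must never sleep, since it may be entered from interrupt context
 * (softclock or the HBA's interrupt).  Redundant kicks, e.g.
 *
 *	scsipi_channel_thaw(chan, 1);	(already kicks the queue)
 *	scsipi_run_queue(chan);
 *
 * are therefore harmless, if unnecessary.
 */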
/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled.
	 */
	if (oasync)
		error = EJUSTRETURN;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	Request that a callback be run from the completion thread.
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		splx(s);
		return (ESRCH);
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		splx(s);
		return (EBUSY);
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}
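/*
 * A minimal sketch (illustrative only, not compiled) of the adapter
 * side of the async-event interface handled above: an adapter that has
 * finished negotiation might report the agreed transfer mode like this.
 * The capability bits and offset shown are arbitrary example values.
 */
#if 0
static void
example_report_xfer_mode(chan, target)
	struct scsipi_channel *chan;
	int target;
{
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_TQING;
	xm.xm_period = scsipi_sync_period_to_factor(250);	/* 25.0 ns */
	xm.xm_offset = 15;
	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
}
#endif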
/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode &
		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	/* XXX This could really suck with a large LUN space. */
	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}
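		/*
		 * Worked example: an xm_period factor of 0x0a is a 25.0ns
		 * (40000kHz) period; with PERIPH_CAP_WIDE16 the rate
		 * doubles to 80000, so scsipi_print_xfer_mode() reports
		 * "sync (25.0ns offset 15), 16-bit (80.000MB/s) transfers".
		 */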
		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;	/* ignored */
	xm.xm_offset = 0;	/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;

		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);

		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}
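/*
 * A minimal sketch (illustrative only, not compiled) of a caller of
 * scsipi_set_xfer_mode(): for example, bus-scan code could request
 * immediate renegotiation with every target except the adapter itself.
 */
#if 0
static void
example_negotiate_all(chan)
	struct scsipi_channel *chan;
{
	int target;

	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target != chan->chan_id)
			scsipi_set_xfer_mode(chan, target, 1);
	}
}
#endif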
/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a channel (bus) reset event.  Called at splbio.
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * The channel has been reset.  Also mark any queued REQUEST SENSE
	 * commands with XS_RESET, since the pending sense data is no longer
	 * available after a reset.  We can't call scsipi_done() from here,
	 * because these commands have not been sent to the adapter yet
	 * (doing so would corrupt the accounting).
	 */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);

	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with the specified I_T Nexus.
 *	Must be called from valid thread context.
 */
int
scsipi_target_detach(chan, target, lun, flags)
	struct scsipi_channel *chan;
	int target, lun;
	int flags;
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return (EINVAL);
		if (target < 0 || target >= chan->chan_ntargets)
			return (EINVAL);
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return (EINVAL);
		minlun = lun;
		maxlun = lun + 1;
	}

	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph(chan, ctarget, clun);
			if (periph == NULL)
				continue;
			error = config_detach(periph->periph_dev, flags);
			if (error)
				return (error);
			scsipi_remove_periph(chan, periph);
			free(periph, M_DEVBUF);
		}
	}
	return (0);
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}
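/*
 * A minimal sketch (illustrative only, not compiled) of the reference-
 * counting pattern: peripheral drivers pair scsipi_adapter_addref() at
 * open with scsipi_adapter_delref() at close, so that an otherwise
 * unused adapter can be disabled through its adapt_enable hook.
 */
#if 0
static int
example_periph_open(periph)
	struct scsipi_periph *periph;
{
	int error;

	error = scsipi_adapter_addref(
	    periph->periph_channel->chan_adapter);
	if (error)
		return (error);
	/* ...device-specific open work; on failure, call delref... */
	return (0);
}
#endif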
/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x09, 125 },
	{ 0x0a, 250 },
	{ 0x0b, 303 },
	{ 0x0c, 500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 10) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 10);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *)xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */
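/*
 * Worked examples for the sync conversions above (illustrative only):
 * factors present in scsipi_syncparams[] use the table; anything else
 * falls back to the SPI formula period = factor * 4 ns.
 *
 *	scsipi_sync_period_to_factor(250) == 0x0a	(25.0 ns)
 *	scsipi_sync_factor_to_period(0x0c) == 500	(50.0 ns)
 *	scsipi_sync_factor_to_freq(0x0c) == 20000	(kHz)
 *	scsipi_sync_factor_to_period(0x19) == 1000	(100.0 ns)
 */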