1 /* $NetBSD: scsipi_base.c,v 1.71 2002/04/01 20:37:42 bouyer Exp $ */ 2 3 /*- 4 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace 9 * Simulation Facility, NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 #include <sys/cdefs.h> 41 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.71 2002/04/01 20:37:42 bouyer Exp $"); 42 43 #include "opt_scsi.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/buf.h> 49 #include <sys/uio.h> 50 #include <sys/malloc.h> 51 #include <sys/pool.h> 52 #include <sys/errno.h> 53 #include <sys/device.h> 54 #include <sys/proc.h> 55 #include <sys/kthread.h> 56 57 #include <dev/scsipi/scsipi_all.h> 58 #include <dev/scsipi/scsipi_disk.h> 59 #include <dev/scsipi/scsipiconf.h> 60 #include <dev/scsipi/scsipi_base.h> 61 62 #include <dev/scsipi/scsi_all.h> 63 #include <dev/scsipi/scsi_message.h> 64 65 int scsipi_complete __P((struct scsipi_xfer *)); 66 void scsipi_request_sense __P((struct scsipi_xfer *)); 67 int scsipi_enqueue __P((struct scsipi_xfer *)); 68 void scsipi_run_queue __P((struct scsipi_channel *chan)); 69 70 void scsipi_completion_thread __P((void *)); 71 72 void scsipi_get_tag __P((struct scsipi_xfer *)); 73 void scsipi_put_tag __P((struct scsipi_xfer *)); 74 75 int scsipi_get_resource __P((struct scsipi_channel *)); 76 void scsipi_put_resource __P((struct scsipi_channel *)); 77 __inline int scsipi_grow_resources __P((struct scsipi_channel *)); 78 79 void scsipi_async_event_max_openings __P((struct scsipi_channel *, 80 struct scsipi_max_openings *)); 81 void scsipi_async_event_xfer_mode __P((struct scsipi_channel *, 82 struct scsipi_xfer_mode 
*)); 83 void scsipi_async_event_channel_reset __P((struct scsipi_channel *)); 84 85 struct pool scsipi_xfer_pool; 86 87 /* 88 * scsipi_init: 89 * 90 * Called when a scsibus or atapibus is attached to the system 91 * to initialize shared data structures. 92 */ 93 void 94 scsipi_init() 95 { 96 static int scsipi_init_done; 97 98 if (scsipi_init_done) 99 return; 100 scsipi_init_done = 1; 101 102 /* Initialize the scsipi_xfer pool. */ 103 pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0, 104 0, 0, "scxspl", NULL); 105 } 106 107 /* 108 * scsipi_channel_init: 109 * 110 * Initialize a scsipi_channel when it is attached. 111 */ 112 int 113 scsipi_channel_init(chan) 114 struct scsipi_channel *chan; 115 { 116 size_t nbytes; 117 int i; 118 119 /* Initialize shared data. */ 120 scsipi_init(); 121 122 /* Initialize the queues. */ 123 TAILQ_INIT(&chan->chan_queue); 124 TAILQ_INIT(&chan->chan_complete); 125 126 nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **); 127 chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT); 128 if (chan->chan_periphs == NULL) 129 return (ENOMEM); 130 131 132 nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *); 133 for (i = 0; i < chan->chan_ntargets; i++) { 134 chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, 135 M_NOWAIT|M_ZERO); 136 if (chan->chan_periphs[i] == NULL) { 137 while (--i >= 0) { 138 free(chan->chan_periphs[i], M_DEVBUF); 139 } 140 return (ENOMEM); 141 } 142 } 143 144 /* 145 * Create the asynchronous completion thread. 146 */ 147 kthread_create(scsipi_create_completion_thread, chan); 148 return (0); 149 } 150 151 /* 152 * scsipi_channel_shutdown: 153 * 154 * Shutdown a scsipi_channel. 155 */ 156 void 157 scsipi_channel_shutdown(chan) 158 struct scsipi_channel *chan; 159 { 160 161 /* 162 * Shut down the completion thread. 163 */ 164 chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN; 165 wakeup(&chan->chan_complete); 166 167 /* 168 * Now wait for the thread to exit. 
169 */ 170 while (chan->chan_thread != NULL) 171 (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0); 172 } 173 174 /* 175 * scsipi_insert_periph: 176 * 177 * Insert a periph into the channel. 178 */ 179 void 180 scsipi_insert_periph(chan, periph) 181 struct scsipi_channel *chan; 182 struct scsipi_periph *periph; 183 { 184 int s; 185 186 s = splbio(); 187 chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph; 188 splx(s); 189 } 190 191 /* 192 * scsipi_remove_periph: 193 * 194 * Remove a periph from the channel. 195 */ 196 void 197 scsipi_remove_periph(chan, periph) 198 struct scsipi_channel *chan; 199 struct scsipi_periph *periph; 200 { 201 int s; 202 203 s = splbio(); 204 chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL; 205 splx(s); 206 } 207 208 /* 209 * scsipi_lookup_periph: 210 * 211 * Lookup a periph on the specified channel. 212 */ 213 struct scsipi_periph * 214 scsipi_lookup_periph(chan, target, lun) 215 struct scsipi_channel *chan; 216 int target, lun; 217 { 218 struct scsipi_periph *periph; 219 int s; 220 221 if (target >= chan->chan_ntargets || 222 lun >= chan->chan_nluns) 223 return (NULL); 224 225 s = splbio(); 226 periph = chan->chan_periphs[target][lun]; 227 splx(s); 228 229 return (periph); 230 } 231 232 /* 233 * scsipi_get_resource: 234 * 235 * Allocate a single xfer `resource' from the channel. 236 * 237 * NOTE: Must be called at splbio(). 238 */ 239 int 240 scsipi_get_resource(chan) 241 struct scsipi_channel *chan; 242 { 243 struct scsipi_adapter *adapt = chan->chan_adapter; 244 245 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) { 246 if (chan->chan_openings > 0) { 247 chan->chan_openings--; 248 return (1); 249 } 250 return (0); 251 } 252 253 if (adapt->adapt_openings > 0) { 254 adapt->adapt_openings--; 255 return (1); 256 } 257 return (0); 258 } 259 260 /* 261 * scsipi_grow_resources: 262 * 263 * Attempt to grow resources for a channel. If this succeeds, 264 * we allocate one for our caller. 
265 * 266 * NOTE: Must be called at splbio(). 267 */ 268 __inline int 269 scsipi_grow_resources(chan) 270 struct scsipi_channel *chan; 271 { 272 273 if (chan->chan_flags & SCSIPI_CHAN_CANGROW) { 274 if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) { 275 scsipi_adapter_request(chan, 276 ADAPTER_REQ_GROW_RESOURCES, NULL); 277 return (scsipi_get_resource(chan)); 278 } 279 /* 280 * ask the channel thread to do it. It'll have to thaw the 281 * queue 282 */ 283 scsipi_channel_freeze(chan, 1); 284 chan->chan_tflags |= SCSIPI_CHANT_GROWRES; 285 wakeup(&chan->chan_complete); 286 return (0); 287 } 288 289 return (0); 290 } 291 292 /* 293 * scsipi_put_resource: 294 * 295 * Free a single xfer `resource' to the channel. 296 * 297 * NOTE: Must be called at splbio(). 298 */ 299 void 300 scsipi_put_resource(chan) 301 struct scsipi_channel *chan; 302 { 303 struct scsipi_adapter *adapt = chan->chan_adapter; 304 305 if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) 306 chan->chan_openings++; 307 else 308 adapt->adapt_openings++; 309 } 310 311 /* 312 * scsipi_get_tag: 313 * 314 * Get a tag ID for the specified xfer. 315 * 316 * NOTE: Must be called at splbio(). 317 */ 318 void 319 scsipi_get_tag(xs) 320 struct scsipi_xfer *xs; 321 { 322 struct scsipi_periph *periph = xs->xs_periph; 323 int word, bit, tag; 324 325 for (word = 0; word < PERIPH_NTAGWORDS; word++) { 326 bit = ffs(periph->periph_freetags[word]); 327 if (bit != 0) 328 break; 329 } 330 #ifdef DIAGNOSTIC 331 if (word == PERIPH_NTAGWORDS) { 332 scsipi_printaddr(periph); 333 printf("no free tags\n"); 334 panic("scsipi_get_tag"); 335 } 336 #endif 337 338 bit -= 1; 339 periph->periph_freetags[word] &= ~(1 << bit); 340 tag = (word << 5) | bit; 341 342 /* XXX Should eventually disallow this completely. 
*/ 343 if (tag >= periph->periph_openings) { 344 scsipi_printaddr(periph); 345 printf("WARNING: tag %d greater than available openings %d\n", 346 tag, periph->periph_openings); 347 } 348 349 xs->xs_tag_id = tag; 350 } 351 352 /* 353 * scsipi_put_tag: 354 * 355 * Put the tag ID for the specified xfer back into the pool. 356 * 357 * NOTE: Must be called at splbio(). 358 */ 359 void 360 scsipi_put_tag(xs) 361 struct scsipi_xfer *xs; 362 { 363 struct scsipi_periph *periph = xs->xs_periph; 364 int word, bit; 365 366 word = xs->xs_tag_id >> 5; 367 bit = xs->xs_tag_id & 0x1f; 368 369 periph->periph_freetags[word] |= (1 << bit); 370 } 371 372 /* 373 * scsipi_get_xs: 374 * 375 * Allocate an xfer descriptor and associate it with the 376 * specified peripherial. If the peripherial has no more 377 * available command openings, we either block waiting for 378 * one to become available, or fail. 379 */ 380 struct scsipi_xfer * 381 scsipi_get_xs(periph, flags) 382 struct scsipi_periph *periph; 383 int flags; 384 { 385 struct scsipi_xfer *xs; 386 int s; 387 388 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n")); 389 390 /* 391 * If we're cold, make sure we poll. 392 */ 393 if (cold) 394 flags |= XS_CTL_NOSLEEP | XS_CTL_POLL; 395 396 #ifdef DIAGNOSTIC 397 /* 398 * URGENT commands can never be ASYNC. 399 */ 400 if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) == 401 (XS_CTL_URGENT|XS_CTL_ASYNC)) { 402 scsipi_printaddr(periph); 403 printf("URGENT and ASYNC\n"); 404 panic("scsipi_get_xs"); 405 } 406 #endif 407 408 s = splbio(); 409 /* 410 * Wait for a command opening to become available. Rules: 411 * 412 * - All xfers must wait for an available opening. 413 * Exception: URGENT xfers can proceed when 414 * active == openings, because we use the opening 415 * of the command we're recovering for. 416 * - if the periph has sense pending, only URGENT & REQSENSE 417 * xfers may proceed. 418 * 419 * - If the periph is recovering, only URGENT xfers may 420 * proceed. 
421 * 422 * - If the periph is currently executing a recovery 423 * command, URGENT commands must block, because only 424 * one recovery command can execute at a time. 425 */ 426 for (;;) { 427 if (flags & XS_CTL_URGENT) { 428 if (periph->periph_active > periph->periph_openings) 429 goto wait_for_opening; 430 if (periph->periph_flags & PERIPH_SENSE) { 431 if ((flags & XS_CTL_REQSENSE) == 0) 432 goto wait_for_opening; 433 } else { 434 if ((periph->periph_flags & 435 PERIPH_RECOVERY_ACTIVE) != 0) 436 goto wait_for_opening; 437 periph->periph_flags |= PERIPH_RECOVERY_ACTIVE; 438 } 439 break; 440 } 441 if (periph->periph_active >= periph->periph_openings || 442 (periph->periph_flags & PERIPH_RECOVERING) != 0) 443 goto wait_for_opening; 444 periph->periph_active++; 445 break; 446 447 wait_for_opening: 448 if (flags & XS_CTL_NOSLEEP) { 449 splx(s); 450 return (NULL); 451 } 452 SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n")); 453 periph->periph_flags |= PERIPH_WAITING; 454 (void) tsleep(periph, PRIBIO, "getxs", 0); 455 } 456 SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n")); 457 xs = pool_get(&scsipi_xfer_pool, 458 ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK)); 459 if (xs == NULL) { 460 if (flags & XS_CTL_URGENT) { 461 if ((flags & XS_CTL_REQSENSE) == 0) 462 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE; 463 } else 464 periph->periph_active--; 465 scsipi_printaddr(periph); 466 printf("unable to allocate %sscsipi_xfer\n", 467 (flags & XS_CTL_URGENT) ? 
"URGENT " : ""); 468 } 469 splx(s); 470 471 SC_DEBUG(periph, SCSIPI_DB3, ("returning\n")); 472 473 if (xs != NULL) { 474 callout_init(&xs->xs_callout); 475 memset(xs, 0, sizeof(*xs)); 476 xs->xs_periph = periph; 477 xs->xs_control = flags; 478 xs->xs_status = 0; 479 s = splbio(); 480 TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q); 481 splx(s); 482 } 483 return (xs); 484 } 485 486 /* 487 * scsipi_put_xs: 488 * 489 * Release an xfer descriptor, decreasing the outstanding command 490 * count for the peripherial. If there is a thread waiting for 491 * an opening, wake it up. If not, kick any queued I/O the 492 * peripherial may have. 493 * 494 * NOTE: Must be called at splbio(). 495 */ 496 void 497 scsipi_put_xs(xs) 498 struct scsipi_xfer *xs; 499 { 500 struct scsipi_periph *periph = xs->xs_periph; 501 int flags = xs->xs_control; 502 503 SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n")); 504 505 TAILQ_REMOVE(&periph->periph_xferq, xs, device_q); 506 pool_put(&scsipi_xfer_pool, xs); 507 508 #ifdef DIAGNOSTIC 509 if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 && 510 periph->periph_active == 0) { 511 scsipi_printaddr(periph); 512 printf("recovery without a command to recovery for\n"); 513 panic("scsipi_put_xs"); 514 } 515 #endif 516 517 if (flags & XS_CTL_URGENT) { 518 if ((flags & XS_CTL_REQSENSE) == 0) 519 periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE; 520 } else 521 periph->periph_active--; 522 if (periph->periph_active == 0 && 523 (periph->periph_flags & PERIPH_WAITDRAIN) != 0) { 524 periph->periph_flags &= ~PERIPH_WAITDRAIN; 525 wakeup(&periph->periph_active); 526 } 527 528 if (periph->periph_flags & PERIPH_WAITING) { 529 periph->periph_flags &= ~PERIPH_WAITING; 530 wakeup(periph); 531 } else { 532 if (periph->periph_switch->psw_start != NULL) { 533 SC_DEBUG(periph, SCSIPI_DB2, 534 ("calling private start()\n")); 535 (*periph->periph_switch->psw_start)(periph); 536 } 537 } 538 } 539 540 /* 541 * scsipi_channel_freeze: 542 * 543 * Freeze a 
channel's xfer queue.  Each freeze must eventually be undone by a
 *	matching thaw.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue, and kick the queue once the
 *	freeze count reaches zero.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 *
	 * NOTE(review): chan_qfreeze is re-read here after splx(), so a
	 * concurrent freeze/thaw could change it between the update and
	 * this test -- presumably benign, but confirm against the
	 * channel locking rules.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 *	(Callout-compatible signature: arg is the channel.)
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	/* A negative freeze count indicates an unbalanced freeze/thaw. */
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	/* Wake any thread blocked in scsipi_get_xs() on this periph. */
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.  Runs from the
 *	periph's callout (softclock), so nothing here may sleep.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.  Sleeps until
 *	periph_active reaches zero; scsipi_put_xs() posts the wakeup
 *	when it sees PERIPH_WAITDRAIN.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
709 */ 710 void 711 scsipi_kill_pending(periph) 712 struct scsipi_periph *periph; 713 { 714 715 (*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph); 716 #ifdef DIAGNOSTIC 717 if (TAILQ_FIRST(&periph->periph_xferq) != NULL) 718 panic("scsipi_kill_pending"); 719 #endif 720 scsipi_wait_drain(periph); 721 } 722 723 /* 724 * scsipi_interpret_sense: 725 * 726 * Look at the returned sense and act on the error, determining 727 * the unix error number to pass back. (0 = report no error) 728 * 729 * NOTE: If we return ERESTART, we are expected to haved 730 * thawed the device! 731 * 732 * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES. 733 */ 734 int 735 scsipi_interpret_sense(xs) 736 struct scsipi_xfer *xs; 737 { 738 struct scsipi_sense_data *sense; 739 struct scsipi_periph *periph = xs->xs_periph; 740 u_int8_t key; 741 u_int32_t info; 742 int error; 743 #ifndef SCSIVERBOSE 744 static char *error_mes[] = { 745 "soft error (corrected)", 746 "not ready", "medium error", 747 "non-media hardware failure", "illegal request", 748 "unit attention", "readonly device", 749 "no data found", "vendor unique", 750 "copy aborted", "command aborted", 751 "search returned equal", "volume overflow", 752 "verify miscompare", "unknown error key" 753 }; 754 #endif 755 756 sense = &xs->sense.scsi_sense; 757 #ifdef SCSIPI_DEBUG 758 if (periph->periph_flags & SCSIPI_DB1) { 759 int count; 760 scsipi_printaddr(periph); 761 printf(" sense debug information:\n"); 762 printf("\tcode 0x%x valid 0x%x\n", 763 sense->error_code & SSD_ERRCODE, 764 sense->error_code & SSD_ERRCODE_VALID ? 1 : 0); 765 printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n", 766 sense->segment, 767 sense->flags & SSD_KEY, 768 sense->flags & SSD_ILI ? 1 : 0, 769 sense->flags & SSD_EOM ? 1 : 0, 770 sense->flags & SSD_FILEMARK ? 
1 : 0); 771 printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d " 772 "extra bytes\n", 773 sense->info[0], 774 sense->info[1], 775 sense->info[2], 776 sense->info[3], 777 sense->extra_len); 778 printf("\textra: "); 779 for (count = 0; count < ADD_BYTES_LIM(sense); count++) 780 printf("0x%x ", sense->cmd_spec_info[count]); 781 printf("\n"); 782 } 783 #endif 784 785 /* 786 * If the periph has it's own error handler, call it first. 787 * If it returns a legit error value, return that, otherwise 788 * it wants us to continue with normal error processing. 789 */ 790 if (periph->periph_switch->psw_error != NULL) { 791 SC_DEBUG(periph, SCSIPI_DB2, 792 ("calling private err_handler()\n")); 793 error = (*periph->periph_switch->psw_error)(xs); 794 if (error != EJUSTRETURN) 795 return (error); 796 } 797 /* otherwise use the default */ 798 switch (sense->error_code & SSD_ERRCODE) { 799 800 /* 801 * Old SCSI-1 and SASI devices respond with 802 * codes other than 70. 803 */ 804 case 0x00: /* no error (command completed OK) */ 805 return (0); 806 case 0x04: /* drive not ready after it was selected */ 807 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0) 808 periph->periph_flags &= ~PERIPH_MEDIA_LOADED; 809 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0) 810 return (0); 811 /* XXX - display some sort of error here? 
*/ 812 return (EIO); 813 case 0x20: /* invalid command */ 814 if ((xs->xs_control & 815 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0) 816 return (0); 817 return (EINVAL); 818 case 0x25: /* invalid LUN (Adaptec ACB-4000) */ 819 return (EACCES); 820 821 /* 822 * If it's code 70, use the extended stuff and 823 * interpret the key 824 */ 825 case 0x71: /* delayed error */ 826 scsipi_printaddr(periph); 827 key = sense->flags & SSD_KEY; 828 printf(" DEFERRED ERROR, key = 0x%x\n", key); 829 /* FALLTHROUGH */ 830 case 0x70: 831 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) 832 info = _4btol(sense->info); 833 else 834 info = 0; 835 key = sense->flags & SSD_KEY; 836 837 switch (key) { 838 case SKEY_NO_SENSE: 839 case SKEY_RECOVERED_ERROR: 840 if (xs->resid == xs->datalen && xs->datalen) { 841 /* 842 * Why is this here? 843 */ 844 xs->resid = 0; /* not short read */ 845 } 846 case SKEY_EQUAL: 847 error = 0; 848 break; 849 case SKEY_NOT_READY: 850 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0) 851 periph->periph_flags &= ~PERIPH_MEDIA_LOADED; 852 if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0) 853 return (0); 854 if (sense->add_sense_code == 0x3A) { 855 error = ENODEV; /* Medium not present */ 856 if (xs->xs_control & XS_CTL_SILENT_NODEV) 857 return (error); 858 } else 859 error = EIO; 860 if ((xs->xs_control & XS_CTL_SILENT) != 0) 861 return (error); 862 break; 863 case SKEY_ILLEGAL_REQUEST: 864 if ((xs->xs_control & 865 XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0) 866 return (0); 867 /* 868 * Handle the case where a device reports 869 * Logical Unit Not Supported during discovery. 
870 */ 871 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 && 872 sense->add_sense_code == 0x25 && 873 sense->add_sense_code_qual == 0x00) 874 return (EINVAL); 875 if ((xs->xs_control & XS_CTL_SILENT) != 0) 876 return (EIO); 877 error = EINVAL; 878 break; 879 case SKEY_UNIT_ATTENTION: 880 if (sense->add_sense_code == 0x29 && 881 sense->add_sense_code_qual == 0x00) { 882 /* device or bus reset */ 883 return (ERESTART); 884 } 885 if ((periph->periph_flags & PERIPH_REMOVABLE) != 0) 886 periph->periph_flags &= ~PERIPH_MEDIA_LOADED; 887 if ((xs->xs_control & 888 XS_CTL_IGNORE_MEDIA_CHANGE) != 0 || 889 /* XXX Should reupload any transient state. */ 890 (periph->periph_flags & 891 PERIPH_REMOVABLE) == 0) { 892 return (ERESTART); 893 } 894 if ((xs->xs_control & XS_CTL_SILENT) != 0) 895 return (EIO); 896 error = EIO; 897 break; 898 case SKEY_WRITE_PROTECT: 899 error = EROFS; 900 break; 901 case SKEY_BLANK_CHECK: 902 error = 0; 903 break; 904 case SKEY_ABORTED_COMMAND: 905 error = ERESTART; 906 break; 907 case SKEY_VOLUME_OVERFLOW: 908 error = ENOSPC; 909 break; 910 default: 911 error = EIO; 912 break; 913 } 914 915 #ifdef SCSIVERBOSE 916 if (key && (xs->xs_control & XS_CTL_SILENT) == 0) 917 scsipi_print_sense(xs, 0); 918 #else 919 if (key) { 920 scsipi_printaddr(periph); 921 printf("%s", error_mes[key - 1]); 922 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) { 923 switch (key) { 924 case SKEY_NOT_READY: 925 case SKEY_ILLEGAL_REQUEST: 926 case SKEY_UNIT_ATTENTION: 927 case SKEY_WRITE_PROTECT: 928 break; 929 case SKEY_BLANK_CHECK: 930 printf(", requested size: %d (decimal)", 931 info); 932 break; 933 case SKEY_ABORTED_COMMAND: 934 if (xs->xs_retries) 935 printf(", retrying"); 936 printf(", cmd 0x%x, info 0x%x", 937 xs->cmd->opcode, info); 938 break; 939 default: 940 printf(", info = %d (decimal)", info); 941 } 942 } 943 if (sense->extra_len != 0) { 944 int n; 945 printf(", data ="); 946 for (n = 0; n < sense->extra_len; n++) 947 printf(" %02x", 948 sense->cmd_spec_info[n]); 
949 } 950 printf("\n"); 951 } 952 #endif 953 return (error); 954 955 /* 956 * Some other code, just report it 957 */ 958 default: 959 #if defined(SCSIDEBUG) || defined(DEBUG) 960 { 961 static char *uc = "undecodable sense error"; 962 int i; 963 u_int8_t *cptr = (u_int8_t *) sense; 964 scsipi_printaddr(periph); 965 if (xs->cmd == &xs->cmdstore) { 966 printf("%s for opcode 0x%x, data=", 967 uc, xs->cmdstore.opcode); 968 } else { 969 printf("%s, data=", uc); 970 } 971 for (i = 0; i < sizeof (sense); i++) 972 printf(" 0x%02x", *(cptr++) & 0xff); 973 printf("\n"); 974 } 975 #else 976 scsipi_printaddr(periph); 977 printf("Sense Error Code 0x%x", 978 sense->error_code & SSD_ERRCODE); 979 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) { 980 struct scsipi_sense_data_unextended *usense = 981 (struct scsipi_sense_data_unextended *)sense; 982 printf(" at block no. %d (decimal)", 983 _3btol(usense->block)); 984 } 985 printf("\n"); 986 #endif 987 return (EIO); 988 } 989 } 990 991 /* 992 * scsipi_size: 993 * 994 * Find out from the device what its capacity is. 995 */ 996 u_long 997 scsipi_size(periph, flags) 998 struct scsipi_periph *periph; 999 int flags; 1000 { 1001 struct scsipi_read_cap_data rdcap; 1002 struct scsipi_read_capacity scsipi_cmd; 1003 1004 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1005 scsipi_cmd.opcode = READ_CAPACITY; 1006 1007 /* 1008 * If the command works, interpret the result as a 4 byte 1009 * number of blocks 1010 */ 1011 if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd, 1012 sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap), 1013 SCSIPIRETRIES, 20000, NULL, 1014 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) { 1015 scsipi_printaddr(periph); 1016 printf("could not get size\n"); 1017 return (0); 1018 } 1019 1020 return (_4btol(rdcap.addr) + 1); 1021 } 1022 1023 /* 1024 * scsipi_test_unit_ready: 1025 * 1026 * Issue a `test unit ready' request. 
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	/* No data phase; 10 second timeout. */
	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.  Fills in *inqbuf with the INQUIRY
 *	response; with SCSI_OLD_NOINQUIRY, fakes up a response for two
 *	known translator boards that cannot answer INQUIRY properly.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags);

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 * (EINVAL means the LUN is present, EACCES that it is not;
		 * see the 0x20/0x25 cases in scsipi_interpret_sense().)
		 */
		inqbuf->device = (error == EINVAL ?
		    SID_QUAL_LU_PRESENT :
		    SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "ACB-4000        ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "MT-02 QIC       ",
		    sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 *	(type is the PREVENT ALLOW MEDIUM REMOVAL `how' byte).
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	/* Some devices cannot lock their door; quietly succeed. */
	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * 
scsipi_start: 1146 * 1147 * Send a START UNIT. 1148 */ 1149 int 1150 scsipi_start(periph, type, flags) 1151 struct scsipi_periph *periph; 1152 int type, flags; 1153 { 1154 struct scsipi_start_stop scsipi_cmd; 1155 1156 if (periph->periph_quirks & PQUIRK_NOSTARTUNIT) 1157 return 0; 1158 1159 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1160 scsipi_cmd.opcode = START_STOP; 1161 scsipi_cmd.byte2 = 0x00; 1162 scsipi_cmd.how = type; 1163 1164 return (scsipi_command(periph, 1165 (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd), 1166 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, 1167 NULL, flags)); 1168 } 1169 1170 /* 1171 * scsipi_mode_sense, scsipi_mode_sense_big: 1172 * get a sense page from a device 1173 */ 1174 1175 int 1176 scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout) 1177 struct scsipi_periph *periph; 1178 int byte2, page, len, flags, retries, timeout; 1179 struct scsipi_mode_header *data; 1180 { 1181 struct scsipi_mode_sense scsipi_cmd; 1182 int error; 1183 1184 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1185 scsipi_cmd.opcode = MODE_SENSE; 1186 scsipi_cmd.byte2 = byte2; 1187 scsipi_cmd.page = page; 1188 if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI) 1189 _lto2b(len, scsipi_cmd.u_len.atapi.length); 1190 else 1191 scsipi_cmd.u_len.scsi.length = len & 0xff; 1192 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd, 1193 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL, 1194 flags | XS_CTL_DATA_IN); 1195 SC_DEBUG(periph, SCSIPI_DB2, 1196 ("scsipi_mode_sense: error=%d\n", error)); 1197 return (error); 1198 } 1199 1200 int 1201 scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout) 1202 struct scsipi_periph *periph; 1203 int byte2, page, len, flags, retries, timeout; 1204 struct scsipi_mode_header_big *data; 1205 { 1206 struct scsipi_mode_sense_big scsipi_cmd; 1207 int error; 1208 1209 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1210 scsipi_cmd.opcode = 
MODE_SENSE_BIG; 1211 scsipi_cmd.byte2 = byte2; 1212 scsipi_cmd.page = page; 1213 _lto2b(len, scsipi_cmd.length); 1214 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd, 1215 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL, 1216 flags | XS_CTL_DATA_IN); 1217 SC_DEBUG(periph, SCSIPI_DB2, 1218 ("scsipi_mode_sense_big: error=%d\n", error)); 1219 return (error); 1220 } 1221 1222 int 1223 scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout) 1224 struct scsipi_periph *periph; 1225 int byte2, len, flags, retries, timeout; 1226 struct scsipi_mode_header *data; 1227 { 1228 struct scsipi_mode_select scsipi_cmd; 1229 int error; 1230 1231 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1232 scsipi_cmd.opcode = MODE_SELECT; 1233 scsipi_cmd.byte2 = byte2; 1234 if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI) 1235 _lto2b(len, scsipi_cmd.u_len.atapi.length); 1236 else 1237 scsipi_cmd.u_len.scsi.length = len & 0xff; 1238 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd, 1239 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL, 1240 flags | XS_CTL_DATA_OUT); 1241 SC_DEBUG(periph, SCSIPI_DB2, 1242 ("scsipi_mode_select: error=%d\n", error)); 1243 return (error); 1244 } 1245 1246 int 1247 scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout) 1248 struct scsipi_periph *periph; 1249 int byte2, len, flags, retries, timeout; 1250 struct scsipi_mode_header_big *data; 1251 { 1252 struct scsipi_mode_select_big scsipi_cmd; 1253 int error; 1254 1255 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd)); 1256 scsipi_cmd.opcode = MODE_SELECT_BIG; 1257 scsipi_cmd.byte2 = byte2; 1258 _lto2b(len, scsipi_cmd.length); 1259 error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd, 1260 sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL, 1261 flags | XS_CTL_DATA_OUT); 1262 SC_DEBUG(periph, SCSIPI_DB2, 1263 ("scsipi_mode_select: error=%d\n", error)); 1264 return (error); 1265 } 1266 1267 /* 
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 *
 *	May run in interrupt context; all periph/channel bookkeeping
 *	below is protected by splbio().
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	/* ASYNC and POLL are mutually exclusive by design. */
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		/* Waiter in scsipi_execute_xs() sleeps on `xs'. */
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *	- If there is a buf associated with the xfer,
 *	  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			/* Dump whatever partial sense data we did get. */
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		/* Updates xs->error with the real status (XS_SENSE etc). */
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	/*
	 * Map the adapter-reported xfer error onto a Unix errno, or
	 * ERESTART if the command is to be retried/re-enqueued.
	 */
	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Bus-type specific sense interpretation (SCSI vs ATAPI). */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.  Busy-wait when
			 * we cannot sleep (polling, or no completion
			 * thread yet); otherwise freeze the periph and
			 * let a callout thaw it after `hz' ticks.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
		/* enqueue failed; fall through and fail the xfer. */
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	/* Async xfers are owned by us; return them to the pool. */
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * Issue a request sense for the given scsipi_xfer. Called when the xfer
 * returns with a CHECK_CONDITION status. Must be called in valid thread
 * context and at splbio().
 *
 * Stores the result in xs->sense.scsi_sense and rewrites xs->error to
 * reflect the outcome (XS_SENSE, XS_RESET or XS_DRIVER_STUFFUP).
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	/* URGENT puts the sense command at the head of the queue. */
	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	/* 1 second timeout, no retries. */
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset.
 */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 *
 *	Returns 0 on success, or EAGAIN when a polled xfer cannot be
 *	accepted because other jobs are already queued.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	/* Caller asked us to thaw once the xfer is safely queued. */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			/*
			 * Skip periphs that are at their openings limit,
			 * frozen, or still have an untagged command
			 * outstanding (PERIPH_UNTAG is set below when
			 * one is issued).
			 */
			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			/*
			 * Only URGENT xfers may run while the periph is
			 * in recovery or waiting for sense data.
			 */
			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		/* Hand the xfer to the adapter; loop for more work. */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	/* Reset completion state before (re-)issuing the xfer. */
	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		/* Translate the tag flag into the SCSI tag message byte. */
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adaptor wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.  Remember the original
	 * setting in `oasync' so we can restore the caller's semantics.
	 */
	oasync = (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			/* Busy-wait one second, then retry the enqueue. */
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 * scsipi_done() wakes us via wakeup(xs).
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here.  It has already been handled
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	/* Announce that the thread exists; see scsipi_thread_call_callback. */
	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this
channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			/* Channel is being torn down; leave the loop. */
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 *	Panics if the kernel thread cannot be created.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	request to call a callback from the completion thread
 *
 *	Returns ESRCH if the thread does not exist yet, EBUSY if a
 *	callback is already pending, 0 on success.  The channel is
 *	frozen until the completion thread has run the callback.
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		splx(s);
		return ESRCH;
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		splx(s);
		return EBUSY;
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return(0);
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 *	Runs at splbio(); dispatches on the event type.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;
	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 *	No-op until a valid xfer mode has been negotiated.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		/* period is in units of ns * 10. */
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		/* speed in KB/s: frequency scaled by the bus width. */
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		/*
		 * Shrinking is always honored; growing only if the
		 * periph has opted in via PERIPH_GROW_OPENINGS.
		 */
		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			/* Async: period/offset are meaningless. */
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 *
	 * NOTE(review): if chan->chan_nluns were 0 the loop body would
	 * never run and `itperiph' would be read uninitialized below --
	 * confirm chan_nluns >= 1 is an invariant.
	 */
	for (lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);
		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}

/*
 * scsipi_async_event_channel_reset:
 *
 *	handle scsi bus reset
 *	called at splbio
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
	 * commands; as the sense is not available any more.
	 * can't call scsipi_done() from here, as the command has not been
	 * sent to the adapter yet (this would corrupt accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		/* Grab the successor first; we may unlink `xs'. */
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = chan->chan_periphs[target][lun];
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	detach all periphs associated with an I_T Nexus
 *	must be called from valid thread context
 *
 *	target == -1 or lun == -1 wildcards all targets/LUNs.
 */
int
scsipi_target_detach(chan, target, lun, flags)
	struct scsipi_channel *chan;
	int target, lun;
	int flags;
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int
clun, minlun, maxlun; 2466 int error; 2467 2468 if (target == -1) { 2469 mintarget = 0; 2470 maxtarget = chan->chan_ntargets; 2471 } else { 2472 if (target == chan->chan_id) 2473 return EINVAL; 2474 if (target < 0 || target >= chan->chan_ntargets) 2475 return EINVAL; 2476 mintarget = target; 2477 maxtarget = target + 1; 2478 } 2479 2480 if (lun == -1) { 2481 minlun = 0; 2482 maxlun = chan->chan_nluns; 2483 } else { 2484 if (lun < 0 || lun >= chan->chan_nluns) 2485 return EINVAL; 2486 minlun = lun; 2487 maxlun = lun + 1; 2488 } 2489 2490 for (ctarget = mintarget; ctarget < maxtarget; ctarget++) { 2491 if (ctarget == chan->chan_id) 2492 continue; 2493 2494 for (clun = minlun; clun < maxlun; clun++) { 2495 periph = scsipi_lookup_periph(chan, ctarget, clun); 2496 if (periph == NULL) 2497 continue; 2498 error = config_detach(periph->periph_dev, flags); 2499 if (error) 2500 return (error); 2501 scsipi_remove_periph(chan, periph); 2502 free(periph, M_DEVBUF); 2503 } 2504 } 2505 return(0); 2506 } 2507 2508 /* 2509 * scsipi_adapter_addref: 2510 * 2511 * Add a reference to the adapter pointed to by the provided 2512 * link, enabling the adapter if necessary. 2513 */ 2514 int 2515 scsipi_adapter_addref(adapt) 2516 struct scsipi_adapter *adapt; 2517 { 2518 int s, error = 0; 2519 2520 s = splbio(); 2521 if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) { 2522 error = (*adapt->adapt_enable)(adapt->adapt_dev, 1); 2523 if (error) 2524 adapt->adapt_refcnt--; 2525 } 2526 splx(s); 2527 return (error); 2528 } 2529 2530 /* 2531 * scsipi_adapter_delref: 2532 * 2533 * Delete a reference to the adapter pointed to by the provided 2534 * link, disabling the adapter if possible. 
2535 */ 2536 void 2537 scsipi_adapter_delref(adapt) 2538 struct scsipi_adapter *adapt; 2539 { 2540 int s; 2541 2542 s = splbio(); 2543 if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL) 2544 (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0); 2545 splx(s); 2546 } 2547 2548 struct scsipi_syncparam { 2549 int ss_factor; 2550 int ss_period; /* ns * 10 */ 2551 } scsipi_syncparams[] = { 2552 { 0x09, 125 }, 2553 { 0x0a, 250 }, 2554 { 0x0b, 303 }, 2555 { 0x0c, 500 }, 2556 }; 2557 const int scsipi_nsyncparams = 2558 sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]); 2559 2560 int 2561 scsipi_sync_period_to_factor(period) 2562 int period; /* ns * 10 */ 2563 { 2564 int i; 2565 2566 for (i = 0; i < scsipi_nsyncparams; i++) { 2567 if (period <= scsipi_syncparams[i].ss_period) 2568 return (scsipi_syncparams[i].ss_factor); 2569 } 2570 2571 return ((period / 10) / 4); 2572 } 2573 2574 int 2575 scsipi_sync_factor_to_period(factor) 2576 int factor; 2577 { 2578 int i; 2579 2580 for (i = 0; i < scsipi_nsyncparams; i++) { 2581 if (factor == scsipi_syncparams[i].ss_factor) 2582 return (scsipi_syncparams[i].ss_period); 2583 } 2584 2585 return ((factor * 4) * 10); 2586 } 2587 2588 int 2589 scsipi_sync_factor_to_freq(factor) 2590 int factor; 2591 { 2592 int i; 2593 2594 for (i = 0; i < scsipi_nsyncparams; i++) { 2595 if (factor == scsipi_syncparams[i].ss_factor) 2596 return (10000000 / scsipi_syncparams[i].ss_period); 2597 } 2598 2599 return (10000000 / ((factor * 4) * 10)); 2600 } 2601 2602 #ifdef SCSIPI_DEBUG 2603 /* 2604 * Given a scsipi_xfer, dump the request, in all it's glory 2605 */ 2606 void 2607 show_scsipi_xs(xs) 2608 struct scsipi_xfer *xs; 2609 { 2610 2611 printf("xs(%p): ", xs); 2612 printf("xs_control(0x%08x)", xs->xs_control); 2613 printf("xs_status(0x%08x)", xs->xs_status); 2614 printf("periph(%p)", xs->xs_periph); 2615 printf("retr(0x%x)", xs->xs_retries); 2616 printf("timo(0x%x)", xs->timeout); 2617 printf("cmd(%p)", xs->cmd); 2618 printf("len(0x%x)", 
xs->cmdlen); 2619 printf("data(%p)", xs->data); 2620 printf("len(0x%x)", xs->datalen); 2621 printf("res(0x%x)", xs->resid); 2622 printf("err(0x%x)", xs->error); 2623 printf("bp(%p)", xs->bp); 2624 show_scsipi_cmd(xs); 2625 } 2626 2627 void 2628 show_scsipi_cmd(xs) 2629 struct scsipi_xfer *xs; 2630 { 2631 u_char *b = (u_char *) xs->cmd; 2632 int i = 0; 2633 2634 scsipi_printaddr(xs->xs_periph); 2635 printf(" command: "); 2636 2637 if ((xs->xs_control & XS_CTL_RESET) == 0) { 2638 while (i < xs->cmdlen) { 2639 if (i) 2640 printf(","); 2641 printf("0x%x", b[i++]); 2642 } 2643 printf("-[%d bytes]\n", xs->datalen); 2644 if (xs->datalen) 2645 show_mem(xs->data, min(64, xs->datalen)); 2646 } else 2647 printf("-RESET-\n"); 2648 } 2649 2650 void 2651 show_mem(address, num) 2652 u_char *address; 2653 int num; 2654 { 2655 int x; 2656 2657 printf("------------------------------"); 2658 for (x = 0; x < num; x++) { 2659 if ((x % 16) == 0) 2660 printf("\n%03d: ", x); 2661 printf("%02x ", *address++); 2662 } 2663 printf("\n------------------------------\n"); 2664 } 2665 #endif /* SCSIPI_DEBUG */ 2666