/*	$NetBSD: scsipi_base.c,v 1.62 2001/11/13 06:56:40 lukem Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.62 2001/11/13 06:56:40 lukem Exp $");

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			free(chan->chan_periphs, M_DEVBUF);
			return (ENOMEM);
		}
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}
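
/*
 * Layout note: chan_periphs is the two-level lookup table allocated
 * above, indexed as chan_periphs[target][lun] and sized from
 * chan_ntargets and chan_nluns.  For example, the periph attached at
 * target 3, LUN 0 is simply chan->chan_periphs[3][0];
 * scsipi_lookup_periph() below adds bounds checking around that
 * access.
 */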

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}
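
/*
 * Tag IDs are managed as a bitmap: periph_freetags[] holds 32 tags
 * per word, and a tag encodes its position as (word << 5) | bit.
 * For example, tag 37 lives in word 1, bit 5.  scsipi_get_tag()
 * clears the bit to mark the tag busy; scsipi_put_tag() sets it
 * again to return the tag to the pool.
 */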

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}
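
/*
 * Tag lifecycle: scsipi_run_queue() calls scsipi_get_tag() just before
 * an xfer is handed to the adapter, and scsipi_done() calls
 * scsipi_put_tag() when the adapter completes it.  Untagged xfers
 * instead hold PERIPH_UNTAG for the duration, serializing them.
 */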

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		/* Zero the xfer first; the memset must not clobber
		   the callout initialization. */
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
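
/*
 * Freeze/thaw note: chan_qfreeze and periph_qfreeze below are
 * counters, not booleans, so freezes nest and each freeze must be
 * matched by a thaw before the queue runs again:
 *
 *	scsipi_periph_freeze(periph, 1);
 *	scsipi_periph_freeze(periph, 1);
 *	scsipi_periph_thaw(periph, 2);	(queue may run again here)
 */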

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's
		 * queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
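
/*
 * Typical use of the timed thaw (this is what scsipi_complete() does
 * when retrying a busy device): freeze the periph, then let softclock
 * thaw it one second later:
 *
 *	scsipi_periph_freeze(periph, 1);
 *	callout_reset(&periph->periph_callout, hz,
 *	    scsipi_periph_timed_thaw, periph);
 */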

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef	SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			/* FALLTHROUGH */
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			    PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

		/*
		 * Not code 70, just report it
		 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

	return (_4btol(rdcap.addr) + 1);
}
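
/*
 * Usage sketch (illustrative): a disk driver typically computes its
 * capacity at attach time with something like
 *
 *	sectors = scsipi_size(periph, XS_CTL_SILENT);
 *
 * where a return of 0 means the device did not answer READ CAPACITY.
 * Note that READ CAPACITY returns the address of the last block,
 * hence the `+ 1' above.
 */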

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media.
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}
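
/*
 * Usage sketch (illustrative; the page number and buffer are made up):
 * fetching mode page 0x08 into a caller-supplied buffer that begins
 * with the mode header:
 *
 *	error = scsipi_mode_sense(periph, 0, 0x08,
 *	    (struct scsipi_mode_header *)buf, sizeof(buf),
 *	    XS_CTL_DATA_ONSTACK, SCSIPIRETRIES, 10000);
 */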

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command.  Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense?\n");
			/* XXX maybe we should reset the device? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing and let the user work it out.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		return 0;
	}

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_complete");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				delay(1000000);
			} else {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
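
/*
 * Error recovery pipeline, in summary:
 *
 *	adapter -> scsipi_done() -> completion thread ->
 *	scsipi_complete() -> scsipi_request_sense() ->
 *	bustype_interpret_sense() -> Unix errno (or ERESTART)
 */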

/*
 * Issue a request sense for the given scsipi_xfer.  Called when the xfer
 * returns with a CHECK_CONDITION status.  Must be called in valid thread
 * context and at splbio().
 */

void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch (error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		/* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		/* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 *	Failing that, at the end of the queue.  (We'll end up
	 *	there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
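
/*
 * Note the XS_CTL_FREEZE_PERIPH / XS_CTL_THAW_PERIPH pairing used by
 * recovery commands such as the REQUEST SENSE above: scsipi_enqueue()
 * thaws the periph when a THAW_PERIPH xfer is queued, and
 * scsipi_done() freezes it again when a FREEZE_PERIPH xfer completes,
 * keeping ordinary I/O out of the way while recovery is in progress.
 */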

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources\n");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
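
/*
 * scsipi_execute_xs() below is the common entry point used when a
 * command is submitted: it resolves tagging, enqueues the xfer, kicks
 * the queue, and then either returns immediately (async), polls, or
 * sleeps until scsipi_done() wakes it.
 */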

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
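
/*
 * Asynchronous xfers (XS_CTL_ASYNC) return EJUSTRETURN from
 * scsipi_execute_xs() above; their completion and error handling then
 * happen in the channel's completion thread below, which is also
 * where deferred channel work (callbacks, resource growth, queue
 * kicks) is performed.
 */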

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
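
/*
 * Requests to the completion thread are made by setting one of the
 * SCSIPI_CHANT_* flags in chan_tflags and then doing
 * wakeup(&chan->chan_complete); each loop iteration above services
 * CALLBACK, GROWRES, KICK, and SHUTDOWN requests before draining the
 * completion queue.
 */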

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_thread_call_callback:
 *
 *	Request to call a callback from the completion thread.
 */
int
scsipi_thread_call_callback(chan, callback, arg)
	struct scsipi_channel *chan;
	void (*callback) __P((struct scsipi_channel *, void *));
	void *arg;
{
	int s;

	s = splbio();
	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/* kernel thread doesn't exist yet */
		splx(s);
		return ESRCH;
	}
	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
		splx(s);
		return EBUSY;
	}
	scsipi_channel_freeze(chan, 1);
	chan->chan_callback = callback;
	chan->chan_callback_arg = arg;
	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
	wakeup(&chan->chan_complete);
	splx(s);
	return (0);
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;

	case ASYNC_EVENT_RESET:
		scsipi_async_event_channel_reset(chan);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}
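
/*
 * Speed arithmetic above, by example (assuming the sync frequency
 * comes back in kHz, which for an 8-bit bus is numerically the KB/s
 * rate): wide buses multiply by the width in bytes, so a 10 MHz,
 * 16-bit negotiation prints as "20.000MB/s" (10000 * 2 = 20000 KB/s).
 */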
/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}
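/*
 * Adapter-side sketch (hypothetical helper and values, for
 * illustration only): after negotiating with a target, a host adapter
 * reports the result through scsipi_async_event(), which dispatches to
 * the handler above at splbio.
 */
#ifdef notdef
static void
example_report_xfer_mode(chan, target)
	struct scsipi_channel *chan;
	int target;
{
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;			/* negotiated target ID */
	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
	xm.xm_period = 0x0a;			/* sync factor (25.0 ns) */
	xm.xm_offset = 16;			/* REQ/ACK offset */
	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
}
#endif /* notdef */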
/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL) {
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);
		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}

/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.  Called at splbio.
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * The channel has been reset.  Mark pending REQUEST_SENSE commands
	 * with XS_RESET as well, since the sense data is no longer valid.
	 * We can't call scsipi_done() from here, as the commands have not
	 * been sent to the adapter yet (doing so would corrupt the
	 * accounting).
	 */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = chan->chan_periphs[target][lun];
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}

/*
 * scsipi_target_detach:
 *
 *	Detach all periphs associated with an I_T Nexus.
 *	Must be called from valid thread context.
 */
int
scsipi_target_detach(chan, target, lun, flags)
	struct scsipi_channel *chan;
	int target, lun;
	int flags;
{
	struct scsipi_periph *periph;
	int ctarget, mintarget, maxtarget;
	int clun, minlun, maxlun;
	int error;

	if (target == -1) {
		mintarget = 0;
		maxtarget = chan->chan_ntargets;
	} else {
		if (target == chan->chan_id)
			return (EINVAL);
		if (target < 0 || target >= chan->chan_ntargets)
			return (EINVAL);
		mintarget = target;
		maxtarget = target + 1;
	}

	if (lun == -1) {
		minlun = 0;
		maxlun = chan->chan_nluns;
	} else {
		if (lun < 0 || lun >= chan->chan_nluns)
			return (EINVAL);
		minlun = lun;
		maxlun = lun + 1;
	}

	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
		if (ctarget == chan->chan_id)
			continue;

		for (clun = minlun; clun < maxlun; clun++) {
			periph = scsipi_lookup_periph(chan, ctarget, clun);
			if (periph == NULL)
				continue;
			error = config_detach(periph->periph_dev, flags);
			if (error)
				return (error);
			scsipi_remove_periph(chan, periph);
			free(periph, M_DEVBUF);
		}
	}
	return (0);
}
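/*
 * Usage sketch (hypothetical helper, for illustration only): detach
 * every LUN of target 2 on a channel, forcing the detach.  A lun
 * argument of -1 wildcards all LUNs, as handled above; DETACH_FORCE
 * comes from <sys/device.h>.
 */
#ifdef notdef
static int
example_detach_target(chan)
	struct scsipi_channel *chan;
{

	return (scsipi_target_detach(chan, 2, -1, DETACH_FORCE));
}
#endif /* notdef */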
/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x09,	125 },		/* 12.5 ns (80 MHz) */
	{ 0x0a,	250 },		/* 25.0 ns (40 MHz) */
	{ 0x0b,	303 },		/* 30.3 ns (33 MHz) */
	{ 0x0c,	500 },		/* 50.0 ns (20 MHz) */
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

int
scsipi_sync_period_to_factor(period)
	int period;	/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 10) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 10);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}
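/*
 * Worked example of the conversions above (comment only): factor 0x0c
 * is in the table, so scsipi_sync_factor_to_period() returns 500
 * (50.0 ns) and scsipi_sync_factor_to_freq() returns 10000000 / 500 =
 * 20000 kHz (Fast-20).  A factor outside the table, e.g. 0x19 (25),
 * falls through to the generic formula: period = 25 * 4 * 10 = 1000
 * (100.0 ns), i.e. 10000 kHz (Fast-10).
 */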
#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory.
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *)xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */