/* $NetBSD: isp_netbsd.c,v 1.41 2001/04/10 21:52:00 mjacob Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
 * This driver is also shared source with the FreeBSD, OpenBSD, Linux,
 * and Solaris versions. This tends to be an interesting maintenance
 * problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 * Matthew Jacob <mjacob@nas.nasa.gov>
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <sys/scsiio.h>


/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *		=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
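 *
 * For example, with hz = 100 and a 250 millisecond command timeout,
 * (250 / 1000) * hz truncates to zero ticks, so _XT() below yields
 * 0 + 3 * hz = 300 ticks, i.e. the three second floor.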
 */
#define	_XT(xs)	((((xs)->timeout / 1000) * hz) + (3 * hz))

static void ispminphys(struct buf *);
static int32_t ispcmd(XS_T *);
static int ispioctl(struct scsipi_link *, u_long, caddr_t, int, struct proc *);

static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
static int isp_polled_cmd(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_command_requeue(void *);
static void isp_internal_restart(void *);

/*
 * Complete attachment of hardware, including subdevices.
 */
void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
	isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;
	isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;

	isp->isp_state = ISP_RUNSTATE;
	isp->isp_osinfo._link.scsipi_scsi.channel =
	    (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
	isp->isp_osinfo._link.adapter_softc = isp;
	isp->isp_osinfo._link.device = &isp_dev;
	isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._link.openings = isp->isp_maxcmds;
	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._link.scsipi_scsi.max_lun =
	    (isp->isp_maxluns < 7)? isp->isp_maxluns - 1 : 7;
	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* The 2nd bus will share.. */

	if (IS_FC(isp)) {
		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
	} else {
		sdparam *sdp = isp->isp_param;
		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
		isp->isp_osinfo._link.scsipi_scsi.adapter_target =
		    sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			isp->isp_osinfo._link_b = isp->isp_osinfo._link;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
			    sdp->isp_initiator_id;
			isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
			isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
			    isp->isp_osinfo._link.scsipi_scsi.max_lun;
		}
	}
	isp->isp_osinfo._link.type = BUS_SCSI;

	/*
	 * Send a SCSI Bus Reset.
	 */
	if (IS_SCSI(isp)) {
		int bus = 0;
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	} else {
		int defid;
		fcparam *fcp = isp->isp_param;
		delay(2 * 1000000);
		defid = MAX_FC_TARG;
		ISP_LOCK(isp);
		/*
		 * We probably won't have clock interrupts running,
		 * so we'll be really short (smoke test, really)
		 * at this time.
		 */
		if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
			(void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
			if (fcp->isp_fwstate == FW_READY &&
			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
				defid = fcp->isp_loopid;
			}
		}
		ISP_UNLOCK(isp);
		isp->isp_osinfo._link.scsipi_scsi.adapter_target = defid;
	}

	/*
	 * After this point, we'll be doing the new configuration
	 * schema, which allows interrupts, so we can do tsleep/wakeup
	 * for mailbox stuff at that point.
	 */
	isp->isp_osinfo.no_mbox_ints = 0;

	/*
	 * And attach children (if any).
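	 * config_found() attaches a scsibus instance for each channel;
	 * dual bus cards get a second call for the B channel link.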
	 */
	config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
	}
}

/*
 * minphys our xfers
 *
 * Unfortunately, the buffer pointer describes the target device, not the
 * adapter device, so we can't use the pointer to find out what kind of
 * adapter we are and adjust accordingly.
 */

static void
ispminphys(struct buf *bp)
{
	/*
	 * XXX: Only the 1020 has a 24 bit limit.
	 */
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

static int
ispioctl(struct scsipi_link *sc_link, u_long cmd, caddr_t addr, int flag,
    struct proc *p)
{
	struct ispsoftc *isp = sc_link->adapter_softc;
	int s, chan, retval = ENOTTY;

	chan = (sc_link->scsipi_scsi.channel == SCSI_CHANNEL_ONLY_ONE)? 0 :
	    sc_link->scsipi_scsi.channel;

	switch (cmd) {
	case SCBUSACCEL:
	{
		struct scbusaccel_args *sp = (struct scbusaccel_args *)addr;
		if (IS_SCSI(isp) && sp->sa_lun == 0) {
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp);

			sdp += chan;
			if (sp->sa_flags & SC_ACCEL_TAGS)
				dflags |= DPARM_TQING;
			if (sp->sa_flags & SC_ACCEL_WIDE)
				dflags |= DPARM_WIDE;
			if (sp->sa_flags & SC_ACCEL_SYNC)
				dflags |= DPARM_SYNC;
			s = splbio();
			sdp->isp_devparam[sp->sa_target].dev_flags |= dflags;
			dflags = sdp->isp_devparam[sp->sa_target].dev_flags;
			sdp->isp_devparam[sp->sa_target].dev_update = 1;
			isp->isp_update |= (1 << chan);
			splx(s);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "ispioctl: device flags 0x%x for %d.%d.X",
			    dflags, chan, sp->sa_target);
		}
		retval = 0;
		break;
	}
	case SCBUSIORESET:
		s = splbio();
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
			retval = EIO;
		else
			retval = 0;
		(void) splx(s);
		break;
	case ISP_SDBLEV:
	{
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_FC_RESCAN:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		struct lportdb *lp;

		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->valid) {
			ifc->loopid = lp->loopid;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	default:
		break;
	}
	return (retval);
}


static int32_t
ispcmd(XS_T *xs)
{
	struct ispsoftc *isp;
	int result, s;

	isp = XS_ISP(xs);
	s = splbio();
	if (isp->isp_state < ISP_RUNSTATE) {
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ENABLE_INTS(isp);
			(void) splx(s);
			XS_SETERR(xs, HBA_BOTCH);
			return (COMPLETE);
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}

	/*
	 * Check for queue blockage...
	 */
	if (isp->isp_osinfo.blocked) {
		if (xs->xs_control & XS_CTL_POLL) {
			xs->error = XS_DRIVER_STUFFUP;
			splx(s);
			return (TRY_AGAIN_LATER);
		}
		TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	if (xs->xs_control & XS_CTL_POLL) {
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		result = isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		(void) splx(s);
		return (result);
	}

	result = isp_start(xs);
#if 0
	{
		static int na[16] = { 0 };
		if (na[isp->isp_unit] < isp->isp_nactive) {
			isp_prt(isp, ISP_LOGALL, "active hiwater %d",
			    isp->isp_nactive);
			na[isp->isp_unit] = isp->isp_nactive;
		}
	}
#endif
	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		result = TRY_AGAIN_LATER;
		break;
	case CMD_RQLATER:
		result = SUCCESSFULLY_QUEUED;
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;
	}
	(void) splx(s);
	return (result);
}

static int
isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		break;
	case CMD_RQLATER:
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_DRIVER_STUFFUP;
		}
		result = TRY_AGAIN_LATER;
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;
	}

	if (result != SUCCESSFULLY_QUEUED) {
		return (result);
	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	while (mswait || infinite) {
		if (isp_intr((void *)isp)) {
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	result = COMPLETE;
	return (result);
}

void
isp_done(XS_T *xs)
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		scsipi_done(xs);
	}
}

static void
isp_dog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int16_t handle;

	ISP_ILOCK(isp);
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
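	 *
	 * If an interrupt has already completed it, we just clean up.
	 * If its grace period has already expired, we abort it and return
	 * it with XS_TIMEOUT. Otherwise we mark the grace period, push a
	 * SYNC_ALL marker onto the request queue, and re-arm this watchdog
	 * for one more second.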
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r, r1, i;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		i = 0;
		do {
			r = ISP_READ(isp, BIU_ISR);
			USEC_DELAY(1);
			r1 = ISP_READ(isp, BIU_ISR);
		} while (r != r1 && ++i < 1000);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog cleanup (%x, %x)", handle, r);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog timeout (%x, %x)", handle, r);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout (%x, %x)", handle, r);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_IUNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
	ISP_IUNLOCK(isp);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}

/*
 * Restart function for a command to be requeued later.
 */
static void
isp_command_requeue(void *arg)
{
	struct scsipi_xfer *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	ISP_ILOCK(isp);
	switch (ispcmd(xs)) {
	case SUCCESSFULLY_QUEUED:
		isp_prt(isp, ISP_LOGINFO,
		    "requeued command for %d.%d", XS_TGT(xs), XS_LUN(xs));
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case TRY_AGAIN_LATER:
		isp_prt(isp, ISP_LOGINFO,
		    "EAGAIN on requeue for %d.%d", XS_TGT(xs), XS_LUN(xs));
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case COMPLETE:
		/* can only be an error */
		XS_CMD_S_DONE(xs);
		callout_stop(&xs->xs_callout);
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
		scsipi_done(xs);
		break;
	}
	ISP_IUNLOCK(isp);
}

/*
 * Restart function after a LOOP UP event (e.g.); run as a timeout
 * to provide some hysteresis.
 */
static void
isp_internal_restart(void *arg)
{
	struct ispsoftc *isp = arg;
	int result, nrestarted = 0;

	ISP_ILOCK(isp);
	if (isp->isp_osinfo.blocked == 0) {
		struct scsipi_xfer *xs;
		while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
			TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
			result = isp_start(xs);
			if (result != CMD_QUEUED) {
				isp_prt(isp, ISP_LOGERR,
				    "botched command restart (err=%d)", result);
				XS_CMD_S_DONE(xs);
				if (xs->error == XS_NOERROR)
					xs->error = XS_DRIVER_STUFFUP;
				callout_stop(&xs->xs_callout);
				scsipi_done(xs);
			} else if (xs->timeout) {
				callout_reset(&xs->xs_callout,
				    _XT(xs), isp_dog, xs);
			}
			nrestarted++;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "isp_restart requeued %d commands", nrestarted);
	}
	ISP_IUNLOCK(isp);
}

int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, tgt;
	int s = splbio();
	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		if (IS_SCSI(isp) && isp->isp_dblev) {
			sdparam *sdp = isp->isp_param;
			char *wt;
			int mhz, flags, period;

			tgt = *((int *) arg);
			bus = (tgt >> 16) & 0xffff;
			tgt &= 0xffff;
			sdp += bus;
			flags = sdp->isp_devparam[tgt].cur_dflags;
			period = sdp->isp_devparam[tgt].cur_period;

			if ((flags & DPARM_SYNC) && period &&
			    (sdp->isp_devparam[tgt].cur_offset) != 0) {
				/*
				 * There's some ambiguity about our negotiated
				 * speed if we haven't detected LVD mode
				 * correctly (which seems to happen,
				 * unfortunately). If we're in LVD mode, then
				 * different rules apply about speed.
				 */
				if (sdp->isp_lvdmode || period < 0xc) {
					switch (period) {
					case 0x9:
						mhz = 80;
						break;
					case 0xa:
						mhz = 40;
						break;
					case 0xb:
						mhz = 33;
						break;
					case 0xc:
						mhz = 25;
						break;
					default:
						mhz = 1000 / (period * 4);
						break;
					}
				} else {
					mhz = 1000 / (period * 4);
				}
			} else {
				mhz = 0;
			}
			switch (flags & (DPARM_WIDE|DPARM_TQING)) {
			case DPARM_WIDE:
				wt = ", 16 bit wide";
				break;
			case DPARM_TQING:
				wt = ", Tagged Queueing Enabled";
				break;
			case DPARM_WIDE|DPARM_TQING:
				wt = ", 16 bit wide, Tagged Queueing Enabled";
				break;
			default:
				wt = " ";
				break;
			}
			if (mhz) {
				isp_prt(isp, ISP_LOGINFO,
				    "Bus %d Target %d at %dMHz Max Offset %d%s",
				    bus, tgt, mhz,
				    sdp->isp_devparam[tgt].cur_offset, wt);
			} else {
				isp_prt(isp, ISP_LOGINFO,
				    "Bus %d Target %d Async Mode%s",
				    bus, tgt, wt);
			}
			break;
		}
	case ISPASYNC_BUS_RESET:
		if (arg)
			bus = *((int *) arg);
		else
			bus = 0;
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Hopefully we get here in time to minimize the number
		 * of commands we are firing off that are sure to die.
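		 *
		 * While blocked is set, ispcmd() parks new non-polled
		 * commands on the wait queue; the LOOP UP case below clears
		 * the flag and schedules isp_internal_restart() to drain
		 * that queue.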
		 */
		isp->isp_osinfo.blocked = 1;
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		isp->isp_osinfo.blocked = 0;
		callout_reset(&isp->isp_osinfo._restart, 1,
		    isp_internal_restart, isp);
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
		if (IS_FC(isp) && isp->isp_dblev) {
			const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
			    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
			static const char *roles[4] = {
				"No", "Target", "Initiator", "Target/Initiator"
			};
			fcparam *fcp = isp->isp_param;
			int tgt = *((int *) arg);
			struct lportdb *lp = &fcp->portdb[tgt];

			isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid,
			    lp->portid, roles[lp->roles & 0x3],
			    (lp->valid)? "Arrived" : "Departed",
			    (u_int32_t) (lp->port_wwn >> 32),
			    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
			    (u_int32_t) (lp->node_wwn >> 32),
			    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
			break;
		}
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == (void *) 1) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		} else {
			isp_prt(isp, ISP_LOGINFO, "Other Change Notify");
		}
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target, lrange;
		struct lportdb *lp = NULL;
		char *pt;
		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwpn, wwnn;
		fcparam *fcp = isp->isp_param;

		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));

		wwpn =
		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
		    (((u_int64_t)resp->snscb_portname[6]) << 8) |
		    (((u_int64_t)resp->snscb_portname[7]));

		wwnn =
		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
		    (((u_int64_t)resp->snscb_nodename[6]) << 8) |
		    (((u_int64_t)resp->snscb_nodename[7]));
		if (portid == 0 || wwpn == 0) {
			break;
		}

		switch (resp->snscb_port_type) {
		case 1:
			pt = " N_Port";
			break;
		case 2:
			pt = " NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = " Nx_Port";
			break;
		case 0x81:
			pt = " F_port";
			break;
		case 0x82:
			pt = " FL_Port";
			break;
		case 0x84:
			pt = " E_port";
			break;
		default:
			pt = "?";
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
		/*
		 * We're only interested in SCSI_FCP types (for now)
		 */
		if ((resp->snscb_fc4_types[2] & 1) == 0) {
			break;
		}
		if (fcp->isp_topo != TOPO_F_PORT)
			lrange = FC_SNS_ID+1;
		else
			lrange = 0;
		/*
		 * Is it already in our list?
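		 *
		 * Both loops below skip loop ids in the FL_PORT_ID..FC_SNS_ID
		 * range, which the port database reserves for fabric entities
		 * rather than ordinary devices.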
		 */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < MAX_FC_TARG) {
			break;
		}
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == MAX_FC_TARG) {
			isp_prt(isp, ISP_LOGWARN,
			    "no more space for fabric devices");
			break;
		}
		lp->node_wwn = wwnn;
		lp->port_wwn = wwpn;
		lp->portid = portid;
		lp->fabric_dev = 1;
		break;
	}
	default:
		break;
	}
	(void) splx(s);
	return (0);
}

#include <machine/stdarg.h>
void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", isp->isp_name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}