/* $NetBSD: isp_netbsd.c,v 1.96 2020/12/05 17:33:53 thorpej Exp $ */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000-2007 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.96 2020/12/05 17:33:53 thorpej Exp $");

#include <dev/ic/isp_netbsd.h>
#include <dev/ic/isp_ioctl.h>
#include <sys/scsiio.h>

/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *	=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
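/*
 * Worked example (assuming hz is 100): a 10000ms midlayer timeout becomes
 * (10000 / 1000) * 100 + (3 * 100) = 1300 ticks, i.e. 13 seconds, while a
 * 500ms timeout rounds down to 0 ticks and ends up as just the 3 second pad.
 */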
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))

static void isp_config_interrupts(device_t);
static void ispminphys_1020(struct buf *);
static void ispminphys(struct buf *);
static void ispcmd(struct ispsoftc *, XS_T *);
static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);

static void isp_polled_cmd_wait(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_gdt(void *);
static void isp_ldt(void *);
static void isp_make_here(ispsoftc_t *, int);
static void isp_make_gone(ispsoftc_t *, int);
static void isp_fc_worker(void *);

static const char *roles[4] = {
    "(none)", "Target", "Initiator", "Target/Initiator"
};
static const char prom3[] =
    "PortID %#06x Departed from Target %u because of %s";
int isp_change_is_bad = 0;	/* "changed" devices are bad */
int isp_quickboot_time = 15;	/* don't wait more than N secs for loop up */
static int isp_fabric_hysteresis = 5;
#define	isp_change_is_bad	0

/*
 * Complete attachment of hardware, including subdevices.
 */

void
isp_attach(struct ispsoftc *isp)
{
	device_t self = isp->isp_osinfo.dev;
	int i;

	isp->isp_state = ISP_RUNSTATE;

	isp->isp_osinfo.adapter.adapt_dev = self;
	isp->isp_osinfo.adapter.adapt_openings = isp->isp_maxcmds;
	isp->isp_osinfo.loop_down_limit = 300;

	/*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
	 */
	isp->isp_osinfo.adapter.adapt_max_periph = uimin(isp->isp_maxcmds, 255);
	isp->isp_osinfo.adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo.adapter.adapt_request = isprequest;
	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
		isp->isp_osinfo.adapter.adapt_minphys = ispminphys_1020;
	} else {
		isp->isp_osinfo.adapter.adapt_minphys = ispminphys;
	}

	callout_init(&isp->isp_osinfo.gdt, 0);
	callout_setfunc(&isp->isp_osinfo.gdt, isp_gdt, isp);
	callout_init(&isp->isp_osinfo.ldt, 0);
	callout_setfunc(&isp->isp_osinfo.ldt, isp_ldt, isp);
	if (IS_FC(isp)) {
		if (kthread_create(PRI_NONE, 0, NULL, isp_fc_worker, isp,
		    &isp->isp_osinfo.thread, "%s:fc_thrd",
		    device_xname(self))) {
			isp_prt(isp, ISP_LOGERR,
			    "unable to create FC worker thread");
			return;
		}
	}

	for (i = 0; i != isp->isp_osinfo.adapter.adapt_nchannels; i++) {
		isp->isp_osinfo.chan[i].chan_adapter =
		    &isp->isp_osinfo.adapter;
		isp->isp_osinfo.chan[i].chan_bustype = &scsi_bustype;
		isp->isp_osinfo.chan[i].chan_channel = i;
		/*
		 * Until the midlayer is fixed to use REPORT LUNS,
		 * limit to 8 luns.
		 */
		isp->isp_osinfo.chan[i].chan_nluns = uimin(isp->isp_maxluns, 8);
		if (IS_FC(isp)) {
			isp->isp_osinfo.chan[i].chan_ntargets = MAX_FC_TARG;
			if (ISP_CAP_2KLOGIN(isp) == 0 && MAX_FC_TARG > 256) {
				isp->isp_osinfo.chan[i].chan_ntargets = 256;
			}
			isp->isp_osinfo.chan[i].chan_id = MAX_FC_TARG;
		} else {
			isp->isp_osinfo.chan[i].chan_ntargets = MAX_TARGETS;
			isp->isp_osinfo.chan[i].chan_id =
			    SDPARAM(isp, i)->isp_initiator_id;
			ISP_LOCK(isp);
			(void) isp_control(isp, ISPCTL_RESET_BUS, i);
			ISP_UNLOCK(isp);
		}
	}

	/*
	 * Defer enabling mailbox interrupts until later.
	 */
	config_interrupts(self, isp_config_interrupts);
}

static void
isp_config_interrupts(device_t self)
{
	int i;
	struct ispsoftc *isp = device_private(self);

	isp->isp_osinfo.mbox_sleep_ok = 1;

	if (IS_FC(isp) && (FCPARAM(isp, 0)->isp_fwstate != FW_READY ||
	    FCPARAM(isp, 0)->isp_loopstate != LOOP_READY)) {
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "Starting Initial Loop Down Timer");
		callout_schedule(&isp->isp_osinfo.ldt, isp_quickboot_time * hz);
	}

	/*
	 * And attach children (if any).
	 */
	for (i = 0; i < isp->isp_osinfo.adapter.adapt_nchannels; i++) {
		config_found(self, &isp->isp_osinfo.chan[i], scsiprint);
	}
}

/*
 * minphys our xfers
 */
static void
ispminphys_1020(struct buf *bp)
{
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

static void
ispminphys(struct buf *bp)
{
	if (bp->b_bcount >= (1 << 30)) {
		bp->b_bcount = (1 << 30);
	}
	minphys(bp);
}

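/*
 * ioctl entry point for the adapter (adapt_ioctl in struct scsipi_adapter).
 * The requests are defined in <dev/ic/isp_ioctl.h>; a userland caller might
 * look roughly like the (hypothetical) sketch below, where fd is an open
 * descriptor that reaches this adapter through the scsipi ioctl path:
 *
 *	struct isp_hba_device hba;
 *	memset(&hba, 0, sizeof(hba));
 *	hba.fc_channel = 0;
 *	if (ioctl(fd, ISP_FC_GETHINFO, &hba) == 0)
 *		printf("fw %d.%d.%d\n", hba.fc_fw_major,
 *		    hba.fc_fw_minor, hba.fc_fw_micro);
 */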
static int
ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr, int flag,
	struct proc *p)
{
	struct ispsoftc *isp = device_private(chan->chan_adapter->adapt_dev);
	int nr, bus, retval = ENOTTY;

	switch (cmd) {
	case ISP_SDBLEV:
	{
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_GETROLE:
		bus = *(int *)addr;
		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}
		if (IS_FC(isp)) {
			*(int *)addr = FCPARAM(isp, bus)->role;
		} else {
			*(int *)addr = SDPARAM(isp, bus)->role;
		}
		retval = 0;
		break;
	case ISP_SETROLE:

		nr = *(int *)addr;
		bus = nr >> 8;
		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}
		nr &= 0xff;
		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
			retval = EINVAL;
			break;
		}
		if (IS_FC(isp)) {
			*(int *)addr = FCPARAM(isp, bus)->role;
			FCPARAM(isp, bus)->role = nr;
		} else {
			*(int *)addr = SDPARAM(isp, bus)->role;
			SDPARAM(isp, bus)->role = nr;
		}
		retval = 0;
		break;

	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp, 0);
		ISP_UNLOCK(isp);
		retval = 0;
		break;

	case ISP_RESCAN:
		if (IS_FC(isp)) {
			bus = *(int *)addr;
			if (bus < 0 || bus >= isp->isp_nchan) {
				retval = -ENXIO;
				break;
			}
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, bus, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;

	case ISP_FC_LIP:
		if (IS_FC(isp)) {
			bus = *(int *)addr;
			if (bus < 0 || bus >= isp->isp_nchan) {
				retval = -ENXIO;
				break;
			}
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, bus)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		fcportdb_t *lp;

		if (IS_SCSI(isp)) {
			break;
		}
		if (ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
		if (lp->state == FC_PORTDB_STATE_VALID) {
			ifc->role = lp->roles;
			ifc->loopid = lp->handle;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		break;
	}
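	/*
	 * Statistics are snapshotted (and cleared, below) under the ISP
	 * lock so the counters stay consistent with the interrupt path.
	 */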
	case ISP_GET_STATS:
	{
		isp_stats_t *sp = (isp_stats_t *) addr;

		ISP_MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		ISP_LOCK(isp);
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		ISP_LOCK(isp);
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_FC_GETHINFO:
	{
		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
		bus = hba->fc_channel;

		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = ENXIO;
			break;
		}
		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
		hba->fc_nchannels = isp->isp_nchan;
		hba->fc_nports = isp->isp_nchan;	/* XXXX 24XX STUFF? XXX */
		if (IS_FC(isp)) {
			hba->fc_speed = FCPARAM(isp, bus)->isp_gbspeed;
			hba->fc_topology = FCPARAM(isp, bus)->isp_topo + 1;
			hba->fc_loopid = FCPARAM(isp, bus)->isp_loopid;
			hba->nvram_node_wwn = FCPARAM(isp, bus)->isp_wwnn_nvram;
			hba->nvram_port_wwn = FCPARAM(isp, bus)->isp_wwpn_nvram;
			hba->active_node_wwn = FCPARAM(isp, bus)->isp_wwnn;
			hba->active_port_wwn = FCPARAM(isp, bus)->isp_wwpn;
		} else {
			hba->fc_speed = 0;
			hba->fc_topology = 0;
			hba->nvram_node_wwn = 0ull;
			hba->nvram_port_wwn = 0ull;
			hba->active_node_wwn = 0ull;
			hba->active_port_wwn = 0ull;
		}
		retval = 0;
		break;
	}
	case ISP_TSK_MGMT:
	{
		int needmarker;
		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
		uint16_t loopid;
		mbreg_t mbs;

		if (IS_SCSI(isp)) {
			break;
		}

		bus = fct->chan;
		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}

		memset(&mbs, 0, sizeof (mbs));
		needmarker = retval = 0;
		loopid = fct->loopid;
		if (ISP_CAP_2KLOGIN(isp) == 0) {
			loopid <<= 8;
		}
		switch (fct->action) {
		case IPT_CLEAR_ACA:
			mbs.param[0] = MBOX_CLEAR_ACA;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			break;
		case IPT_TARGET_RESET:
			mbs.param[0] = MBOX_TARGET_RESET;
			mbs.param[1] = loopid;
			needmarker = 1;
			break;
		case IPT_LUN_RESET:
			mbs.param[0] = MBOX_LUN_RESET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_CLEAR_TASK_SET:
			mbs.param[0] = MBOX_CLEAR_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_ABORT_TASK_SET:
			mbs.param[0] = MBOX_ABORT_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		default:
			retval = EINVAL;
			break;
		}
		if (retval == 0) {
			if (needmarker) {
				FCPARAM(isp, bus)->sendmarker = 1;
			}
			ISP_LOCK(isp);
			retval =
			    isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
			ISP_UNLOCK(isp);
			if (retval) {
				retval = EIO;
			}
		}
		break;
	}
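	/*
	 * Walk the N-port handle space and copy the known WWNN/WWPN pairs
	 * out to the caller's list, followed by the number of entries filled.
	 */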
	case ISP_FC_GETDLIST:
	{
		isp_dlist_t local, *ua;
		uint16_t nph, nphe, count, channel, lim;
		struct wwnpair pair, *uptr;

		if (IS_SCSI(isp)) {
			retval = EINVAL;
			break;
		}

		ua = *(isp_dlist_t **)addr;
		if (copyin(ua, &local, sizeof (isp_dlist_t))) {
			retval = EFAULT;
			break;
		}
		lim = local.count;
		channel = local.channel;
		if (channel >= isp->isp_nchan) {
			retval = EINVAL;
			break;
		}

		ua = *(isp_dlist_t **)addr;
		uptr = &ua->wwns[0];

		if (ISP_CAP_2KLOGIN(isp)) {
			nphe = NPH_MAX_2K;
		} else {
			nphe = NPH_MAX;
		}
		for (count = 0, nph = 0; count < lim && nph != nphe; nph++) {
			ISP_LOCK(isp);
			retval = isp_control(isp, ISPCTL_GET_NAMES, channel,
			    nph, &pair.wwnn, &pair.wwpn);
			ISP_UNLOCK(isp);
			if (retval || (pair.wwpn == INI_NONE &&
			    pair.wwnn == INI_NONE)) {
				retval = 0;
				continue;
			}
			if (copyout(&pair, (void *)uptr++, sizeof (pair))) {
				retval = EFAULT;
				break;
			}
			count++;
		}
		if (retval == 0) {
			if (copyout(&count, (void *)&ua->count,
			    sizeof (count))) {
				retval = EFAULT;
			}
		}
		break;
	}
	case SCBUSIORESET:
		ISP_LOCK(isp);
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel)) {
			retval = EIO;
		} else {
			retval = 0;
		}
		ISP_UNLOCK(isp);
		break;
	default:
		break;
	}
	return (retval);
}

static void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
	volatile uint8_t ombi;
	int lim, chan;

	ISP_LOCK(isp);
	if (isp->isp_state < ISP_RUNSTATE) {
		ISP_DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_ENABLE_INTS(isp);
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGERR, "isp not at init state");
			XS_SETERR(xs, HBA_BOTCH);
			scsipi_done(xs);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISP_ENABLE_INTS(isp);
	}
	chan = XS_CHANNEL(xs);

	/*
	 * Handle the case of a FC card where the FC thread hasn't
	 * fired up yet and we don't yet have a known loop state.
	 */
	if (IS_FC(isp) && (FCPARAM(isp, chan)->isp_fwstate != FW_READY ||
	    FCPARAM(isp, chan)->isp_loopstate != LOOP_READY) &&
	    isp->isp_osinfo.thread == NULL) {
		ombi = isp->isp_osinfo.mbox_sleep_ok != 0;
		int delay_time;

		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.mbox_sleep_ok = 0;
		}

		if (isp->isp_osinfo.loop_checked == 0) {
			delay_time = 10 * 1000000;
			isp->isp_osinfo.loop_checked = 1;
		} else {
			delay_time = 250000;
		}

		if (isp_fc_runstate(isp, XS_CHANNEL(xs), delay_time) != 0) {
			if (xs->xs_control & XS_CTL_POLL) {
				isp->isp_osinfo.mbox_sleep_ok = ombi;
			}
			if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				scsipi_done(xs);
				ISP_UNLOCK(isp);
				return;
			}
			/*
			 * Otherwise, fall thru to be queued up for later.
			 */
		} else {
			int wasblocked =
			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
			isp->isp_osinfo.blocked = isp->isp_osinfo.paused = 0;
			if (wasblocked) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "THAW QUEUES @ LINE %d", __LINE__);
				scsipi_channel_thaw(&isp->isp_osinfo.chan[chan],
				    1);
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.mbox_sleep_ok = ombi;
		}
	}

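	/*
	 * If the adapter is paused (resource shortage) or blocked (loop
	 * down), don't start the command; hand it back to the midlayer
	 * for requeue or completion with an error.
	 */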
	if (isp->isp_osinfo.paused) {
		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}
	if (isp->isp_osinfo.blocked) {
		isp_prt(isp, ISP_LOGWARN,
		    "I/O while blocked with retries %d", xs->xs_retries);
		if (xs->xs_retries) {
			xs->error = XS_REQUEUE;
			xs->xs_retries--;
		} else {
			XS_SETERR(xs, HBA_SELTIMEOUT);
		}
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		ombi = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 0;
	}

	switch (isp_start(xs)) {
	case CMD_QUEUED:
		if (IS_FC(isp) && isp->isp_osinfo.wwns[XS_TGT(xs)] == 0) {
			fcparam *fcp = FCPARAM(isp, XS_CHANNEL(xs));
			int dbidx = fcp->isp_dev_map[XS_TGT(xs)] - 1;
			device_t dev = xs->xs_periph->periph_dev;

			if (dbidx >= 0 && dev &&
			    prop_dictionary_set_uint64(device_properties(dev),
			    "port-wwn", fcp->portdb[dbidx].port_wwn) == TRUE) {
				isp->isp_osinfo.wwns[XS_TGT(xs)] =
				    fcp->portdb[dbidx].port_wwn;
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp_polled_cmd_wait(isp, xs);
			isp->isp_osinfo.mbox_sleep_ok = ombi;
		} else if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		isp->isp_osinfo.paused = 1;
		xs->error = XS_RESOURCE_SHORTAGE;
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "FREEZE QUEUES @ LINE %d", __LINE__);
		for (chan = 0; chan < isp->isp_nchan; chan++) {
			scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
		}
		scsipi_done(xs);
		break;
	case CMD_RQLATER:
		/*
		 * We can only get RQLATER from FC devices (1 channel only).
		 *
		 * If we've never seen loop up, see if we've been down for
		 * quickboot time; otherwise wait the loop down limit time.
		 * If so, then we start giving up on commands.
		 */
		if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
			lim = isp_quickboot_time;
		} else {
			lim = isp->isp_osinfo.loop_down_limit;
		}
		if (isp->isp_osinfo.loop_down_time >= lim) {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "RQLATER->SELTIMEOUT for %d (%d >= %d)", XS_TGT(xs),
			    isp->isp_osinfo.loop_down_time, lim);
			XS_SETERR(xs, HBA_SELTIMEOUT);
			scsipi_done(xs);
			break;
		}
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
		} else {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "RQLATER WITH FROZEN QUEUES @ LINE %d", __LINE__);
		}
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		break;
	case CMD_COMPLETE:
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}

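/*
 * Adapter request entry point for the scsipi midlayer: run transfers via
 * ispcmd() and translate transfer-mode requests into goal flags for SPI
 * (parallel SCSI) adapters.
 */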
static void
isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ispsoftc *isp = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		ispcmd(isp, (XS_T *) arg);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		if (IS_SCSI(isp)) {
			struct scsipi_xfer_mode *xm = arg;
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp, chan->chan_channel);

			if (xm->xm_mode & PERIPH_CAP_TQING)
				dflags |= DPARM_TQING;
			if (xm->xm_mode & PERIPH_CAP_WIDE16)
				dflags |= DPARM_WIDE;
			if (xm->xm_mode & PERIPH_CAP_SYNC)
				dflags |= DPARM_SYNC;
			ISP_LOCK(isp);
			sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
			dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
			sdp->isp_devparam[xm->xm_target].dev_update = 1;
			sdp->update = 1;
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "isprequest: device flags %#x for %d.%d.X",
			    dflags, chan->chan_channel, xm->xm_target);
			break;
		}
	default:
		break;
	}
}

static void
isp_polled_cmd_wait(struct ispsoftc *isp, XS_T *xs)
{
	int infinite = 0, mswait;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0) {
		infinite = 1;
	}

	while (mswait || infinite) {
		uint32_t isr;
		uint16_t sema, mbox;
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		ISP_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened, so abort the command.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp, 0);
		}
		if (XS_NOERR(xs)) {
			isp_prt(isp, ISP_LOGERR, "polled command timed out");
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}

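/*
 * Platform command completion: stop the watchdog callout, map a SCSI
 * QUEUE FULL status to XS_BUSY, thaw any channels we paused for a
 * resource shortage, and pass the transfer back to the midlayer.
 */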
void
isp_done(XS_T *xs)
{
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		/*
		 * Fixup- if we get a QFULL, we need
		 * to set XS_BUSY as the error.
		 */
		if (xs->status == SCSI_QUEUE_FULL) {
			xs->error = XS_BUSY;
		}
		if (isp->isp_osinfo.paused) {
			int i;
			isp->isp_osinfo.paused = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			for (i = 0; i < isp->isp_nchan; i++) {
				scsipi_channel_timed_thaw(&isp->isp_osinfo.chan[i]);
			}
		}
		if (xs->error == XS_DRIVER_STUFFUP) {
			isp_prt(isp, ISP_LOGERR,
			    "BOTCHED cmd for %d.%d.%d cmd %#x datalen %ld",
			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
			    XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
		}
		scsipi_done(xs);
	}
}

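/*
 * Per-command watchdog, run from the callout set in ispcmd(): check whether
 * the command is really dead and either complete it, abort it, or give it
 * one more grace period after pushing a SYNC_ALL marker at the firmware.
 */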
static void
isp_dog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	uint32_t handle;
	int sok;

	ISP_ILOCK(isp);
	sok = isp->isp_osinfo.mbox_sleep_ok;
	isp->isp_osinfo.mbox_sleep_ok = 0;
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		uint32_t isr;
		uint16_t mbox, sema;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle %#x)", handle);
			goto out;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle %#x)", handle);
			goto out;
		}

		XS_CMD_S_WDOG(xs);

		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog cleanup for handle %#x", handle);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog timeout for handle %#x", handle);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			void *qe;
			isp_marker_t local, *mp = &local;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout on handle %x", handle);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			qe = isp_getrqentry(isp);
			if (qe == NULL)
				goto out;
			XS_CMD_S_GRACE(xs);
			ISP_MEMZERO((void *) mp, sizeof (*mp));
			mp->mrk_header.rqs_entry_count = 1;
			mp->mrk_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->mrk_modifier = SYNC_ALL;
			mp->mrk_target = XS_CHANNEL(xs) << 7;
			isp_put_marker(isp, mp, qe);
			ISP_SYNC_REQUEST(isp);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
out:
	isp->isp_osinfo.mbox_sleep_ok = sok;
	ISP_IUNLOCK(isp);
}

/*
 * Gone Device Timer Function- when we have decided that a device has gone
 * away, we wait a specific period of time prior to telling the OS it has
 * gone away.
 *
 * This timer function fires once a second and then scans the port database
 * for devices that are marked dead but still have a virtual target assigned.
 * We decrement a counter for that port database entry, and when it hits zero,
 * we tell the OS the device has gone away.
 */
static void
isp_gdt(void *arg)
{
	ispsoftc_t *isp = arg;
	fcportdb_t *lp;
	int dbidx, tgt, more_to_do = 0;

	isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
	ISP_LOCK(isp);
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp, 0)->portdb[dbidx];

		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
			continue;
		}
		if (lp->dev_map_idx == 0) {
			continue;
		}
		if (lp->new_reserved == 0) {
			continue;
		}
		lp->new_reserved -= 1;
		if (lp->new_reserved != 0) {
			more_to_do++;
			continue;
		}
		tgt = lp->dev_map_idx - 1;
		FCPARAM(isp, 0)->isp_dev_map[tgt] = 0;
		lp->dev_map_idx = 0;
		lp->state = FC_PORTDB_STATE_NIL;
		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
		    "Gone Device Timeout");
		isp_make_gone(isp, tgt);
	}
	if (more_to_do) {
		callout_schedule(&isp->isp_osinfo.gdt, hz);
	} else {
		isp->isp_osinfo.gdt_running = 0;
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "stopping Gone Device Timer");
	}
	ISP_UNLOCK(isp);
}

/*
 * Loop Down Timer Function- when the loop goes down, a timer is started,
 * and after it expires we come here and take all probational devices that
 * the OS knows about and tell the OS that they've gone away.
 *
 * We don't clear the devices out of our port database because, when the loop
 * comes back up, we have to do some actual cleanup with the chip at that
 * point (implicit PLOGO, e.g., to get the chip's port database state right).
 */
static void
isp_ldt(void *arg)
{
	ispsoftc_t *isp = arg;
	fcportdb_t *lp;
	int dbidx, tgt;

	isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
	ISP_LOCK(isp);

	/*
	 * Notify the OS of all targets that we now consider to have departed.
	 */
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp, 0)->portdb[dbidx];

		if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
			continue;
		}
		if (lp->dev_map_idx == 0) {
			continue;
		}

		/*
		 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
		 */

		/*
		 * Mark that we've announced that this device is gone....
		 */
		lp->reserved = 1;

		/*
		 * but *don't* change the state of the entry. Just clear
		 * any target id stuff and announce to the OS that the
		 * device is gone. This way any necessary PLOGO stuff
		 * will happen when loop comes back up.
		 */

		tgt = lp->dev_map_idx - 1;
		FCPARAM(isp, 0)->isp_dev_map[tgt] = 0;
		lp->dev_map_idx = 0;
		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
		    "Loop Down Timeout");
		isp_make_gone(isp, tgt);
	}

	/*
	 * The loop down timer has expired. Wake up the kthread
	 * to notice that fact (or make it false).
	 */
	isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
	wakeup(&isp->isp_osinfo.thread);
	ISP_UNLOCK(isp);
}

static void
isp_make_here(ispsoftc_t *isp, int tgt)
{
	isp_prt(isp, ISP_LOGINFO, "target %d has arrived", tgt);
}

static void
isp_make_gone(ispsoftc_t *isp, int tgt)
{
	isp_prt(isp, ISP_LOGINFO, "target %d has departed", tgt);
}

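/*
 * Fibre Channel worker kthread: periodically re-evaluates loop/fabric state
 * via isp_fc_runstate(), tracks how long the loop has been down, and freezes
 * or thaws the midlayer queues as the state changes.
 */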
static void
isp_fc_worker(void *arg)
{
	ispsoftc_t *isp = arg;
	int slp = 0;
	int chan = 0;

	int s = splbio();
	/*
	 * The first time through the loop handles the case where we
	 * have yet to get good Fibre Channel state.
	 */
	while (isp->isp_osinfo.thread != NULL) {
		int sok, lb, lim;

		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "checking FC state");
		sok = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 1;
		lb = isp_fc_runstate(isp, chan, 250000);
		isp->isp_osinfo.mbox_sleep_ok = sok;
		if (lb) {
			/*
			 * Increment loop down time by the last sleep interval.
			 */
			isp->isp_osinfo.loop_down_time += slp;

			if (lb < 0) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FC loop not up (down count %d)",
				    isp->isp_osinfo.loop_down_time);
			} else {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FC got to %d (down count %d)",
				    lb, isp->isp_osinfo.loop_down_time);
			}

			/*
			 * If we've never seen loop up and we've waited longer
			 * than quickboot time, or we've seen loop up but we've
			 * waited longer than loop_down_limit, give up and go
			 * to sleep until loop comes up.
			 */
			if (FCPARAM(isp, 0)->loop_seen_once == 0) {
				lim = isp_quickboot_time;
			} else {
				lim = isp->isp_osinfo.loop_down_limit;
			}
			if (isp->isp_osinfo.loop_down_time >= lim) {
				/*
				 * If we're now past our limit, release
				 * the queues and let them come in and
				 * either get HBA_SELTIMEOUT or cause
				 * another freeze.
				 */
				isp->isp_osinfo.blocked = 1;
				slp = 0;
			} else if (isp->isp_osinfo.loop_down_time < 10) {
				slp = 1;
			} else if (isp->isp_osinfo.loop_down_time < 30) {
				slp = 5;
			} else if (isp->isp_osinfo.loop_down_time < 60) {
				slp = 10;
			} else if (isp->isp_osinfo.loop_down_time < 120) {
				slp = 20;
			} else {
				slp = 30;
			}

		} else {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FC state OK");
			isp->isp_osinfo.loop_down_time = 0;
			slp = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
		}

		/*
		 * If we'd frozen the queues, unfreeze them now so that
		 * we can start getting commands. If the FC state isn't
		 * okay yet, they'll hit that in isp_start which will
		 * freeze the queues again.
		 */
		if (isp->isp_osinfo.blocked) {
			isp->isp_osinfo.blocked = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
		}
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "sleep time %d", slp);
		tsleep(&isp->isp_osinfo.thread, PRIBIO, "ispf", slp * hz);

		/*
		 * If slp is zero, we're waking up for the first time after
		 * things have been okay. In this case, we set a deferral state
		 * for all commands and delay hysteresis seconds before starting
		 * the FC state evaluation. This gives the loop/fabric a chance
		 * to settle.
		 */
		if (slp == 0 && isp_fabric_hysteresis) {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "sleep hysteresis tick time %d",
			    isp_fabric_hysteresis * hz);
			(void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
			    (isp_fabric_hysteresis * hz));
		}
	}
	splx(s);

	/* In case parent is waiting for us to exit. */
	wakeup(&isp->isp_osinfo.thread);
	kthread_exit(0);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	ISP_DISABLE_INTS(isp);
	isp_unlock(isp);
}

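/*
 * Platform handler for asynchronous events posted by the core driver:
 * transfer mode updates, bus resets, loop state transitions, port database
 * arrivals/changes/departures, change notifications, and firmware crashes.
 */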
void
isp_async(struct ispsoftc *isp, ispasync_t cmd, ...)
{
	int bus, tgt;
	const char *msg = NULL;
	static const char prom[] =
	    "PortID %#06x handle %#x role %s %s\n"
	    " WWNN %#08x%08x WWPN %#08x%08x";
	static const char prom2[] =
	    "PortID %#06x handle %#x role %s %s tgt %u\n"
	    " WWNN %#08x%08x WWPN %#08x%08x";
	fcportdb_t *lp;
	va_list ap;

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		if (IS_SCSI(isp)) {
			sdparam *sdp;
			int flags;
			struct scsipi_xfer_mode xm;

			va_start(ap, cmd);
			bus = va_arg(ap, int);
			tgt = va_arg(ap, int);
			va_end(ap);
			sdp = SDPARAM(isp, bus);
			flags = sdp->isp_devparam[tgt].actv_flags;

			xm.xm_mode = 0;
			xm.xm_period = sdp->isp_devparam[tgt].actv_period;
			xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
			xm.xm_target = tgt;

			if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
				xm.xm_mode |= PERIPH_CAP_SYNC;
			if (flags & DPARM_WIDE)
				xm.xm_mode |= PERIPH_CAP_WIDE16;
			if (flags & DPARM_TQING)
				xm.xm_mode |= PERIPH_CAP_TQING;
			scsipi_async_event(&isp->isp_osinfo.chan[bus],
			    ASYNC_EVENT_XFER_MODE, &xm);
			break;
		}
		/* FALLTHROUGH */
	case ISPASYNC_BUS_RESET:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		va_end(ap);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		scsipi_async_event(&isp->isp_osinfo.chan[bus],
		    ASYNC_EVENT_RESET, NULL);
		break;
	case ISPASYNC_LIP:
		if (msg == NULL) {
			msg = "LIP Received";
		}
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_RESET:
		if (msg == NULL) {
			msg = "LOOP Reset Received";
		}
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_DOWN:
		if (msg == NULL) {
			msg = "Loop DOWN";
		}
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		va_end(ap);

		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running and interrupts that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.mbox_sleep_ok &&
		    isp->isp_osinfo.blocked == 0 &&
		    isp->isp_osinfo.thread) {
			isp->isp_osinfo.blocked = 1;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
			scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			if (callout_pending(&isp->isp_osinfo.ldt) == 0) {
				callout_schedule(&isp->isp_osinfo.ldt,
				    isp->isp_osinfo.loop_down_limit * hz);
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "Starting Loop Down Timer");
			}
		}
		isp_prt(isp, ISP_LOGINFO, "%s", msg);
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
		 * the FC worker thread. When the FC worker thread
		 * is done, let *it* call scsipi_channel_thaw...
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
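	/*
	 * Port database events: newly arrived FC devices are mapped to a
	 * free target id and announced; changes, stays, and departures are
	 * logged and, where needed, handed to the gone device timer.
	 */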
	case ISPASYNC_DEV_ARRIVED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		lp->reserved = 0;
		if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
		    (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
			int dbidx = lp - FCPARAM(isp, bus)->portdb;
			int i;

			for (i = 0; i < MAX_FC_TARG; i++) {
				if (i >= FL_ID && i <= SNS_ID) {
					continue;
				}
				if (FCPARAM(isp, bus)->isp_dev_map[i] == 0) {
					break;
				}
			}
			if (i < MAX_FC_TARG) {
				FCPARAM(isp, bus)->isp_dev_map[i] = dbidx + 1;
				lp->dev_map_idx = i + 1;
			} else {
				isp_prt(isp, ISP_LOGWARN, "out of target ids");
				isp_dump_portdb(isp, bus);
			}
		}
		if (lp->dev_map_idx) {
			tgt = lp->dev_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
			isp_make_here(isp, tgt);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_CHANGED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		if (isp_change_is_bad) {
			lp->state = FC_PORTDB_STATE_NIL;
			if (lp->dev_map_idx) {
				tgt = lp->dev_map_idx - 1;
				FCPARAM(isp, bus)->isp_dev_map[tgt] = 0;
				lp->dev_map_idx = 0;
				isp_prt(isp, ISP_LOGCONFIG, prom3,
				    lp->portid, tgt, "change is bad");
				isp_make_gone(isp, tgt);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles],
				    "changed and departed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		} else {
			lp->portid = lp->new_portid;
			lp->roles = lp->new_roles;
			if (lp->dev_map_idx) {
				int t = lp->dev_map_idx - 1;
				FCPARAM(isp, bus)->isp_dev_map[t] =
				    (lp - FCPARAM(isp, bus)->portdb) + 1;
				tgt = lp->dev_map_idx - 1;
				isp_prt(isp, ISP_LOGCONFIG, prom2,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed at", tgt,
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		}
		break;
	case ISPASYNC_DEV_STAYED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		if (lp->dev_map_idx) {
			tgt = lp->dev_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_GONE:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		/*
		 * If this has a virtual target and we haven't marked it
		 * that we're going to have isp_gdt tell the OS it's gone,
		 * set the isp_gdt timer running on it.
		 *
		 * If it isn't marked that isp_gdt is going to get rid of it,
		 * announce that it's gone.
		 */
		if (lp->dev_map_idx && lp->reserved == 0) {
			lp->reserved = 1;
			lp->new_reserved = isp->isp_osinfo.gone_device_time;
			lp->state = FC_PORTDB_STATE_ZOMBIE;
			if (isp->isp_osinfo.gdt_running == 0) {
				isp->isp_osinfo.gdt_running = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "starting Gone Device Timer");
				callout_schedule(&isp->isp_osinfo.gdt, hz);
			}
			tgt = lp->dev_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "gone zombie at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else if (lp->reserved == 0) {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "departed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_CHANGE_NOTIFY:
	{
		int opt;

		va_start(ap, cmd);
		bus = va_arg(ap, int);
		opt = va_arg(ap, int);
		va_end(ap);

		if (opt == ISPASYNC_CHANGE_PDB) {
			msg = "Port Database Changed";
		} else if (opt == ISPASYNC_CHANGE_SNS) {
			msg = "Name Server Database Changed";
		} else {
			msg = "Other Change Notify";
		}
		/*
		 * If the loop down timer is running, cancel it.
		 */
		if (callout_pending(&isp->isp_osinfo.ldt)) {
			callout_stop(&isp->isp_osinfo.ldt);
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "Stopping Loop Down Timer");
		}
		isp_prt(isp, ISP_LOGINFO, "%s", msg);
		/*
		 * We can set blocked here because we know it's now okay
		 * to try and run isp_fc_runstate (in order to build loop
		 * state). But we don't try and freeze the midlayer's queue
		 * if we have no thread that we can wake to later unfreeze
		 * it.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			if (isp->isp_osinfo.thread) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			}
		}
		/*
		 * Note that we have work for the thread to do, and
		 * if the thread is here already, wake it up.
		 */
		if (isp->isp_osinfo.thread) {
			wakeup(&isp->isp_osinfo.thread);
		} else {
			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
		}
		break;
	}
	case ISPASYNC_FW_CRASH:
	{
		uint16_t mbox1;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			bus = ISP_READ(isp, OUTMAILBOX6);
		} else {
			bus = 0;
		}
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware Error on bus %d @ RISC Address %#x",
		    bus, mbox1);
		if (IS_FC(isp)) {
			if (isp->isp_osinfo.blocked == 0) {
				isp->isp_osinfo.blocked = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			}
		}
		mbox1 = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp_reinit(isp, 0);
		isp->isp_osinfo.mbox_sleep_ok = mbox1;
		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
		break;
	}
	default:
		break;
	}
}

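/*
 * Console logging helpers: isp_prt() prefixes messages with the device name,
 * isp_xs_prt() with the periph address of the transfer. Output is suppressed
 * unless the level is ISP_LOGALL or enabled in isp_dblev.
 */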
void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", device_xname(isp->isp_osinfo.dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

void
isp_xs_prt(struct ispsoftc *isp, XS_T *xs, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	scsipi_printaddr(xs->xs_periph);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

void
isp_lock(struct ispsoftc *isp)
{
	int s = splbio();
	if (isp->isp_osinfo.islocked++ == 0) {
		isp->isp_osinfo.splsaved = s;
	} else {
		splx(s);
	}
}

void
isp_unlock(struct ispsoftc *isp)
{
	if (isp->isp_osinfo.islocked-- <= 1) {
		isp->isp_osinfo.islocked = 0;
		splx(isp->isp_osinfo.splsaved);
	}
}

uint64_t
isp_microtime_sub(struct timeval *b, struct timeval *a)
{
	struct timeval x;
	uint64_t elapsed;
	timersub(b, a, &x);
	elapsed = GET_NANOSEC(&x);
	if (elapsed == 0)
		elapsed++;
	return (elapsed);
}

int
isp_mbox_acquire(ispsoftc_t *isp)
{
	if (isp->isp_osinfo.mboxbsy) {
		return (1);
	} else {
		isp->isp_osinfo.mboxcmd_done = 0;
		isp->isp_osinfo.mboxbsy = 1;
		return (0);
	}
}

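/*
 * Wait for a mailbox command to complete, either by sleeping on the mailbox
 * work channel (when sleeping is allowed) or by polling the ISR directly,
 * and flag MBOX_TIMEOUT in param[0] if it never finishes.
 */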
void
isp_mbox_wait_complete(struct ispsoftc *isp, mbreg_t *mbp)
{
	unsigned int usecs = mbp->timeout;
	unsigned int maxc, olim, ilim;
	struct timeval start;

	if (usecs == 0) {
		usecs = MBCMD_DEFAULT_TIMEOUT;
	}
	maxc = isp->isp_mbxwrk0 + 1;

	microtime(&start);
	if (isp->isp_osinfo.mbox_sleep_ok) {
		int to;
		struct timeval tv, utv;

		tv.tv_sec = 0;
		tv.tv_usec = 0;
		for (olim = 0; olim < maxc; olim++) {
			utv.tv_sec = usecs / 1000000;
			utv.tv_usec = usecs % 1000000;
			timeradd(&tv, &utv, &tv);
		}
		to = tvtohz(&tv);
		if (to == 0)
			to = 1;
		timeradd(&tv, &start, &tv);

		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp->isp_osinfo.mbox_sleeping = 1;
		tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", to);
		isp->isp_osinfo.mbox_sleeping = 0;
		isp->isp_osinfo.mbox_sleep_ok = 1;
	} else {
		for (olim = 0; olim < maxc; olim++) {
			for (ilim = 0; ilim < usecs; ilim += 100) {
				uint32_t isr;
				uint16_t sema, mbox;
				if (isp->isp_osinfo.mboxcmd_done) {
					break;
				}
				if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
					isp_intr(isp, isr, sema, mbox);
					if (isp->isp_osinfo.mboxcmd_done) {
						break;
					}
				}
				ISP_DELAY(100);
			}
			if (isp->isp_osinfo.mboxcmd_done) {
				break;
			}
		}
	}
	if (isp->isp_osinfo.mboxcmd_done == 0) {
		struct timeval finish, elapsed;

		microtime(&finish);
		timersub(&finish, &start, &elapsed);
		isp_prt(isp, ISP_LOGWARN,
		    "%s Mailbox Command (%#x) Timeout (%juus actual)",
		    isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
		    isp->isp_lastmbxcmd, (intmax_t)(elapsed.tv_sec * 1000000) +
		    elapsed.tv_usec);
		mbp->param[0] = MBOX_TIMEOUT;
		isp->isp_osinfo.mboxcmd_done = 1;
	}
}

void
isp_mbox_notify_done(ispsoftc_t *isp)
{
	if (isp->isp_osinfo.mbox_sleeping) {
		wakeup(&isp->isp_mbxworkp);
	}
	isp->isp_osinfo.mboxcmd_done = 1;
}

void
isp_mbox_release(ispsoftc_t *isp)
{
	isp->isp_osinfo.mboxbsy = 0;
}