/* $NetBSD: isp_netbsd.c,v 1.79 2009/05/12 14:25:17 cegger Exp $ */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000-2007 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.79 2009/05/12 14:25:17 cegger Exp $");

#include <dev/ic/isp_netbsd.h>
#include <dev/ic/isp_ioctl.h>
#include <sys/scsiio.h>

#include <sys/timevar.h>

/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *		=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
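 *
 * As an illustrative calculation (not from the original comment): with
 * xs->timeout == 250 (milliseconds) and hz == 100, the integer division
 * gives (250 / 1000) * 100 == 0 ticks, so the trailing 3 * hz (300 ticks,
 * i.e. 3 seconds) is what actually arms the watchdog for that command.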
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))

static void isp_config_interrupts(device_t);
static void ispminphys_1020(struct buf *);
static void ispminphys(struct buf *);
static void ispcmd(struct ispsoftc *, XS_T *);
static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);

static void isp_polled_cmd_wait(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_gdt(void *);
static void isp_ldt(void *);
static void isp_make_here(ispsoftc_t *, int);
static void isp_make_gone(ispsoftc_t *, int);
static void isp_fc_worker(void *);

static const char *roles[4] = {
    "(none)", "Target", "Initiator", "Target/Initiator"
};
static const char prom3[] =
    "PortID 0x%06x Departed from Target %u because of %s";
int isp_change_is_bad = 0;	/* "changed" devices are bad */
int isp_quickboot_time = 15;	/* don't wait more than N secs for loop up */
static int isp_fabric_hysteresis = 5;
#define	isp_change_is_bad	0

/*
 * Complete attachment of hardware, including subdevices.
 */

void
isp_attach(struct ispsoftc *isp)
{
	int i;
	isp->isp_state = ISP_RUNSTATE;

	isp->isp_osinfo.adapter.adapt_dev = &isp->isp_osinfo.dev;
	isp->isp_osinfo.adapter.adapt_openings = isp->isp_maxcmds;
	isp->isp_osinfo.loop_down_limit = 300;

	/*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
	 */
	isp->isp_osinfo.adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
	isp->isp_osinfo.adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo.adapter.adapt_request = isprequest;
	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
		isp->isp_osinfo.adapter.adapt_minphys = ispminphys_1020;
	} else {
		isp->isp_osinfo.adapter.adapt_minphys = ispminphys;
	}

	callout_init(&isp->isp_osinfo.gdt, 0);
	callout_setfunc(&isp->isp_osinfo.gdt, isp_gdt, isp);
	callout_init(&isp->isp_osinfo.ldt, 0);
	callout_setfunc(&isp->isp_osinfo.ldt, isp_ldt, isp);
	if (IS_FC(isp)) {
		if (kthread_create(PRI_NONE, 0, NULL, isp_fc_worker, isp,
		    &isp->isp_osinfo.thread, "%s:fc_thrd",
		    device_xname(&isp->isp_osinfo.dev))) {
			isp_prt(isp, ISP_LOGERR,
			    "unable to create FC worker thread");
			return;
		}
	}

	for (i = 0; i != isp->isp_osinfo.adapter.adapt_nchannels; i++) {
		isp->isp_osinfo.chan[i].chan_adapter =
		    &isp->isp_osinfo.adapter;
		isp->isp_osinfo.chan[i].chan_bustype = &scsi_bustype;
		isp->isp_osinfo.chan[i].chan_channel = i;
		/*
		 * Until the midlayer is fixed to use REPORT LUNS,
		 * limit to 8 luns.
		 */
		isp->isp_osinfo.chan[i].chan_nluns = min(isp->isp_maxluns, 8);
		if (IS_FC(isp)) {
			isp->isp_osinfo.chan[i].chan_ntargets = MAX_FC_TARG;
			if (ISP_CAP_2KLOGIN(isp) == 0 && MAX_FC_TARG > 256) {
				isp->isp_osinfo.chan[i].chan_ntargets = 256;
			}
			isp->isp_osinfo.chan[i].chan_id = MAX_FC_TARG;
		} else {
			isp->isp_osinfo.chan[i].chan_ntargets = MAX_TARGETS;
			isp->isp_osinfo.chan[i].chan_id =
			    SDPARAM(isp, i)->isp_initiator_id;
			ISP_LOCK(isp);
			(void) isp_control(isp, ISPCTL_RESET_BUS, i);
			ISP_UNLOCK(isp);
		}
	}

	/*
	 * Defer enabling mailbox interrupts until later.
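	 *
	 * (config_interrupts(9) calls isp_config_interrupts once
	 * autoconfiguration has finished and interrupts are running, so
	 * the work done there can rely on sleeping mailbox commands.)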
	 */
	config_interrupts((device_t) isp, isp_config_interrupts);
}

static void
isp_config_interrupts(device_t self)
{
	int i;
	struct ispsoftc *isp = (struct ispsoftc *) self;

	isp->isp_osinfo.mbox_sleep_ok = 1;

	if (IS_FC(isp) && (FCPARAM(isp, 0)->isp_fwstate != FW_READY ||
	    FCPARAM(isp, 0)->isp_loopstate != LOOP_READY)) {
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "Starting Initial Loop Down Timer");
		callout_schedule(&isp->isp_osinfo.ldt, isp_quickboot_time * hz);
	}

	/*
	 * And attach children (if any).
	 */
	for (i = 0; i < isp->isp_osinfo.adapter.adapt_nchannels; i++) {
		config_found((void *)isp, &isp->isp_osinfo.chan[i], scsiprint);
	}
}

/*
 * minphys our xfers
 */
static void
ispminphys_1020(struct buf *bp)
{
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

static void
ispminphys(struct buf *bp)
{
	if (bp->b_bcount >= (1 << 30)) {
		bp->b_bcount = (1 << 30);
	}
	minphys(bp);
}

static int
ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr, int flag,
    struct proc *p)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	int nr, bus, retval = ENOTTY;

	switch (cmd) {
	case ISP_SDBLEV:
	{
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_GETROLE:
		bus = *(int *)addr;
		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}
		if (IS_FC(isp)) {
			*(int *)addr = FCPARAM(isp, bus)->role;
		} else {
			*(int *)addr = SDPARAM(isp, bus)->role;
		}
		retval = 0;
		break;
	case ISP_SETROLE:
		nr = *(int *)addr;
		bus = nr >> 8;
		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}
		nr &= 0xff;
		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
			retval = EINVAL;
			break;
		}
		if (IS_FC(isp)) {
			*(int *)addr = FCPARAM(isp, bus)->role;
			FCPARAM(isp, bus)->role = nr;
		} else {
			*(int *)addr = SDPARAM(isp, bus)->role;
			SDPARAM(isp, bus)->role = nr;
		}
		retval = 0;
		break;

	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;

	case ISP_RESCAN:
		if (IS_FC(isp)) {
			bus = *(int *)addr;
			if (bus < 0 || bus >= isp->isp_nchan) {
				retval = -ENXIO;
				break;
			}
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, bus, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;

	case ISP_FC_LIP:
		if (IS_FC(isp)) {
			bus = *(int *)addr;
			if (bus < 0 || bus >= isp->isp_nchan) {
				retval = -ENXIO;
				break;
			}
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, bus)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		fcportdb_t *lp;

		if (IS_SCSI(isp)) {
			break;
		}
		if (ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
		if (lp->state == FC_PORTDB_STATE_VALID) {
			ifc->role = lp->roles;
			ifc->loopid = lp->handle;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		break;
	}
	case ISP_GET_STATS:
	{
		isp_stats_t *sp = (isp_stats_t *) addr;

		MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		ISP_LOCK(isp);
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		ISP_LOCK(isp);
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_FC_GETHINFO:
	{
		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
		bus = hba->fc_channel;

		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = ENXIO;
			break;
		}
		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
		hba->fc_nchannels = isp->isp_nchan;
		hba->fc_nports = isp->isp_nchan;	/* XXXX 24XX STUFF? XXX */
		if (IS_FC(isp)) {
			hba->fc_speed = FCPARAM(isp, bus)->isp_gbspeed;
			hba->fc_topology = FCPARAM(isp, bus)->isp_topo + 1;
			hba->fc_loopid = FCPARAM(isp, bus)->isp_loopid;
			hba->nvram_node_wwn = FCPARAM(isp, bus)->isp_wwnn_nvram;
			hba->nvram_port_wwn = FCPARAM(isp, bus)->isp_wwpn_nvram;
			hba->active_node_wwn = FCPARAM(isp, bus)->isp_wwnn;
			hba->active_port_wwn = FCPARAM(isp, bus)->isp_wwpn;
		} else {
			hba->fc_speed = 0;
			hba->fc_topology = 0;
			hba->nvram_node_wwn = 0ull;
			hba->nvram_port_wwn = 0ull;
			hba->active_node_wwn = 0ull;
			hba->active_port_wwn = 0ull;
		}
		retval = 0;
		break;
	}
	case ISP_TSK_MGMT:
	{
		int needmarker;
		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
		uint16_t loopid;
		mbreg_t mbs;

		if (IS_SCSI(isp)) {
			break;
		}

		bus = fct->chan;
		if (bus < 0 || bus >= isp->isp_nchan) {
			retval = -ENXIO;
			break;
		}

		memset(&mbs, 0, sizeof (mbs));
		needmarker = retval = 0;
		loopid = fct->loopid;
		if (ISP_CAP_2KLOGIN(isp) == 0) {
			loopid <<= 8;
		}
		switch (fct->action) {
		case IPT_CLEAR_ACA:
			mbs.param[0] = MBOX_CLEAR_ACA;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			break;
		case IPT_TARGET_RESET:
			mbs.param[0] = MBOX_TARGET_RESET;
			mbs.param[1] = loopid;
			needmarker = 1;
			break;
		case IPT_LUN_RESET:
			mbs.param[0] = MBOX_LUN_RESET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_CLEAR_TASK_SET:
			mbs.param[0] = MBOX_CLEAR_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_ABORT_TASK_SET:
			mbs.param[0] = MBOX_ABORT_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		default:
			retval = EINVAL;
			break;
		}
		if (retval == 0) {
			if (needmarker) {
				FCPARAM(isp, bus)->sendmarker = 1;
			}
			ISP_LOCK(isp);
			retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
			ISP_UNLOCK(isp);
			if (retval) {
				retval = EIO;
			}
		}
		break;
	}
	case ISP_FC_GETDLIST:
	{
		isp_dlist_t local, *ua;
		uint16_t nph, nphe, count, channel, lim;
		struct wwnpair pair, *uptr;

		if (IS_SCSI(isp)) {
			retval = EINVAL;
			break;
		}

		ua = *(isp_dlist_t **)addr;
		if (copyin(ua, &local, sizeof (isp_dlist_t))) {
			retval = EFAULT;
			break;
		}
		lim = local.count;
		channel = local.channel;

		ua = *(isp_dlist_t **)addr;
		uptr = &ua->wwns[0];

		if (ISP_CAP_2KLOGIN(isp)) {
			nphe = NPH_MAX_2K;
		} else {
			nphe = NPH_MAX;
		}
		for (count = 0, nph = 0; count < lim && nph != nphe; nph++) {
			ISP_LOCK(isp);
			retval = isp_control(isp, ISPCTL_GET_NAMES, channel,
			    nph, &pair.wwnn, &pair.wwpn);
			ISP_UNLOCK(isp);
			if (retval || (pair.wwpn == INI_NONE &&
			    pair.wwnn == INI_NONE)) {
				retval = 0;
				continue;
			}
			if (copyout(&pair, (void *)uptr++, sizeof (pair))) {
				retval = EFAULT;
				break;
			}
			count++;
		}
		if (retval == 0) {
			if (copyout(&count, (void *)&ua->count,
			    sizeof (count))) {
				retval = EFAULT;
			}
		}
		break;
	}
	case SCBUSIORESET:
		ISP_LOCK(isp);
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel)) {
			retval = EIO;
		} else {
			retval = 0;
		}
		ISP_UNLOCK(isp);
		break;
	default:
		break;
	}
	return (retval);
}

static void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
	volatile uint8_t ombi;
	int lim, chan;

	ISP_LOCK(isp);
	if (isp->isp_state < ISP_RUNSTATE) {
		ISP_DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_ENABLE_INTS(isp);
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGERR, "isp not at init state");
			XS_SETERR(xs, HBA_BOTCH);
			scsipi_done(xs);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISP_ENABLE_INTS(isp);
	}
	chan = XS_CHANNEL(xs);

	/*
	 * Handle the case of an FC card where the FC thread hasn't
	 * fired up yet and we don't yet have a known loop state.
	 */
	if (IS_FC(isp) && (FCPARAM(isp, chan)->isp_fwstate != FW_READY ||
	    FCPARAM(isp, chan)->isp_loopstate != LOOP_READY) &&
	    isp->isp_osinfo.thread == NULL) {
		int delay_time;

		ombi = isp->isp_osinfo.mbox_sleep_ok != 0;

		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.mbox_sleep_ok = 0;
		}

		if (isp->isp_osinfo.loop_checked == 0) {
			delay_time = 10 * 1000000;
			isp->isp_osinfo.loop_checked = 1;
		} else {
			delay_time = 250000;
		}

		if (isp_fc_runstate(isp, XS_CHANNEL(xs), delay_time) != 0) {
			if (xs->xs_control & XS_CTL_POLL) {
				isp->isp_osinfo.mbox_sleep_ok = ombi;
			}
			if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				scsipi_done(xs);
				ISP_UNLOCK(isp);
				return;
			}
			/*
			 * Otherwise, fall thru to be queued up for later.
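			 * (The fall-through continues to the blocked/paused
			 * checks below; a blocked channel completes the
			 * command with XS_REQUEUE while retries remain, and
			 * isp_start itself may return CMD_RQLATER to the
			 * same effect.)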
			 */
		} else {
			int wasblocked =
			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
			isp->isp_osinfo.blocked = isp->isp_osinfo.paused = 0;
			if (wasblocked) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "THAW QUEUES @ LINE %d", __LINE__);
				scsipi_channel_thaw(&isp->isp_osinfo.chan[chan],
				    1);
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.mbox_sleep_ok = ombi;
		}
	}

	if (isp->isp_osinfo.paused) {
		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}
	if (isp->isp_osinfo.blocked) {
		isp_prt(isp, ISP_LOGWARN,
		    "I/O while blocked with retries %d", xs->xs_retries);
		if (xs->xs_retries) {
			xs->error = XS_REQUEUE;
			xs->xs_retries--;
		} else {
			XS_SETERR(xs, HBA_SELTIMEOUT);
		}
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		ombi = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 0;
	}

	switch (isp_start(xs)) {
	case CMD_QUEUED:
		if (xs->xs_control & XS_CTL_POLL) {
			isp_polled_cmd_wait(isp, xs);
			isp->isp_osinfo.mbox_sleep_ok = ombi;
		} else if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		isp->isp_osinfo.paused = 1;
		xs->error = XS_RESOURCE_SHORTAGE;
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "FREEZE QUEUES @ LINE %d", __LINE__);
		for (chan = 0; chan < isp->isp_nchan; chan++) {
			scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
		}
		scsipi_done(xs);
		break;
	case CMD_RQLATER:
		/*
		 * We can only get RQLATER from FC devices (1 channel only).
		 *
		 * If we've never seen loop up, see whether we've been down
		 * for quickboot time; otherwise use the loop down limit
		 * time. If we're past that limit, we start giving up on
		 * commands.
		 */
		if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
			lim = isp_quickboot_time;
		} else {
			lim = isp->isp_osinfo.loop_down_limit;
		}
		if (isp->isp_osinfo.loop_down_time >= lim) {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "RQLATER->SELTIMEOUT for %d (%d >= %d)", XS_TGT(xs),
			    isp->isp_osinfo.loop_down_time, lim);
			XS_SETERR(xs, HBA_SELTIMEOUT);
			scsipi_done(xs);
			break;
		}
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
		} else {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "RQLATER WITH FROZEN QUEUES @ LINE %d", __LINE__);
		}
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		break;
	case CMD_COMPLETE:
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}

static void
isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		ispcmd(isp, (XS_T *) arg);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		if (IS_SCSI(isp)) {
			struct scsipi_xfer_mode *xm = arg;
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp, chan->chan_channel);

			if (xm->xm_mode & PERIPH_CAP_TQING)
				dflags |= DPARM_TQING;
			if (xm->xm_mode & PERIPH_CAP_WIDE16)
				dflags |= DPARM_WIDE;
			if (xm->xm_mode & PERIPH_CAP_SYNC)
				dflags |= DPARM_SYNC;
			ISP_LOCK(isp);
			sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
			dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
			sdp->isp_devparam[xm->xm_target].dev_update = 1;
			sdp->update = 1;
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "isprequest: device flags 0x%x for %d.%d.X",
			    dflags, chan->chan_channel, xm->xm_target);
			break;
		}
	default:
		break;
	}
}

static void
isp_polled_cmd_wait(struct ispsoftc *isp, XS_T *xs)
{
	int infinite = 0, mswait;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0) {
		infinite = 1;
	}

	while (mswait || infinite) {
		uint32_t isr;
		uint16_t sema, mbox;
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened, so abort the command.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			isp_prt(isp, ISP_LOGERR, "polled command timed out");
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}

void
isp_done(XS_T *xs)
{
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		/*
		 * Fixup- if we get a QFULL, we need
		 * to set XS_BUSY as the error.
		 */
		if (xs->status == SCSI_QUEUE_FULL) {
			xs->error = XS_BUSY;
		}
		if (isp->isp_osinfo.paused) {
			int i;
			isp->isp_osinfo.paused = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			for (i = 0; i < isp->isp_nchan; i++) {
				scsipi_channel_timed_thaw(&isp->isp_osinfo.chan[i]);
			}
		}
		if (xs->error == XS_DRIVER_STUFFUP) {
			isp_prt(isp, ISP_LOGERR,
			    "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
			    XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
		}
		scsipi_done(xs);
	}
}

static void
isp_dog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	uint32_t handle;

	ISP_ILOCK(isp);
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		uint32_t isr;
		uint16_t mbox, sema;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog cleanup for handle 0x%x", handle);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog timeout for handle 0x%x", handle);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			uint32_t nxti, optr;
			void *qe;
			isp_marker_t local, *mp = &local;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout on handle %x", handle);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
				ISP_IUNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->mrk_header.rqs_entry_count = 1;
			mp->mrk_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->mrk_modifier = SYNC_ALL;
			mp->mrk_target = XS_CHANNEL(xs) << 7;
			isp_put_marker(isp, mp, qe);
			ISP_ADD_REQUEST(isp, nxti);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
	ISP_IUNLOCK(isp);
}

/*
 * Gone Device Timer Function- when we have decided that a device has gone
 * away, we wait a specific period of time prior to telling the OS it has
 * gone away.
 *
 * This timer function fires once a second and then scans the port database
 * for devices that are marked dead but still have a virtual target assigned.
 * We decrement a counter for that port database entry, and when it hits zero,
 * we tell the OS the device has gone away.
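 *
 * (The per-entry countdown lives in new_reserved: the ISPASYNC_DEV_GONE
 * handler below seeds it from isp_osinfo.gone_device_time when it marks the
 * entry a zombie, and this timer decrements it once a second.)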
 */
static void
isp_gdt(void *arg)
{
	ispsoftc_t *isp = arg;
	fcportdb_t *lp;
	int dbidx, tgt, more_to_do = 0;

	isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
	ISP_LOCK(isp);
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp, 0)->portdb[dbidx];

		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
			continue;
		}
		if (lp->ini_map_idx == 0) {
			continue;
		}
		if (lp->new_reserved == 0) {
			continue;
		}
		lp->new_reserved -= 1;
		if (lp->new_reserved != 0) {
			more_to_do++;
			continue;
		}
		tgt = lp->ini_map_idx - 1;
		FCPARAM(isp, 0)->isp_ini_map[tgt] = 0;
		lp->ini_map_idx = 0;
		lp->state = FC_PORTDB_STATE_NIL;
		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
		    "Gone Device Timeout");
		isp_make_gone(isp, tgt);
	}
	if (more_to_do) {
		callout_schedule(&isp->isp_osinfo.gdt, hz);
	} else {
		isp->isp_osinfo.gdt_running = 0;
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "stopping Gone Device Timer");
	}
	ISP_UNLOCK(isp);
}

/*
 * Loop Down Timer Function- when the loop goes down, a timer is started,
 * and after it expires we come here and take all probational devices that
 * the OS knows about and tell the OS that they've gone away.
 *
 * We don't clear the devices out of our port database because, when the loop
 * comes back up, we have to do some actual cleanup with the chip at that
 * point (implicit PLOGO, e.g., to get the chip's port database state right).
 */
static void
isp_ldt(void *arg)
{
	ispsoftc_t *isp = arg;
	fcportdb_t *lp;
	int dbidx, tgt;

	isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
	ISP_LOCK(isp);

	/*
	 * Notify the OS of all targets that we now consider to have departed.
	 */
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp, 0)->portdb[dbidx];

		if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
			continue;
		}
		if (lp->ini_map_idx == 0) {
			continue;
		}

		/*
		 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
		 */

		/*
		 * Mark that we've announced that this device is gone....
		 */
		lp->reserved = 1;

		/*
		 * but *don't* change the state of the entry. Just clear
		 * any target id stuff and announce to CAM that the
		 * device is gone. This way any necessary PLOGO stuff
		 * will happen when loop comes back up.
		 */

		tgt = lp->ini_map_idx - 1;
		FCPARAM(isp, 0)->isp_ini_map[tgt] = 0;
		lp->ini_map_idx = 0;
		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
		    "Loop Down Timeout");
		isp_make_gone(isp, tgt);
	}

	/*
	 * The loop down timer has expired. Wake up the kthread
	 * to notice that fact (or make it false).
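	 *
	 * (Pushing loop_down_time past loop_down_limit is what makes
	 * isp_fc_worker and ispcmd treat the loop as having been down
	 * for too long.)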
	 */
	isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
	wakeup(&isp->isp_osinfo.thread);
	ISP_UNLOCK(isp);
}

static void
isp_make_here(ispsoftc_t *isp, int tgt)
{
	isp_prt(isp, ISP_LOGINFO, "target %d has arrived", tgt);
}

static void
isp_make_gone(ispsoftc_t *isp, int tgt)
{
	isp_prt(isp, ISP_LOGINFO, "target %d has departed", tgt);
}

static void
isp_fc_worker(void *arg)
{
	void scsipi_run_queue(struct scsipi_channel *);
	ispsoftc_t *isp = arg;
	int slp = 0;
	int chan = 0;

	int s = splbio();
	/*
	 * The first loop is for our usage where we have yet to have
	 * gotten good fibre channel state.
	 */
	while (isp->isp_osinfo.thread != NULL) {
		int sok, lb, lim;

		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "checking FC state");
		sok = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 1;
		lb = isp_fc_runstate(isp, chan, 250000);
		isp->isp_osinfo.mbox_sleep_ok = sok;
		if (lb) {
			/*
			 * Increment loop down time by the last sleep interval.
			 */
			isp->isp_osinfo.loop_down_time += slp;

			if (lb < 0) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FC loop not up (down count %d)",
				    isp->isp_osinfo.loop_down_time);
			} else {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FC got to %d (down count %d)",
				    lb, isp->isp_osinfo.loop_down_time);
			}

			/*
			 * If we've never seen loop up and we've waited longer
			 * than quickboot time, or we've seen loop up but we've
			 * waited longer than loop_down_limit, give up and go
			 * to sleep until loop comes up.
			 */
			if (FCPARAM(isp, 0)->loop_seen_once == 0) {
				lim = isp_quickboot_time;
			} else {
				lim = isp->isp_osinfo.loop_down_limit;
			}
			if (isp->isp_osinfo.loop_down_time >= lim) {
				/*
				 * If we're now past our limit, release
				 * the queues and let them come in and
				 * either get HBA_SELTIMEOUT or cause
				 * another freeze.
				 */
				isp->isp_osinfo.blocked = 1;
				slp = 0;
			} else if (isp->isp_osinfo.loop_down_time < 10) {
				slp = 1;
			} else if (isp->isp_osinfo.loop_down_time < 30) {
				slp = 5;
			} else if (isp->isp_osinfo.loop_down_time < 60) {
				slp = 10;
			} else if (isp->isp_osinfo.loop_down_time < 120) {
				slp = 20;
			} else {
				slp = 30;
			}

		} else {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FC state OK");
			isp->isp_osinfo.loop_down_time = 0;
			slp = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
		}

		/*
		 * If we'd frozen the queues, unfreeze them now so that
		 * we can start getting commands. If the FC state isn't
		 * okay yet, they'll hit that in isp_start which will
		 * freeze the queues again.
		 */
		if (isp->isp_osinfo.blocked) {
			isp->isp_osinfo.blocked = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
		}
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "sleep time %d", slp);
		tsleep(&isp->isp_osinfo.thread, PRIBIO, "ispf", slp * hz);

		/*
		 * If slp is zero, we're waking up for the first time after
		 * things have been okay.
		 * In this case, we set a deferral state for all commands
		 * and delay hysteresis seconds before starting the FC state
		 * evaluation. This gives the loop/fabric a chance to settle.
		 */
		if (slp == 0 && isp_fabric_hysteresis) {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "sleep hysteresis tick time %d",
			    isp_fabric_hysteresis * hz);
			(void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
			    (isp_fabric_hysteresis * hz));
		}
	}
	splx(s);

	/* In case parent is waiting for us to exit. */
	wakeup(&isp->isp_osinfo.thread);
	kthread_exit(0);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	ISP_DISABLE_INTS(isp);
	isp_unlock(isp);
}

void
isp_async(struct ispsoftc *isp, ispasync_t cmd, ...)
{
	int bus, tgt;
	const char *msg = NULL;
	static const char prom[] =
	    "PortID 0x%06x handle 0x%x role %s %s\n"
	    " WWNN 0x%08x%08x WWPN 0x%08x%08x";
	static const char prom2[] =
	    "PortID 0x%06x handle 0x%x role %s %s tgt %u\n"
	    " WWNN 0x%08x%08x WWPN 0x%08x%08x";
	fcportdb_t *lp;
	va_list ap;

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		if (IS_SCSI(isp)) {
			sdparam *sdp;
			int flags;
			struct scsipi_xfer_mode xm;

			va_start(ap, cmd);
			bus = va_arg(ap, int);
			tgt = va_arg(ap, int);
			va_end(ap);
			sdp = SDPARAM(isp, bus);
			flags = sdp->isp_devparam[tgt].actv_flags;

			xm.xm_mode = 0;
			xm.xm_period = sdp->isp_devparam[tgt].actv_period;
			xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
			xm.xm_target = tgt;

			if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
				xm.xm_mode |= PERIPH_CAP_SYNC;
			if (flags & DPARM_WIDE)
				xm.xm_mode |= PERIPH_CAP_WIDE16;
			if (flags & DPARM_TQING)
				xm.xm_mode |= PERIPH_CAP_TQING;
			scsipi_async_event(&isp->isp_osinfo.chan[bus],
			    ASYNC_EVENT_XFER_MODE, &xm);
			break;
		}
	case ISPASYNC_BUS_RESET:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		va_end(ap);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		scsipi_async_event(&isp->isp_osinfo.chan[bus],
		    ASYNC_EVENT_RESET, NULL);
		break;
	case ISPASYNC_LIP:
		if (msg == NULL) {
			msg = "LIP Received";
		}
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_RESET:
		if (msg == NULL) {
			msg = "LOOP Reset Received";
		}
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_DOWN:
		if (msg == NULL) {
			msg = "Loop DOWN";
		}
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		va_end(ap);

		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running and interrupts that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.mbox_sleep_ok &&
		    isp->isp_osinfo.blocked == 0 &&
		    isp->isp_osinfo.thread) {
			isp->isp_osinfo.blocked = 1;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
			scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			if (callout_pending(&isp->isp_osinfo.ldt) == 0) {
				callout_schedule(&isp->isp_osinfo.ldt,
				    isp->isp_osinfo.loop_down_limit * hz);
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "Starting Loop Down Timer");
			}
		}
		isp_prt(isp, ISP_LOGINFO, msg);
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
		 * the FC worker thread. When the FC worker thread
		 * is done, let *it* call scsipi_channel_thaw...
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_DEV_ARRIVED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		lp->reserved = 0;
		if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
		    (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
			int dbidx = lp - FCPARAM(isp, bus)->portdb;
			int i;

			for (i = 0; i < MAX_FC_TARG; i++) {
				if (i >= FL_ID && i <= SNS_ID) {
					continue;
				}
				if (FCPARAM(isp, bus)->isp_ini_map[i] == 0) {
					break;
				}
			}
			if (i < MAX_FC_TARG) {
				FCPARAM(isp, bus)->isp_ini_map[i] = dbidx + 1;
				lp->ini_map_idx = i + 1;
			} else {
				isp_prt(isp, ISP_LOGWARN, "out of target ids");
				isp_dump_portdb(isp, bus);
			}
		}
		if (lp->ini_map_idx) {
			tgt = lp->ini_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
			isp_make_here(isp, tgt);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_CHANGED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		if (isp_change_is_bad) {
			lp->state = FC_PORTDB_STATE_NIL;
			if (lp->ini_map_idx) {
				tgt = lp->ini_map_idx - 1;
				FCPARAM(isp, bus)->isp_ini_map[tgt] = 0;
				lp->ini_map_idx = 0;
				isp_prt(isp, ISP_LOGCONFIG, prom3,
				    lp->portid, tgt, "change is bad");
				isp_make_gone(isp, tgt);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles],
				    "changed and departed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		} else {
			lp->portid = lp->new_portid;
			lp->roles = lp->new_roles;
			if (lp->ini_map_idx) {
				int t = lp->ini_map_idx - 1;
				FCPARAM(isp, bus)->isp_ini_map[t] =
				    (lp - FCPARAM(isp, bus)->portdb) + 1;
				tgt = lp->ini_map_idx - 1;
				isp_prt(isp, ISP_LOGCONFIG, prom2,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed at", tgt,
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		}
		break;
	case ISPASYNC_DEV_STAYED:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		if (lp->ini_map_idx) {
			tgt = lp->ini_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_GONE:
		va_start(ap, cmd);
		bus = va_arg(ap, int);
		lp = va_arg(ap, fcportdb_t *);
		va_end(ap);
		/*
		 * If this has a virtual target and we haven't marked it
		 * that we're going to have isp_gdt tell the OS it's gone,
		 * set the isp_gdt timer running on it.
		 *
		 * If it isn't marked that isp_gdt is going to get rid of it,
		 * announce that it's gone.
		 */
		if (lp->ini_map_idx && lp->reserved == 0) {
			lp->reserved = 1;
			lp->new_reserved = isp->isp_osinfo.gone_device_time;
			lp->state = FC_PORTDB_STATE_ZOMBIE;
			if (isp->isp_osinfo.gdt_running == 0) {
				isp->isp_osinfo.gdt_running = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "starting Gone Device Timer");
				callout_schedule(&isp->isp_osinfo.gdt, hz);
			}
			tgt = lp->ini_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "gone zombie at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else if (lp->reserved == 0) {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "departed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_CHANGE_NOTIFY:
	{
		int opt;

		va_start(ap, cmd);
		bus = va_arg(ap, int);
		opt = va_arg(ap, int);
		va_end(ap);

		if (opt == ISPASYNC_CHANGE_PDB) {
			msg = "Port Database Changed";
		} else if (opt == ISPASYNC_CHANGE_SNS) {
			msg = "Name Server Database Changed";
		} else {
			msg = "Other Change Notify";
		}
		/*
		 * If the loop down timer is running, cancel it.
		 */
		if (callout_pending(&isp->isp_osinfo.ldt)) {
			callout_stop(&isp->isp_osinfo.ldt);
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "Stopping Loop Down Timer");
		}
		isp_prt(isp, ISP_LOGINFO, msg);
		/*
		 * We can set blocked here because we know it's now okay
		 * to try and run isp_fc_runstate (in order to build loop
		 * state). But we don't try and freeze the midlayer's queue
		 * if we have no thread that we can wake to later unfreeze
		 * it.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			if (isp->isp_osinfo.thread) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			}
		}
		/*
		 * Note that we have work for the thread to do, and
		 * if the thread is here already, wake it up.
		 */
		if (isp->isp_osinfo.thread) {
			wakeup(&isp->isp_osinfo.thread);
		} else {
			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
		}
		break;
	}
	case ISPASYNC_FW_CRASH:
	{
		uint16_t mbox1;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			bus = ISP_READ(isp, OUTMAILBOX6);
		} else {
			bus = 0;
		}
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
		    bus, mbox1);
		if (IS_FC(isp)) {
			if (isp->isp_osinfo.blocked == 0) {
				isp->isp_osinfo.blocked = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
			}
		}
		mbox1 = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp_reinit(isp);
		isp->isp_osinfo.mbox_sleep_ok = mbox1;
		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
		break;
	}
	default:
		break;
	}
}

void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", device_xname(&isp->isp_osinfo.dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

void
isp_lock(struct ispsoftc *isp)
{
	int s = splbio();
	if (isp->isp_osinfo.islocked++ == 0) {
		isp->isp_osinfo.splsaved = s;
	} else {
		splx(s);
	}
}

void
isp_unlock(struct ispsoftc *isp)
{
	if (isp->isp_osinfo.islocked-- <= 1) {
		isp->isp_osinfo.islocked = 0;
		splx(isp->isp_osinfo.splsaved);
	}
}

uint64_t
isp_microtime_sub(struct timeval *b, struct timeval *a)
{
	struct timeval x;
	uint64_t elapsed;
	timersub(b, a, &x);
	elapsed = GET_NANOSEC(&x);
	if (elapsed == 0)
		elapsed++;
	return (elapsed);
}

int
isp_mbox_acquire(ispsoftc_t *isp)
{
	if (isp->isp_osinfo.mboxbsy) {
		return (1);
	} else {
		isp->isp_osinfo.mboxcmd_done = 0;
		isp->isp_osinfo.mboxbsy = 1;
		return (0);
	}
}

void
isp_mbox_wait_complete(struct ispsoftc *isp, mbreg_t *mbp)
{
	unsigned int usecs = mbp->timeout;
	unsigned int maxc, olim, ilim;
	struct timeval start;

	if (usecs == 0) {
		usecs = MBCMD_DEFAULT_TIMEOUT;
	}
	maxc = isp->isp_mbxwrk0 + 1;

	microtime(&start);
	if (isp->isp_osinfo.mbox_sleep_ok) {
		int to;
		struct timeval tv;

		tv.tv_sec = 0;
		tv.tv_usec = 0;
		for (olim = 0; olim < maxc; olim++) {
			tv.tv_sec += (usecs / 1000000);
			tv.tv_usec += (usecs % 1000000);
			if (tv.tv_usec >= 1000000) {
				tv.tv_sec++;
				tv.tv_usec -= 1000000;
			}
		}
		timeradd(&tv, &start, &tv);
		to = tvhzto(&tv);
		if (to == 0)
			to = 1;

		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp->isp_osinfo.mbox_sleeping = 1;
		tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", to);
		isp->isp_osinfo.mbox_sleeping = 0;
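		/*
		 * If we were woken rather than timed out, the wakeup came
		 * from isp_mbox_notify_done(), which also sets mboxcmd_done;
		 * an expired tsleep() is caught by the timeout check below.
		 */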
		isp->isp_osinfo.mbox_sleep_ok = 1;
	} else {
		for (olim = 0; olim < maxc; olim++) {
			for (ilim = 0; ilim < usecs; ilim += 100) {
				uint32_t isr;
				uint16_t sema, mbox;
				if (isp->isp_osinfo.mboxcmd_done) {
					break;
				}
				if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
					isp_intr(isp, isr, sema, mbox);
					if (isp->isp_osinfo.mboxcmd_done) {
						break;
					}
				}
				USEC_DELAY(100);
			}
			if (isp->isp_osinfo.mboxcmd_done) {
				break;
			}
		}
	}
	if (isp->isp_osinfo.mboxcmd_done == 0) {
		struct timeval finish, elapsed;

		microtime(&finish);
		timersub(&finish, &start, &elapsed);
		isp_prt(isp, ISP_LOGWARN,
		    "%s Mailbox Command (0x%x) Timeout (%uus actual)",
		    isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
		    isp->isp_lastmbxcmd, (elapsed.tv_sec * 1000000) +
		    elapsed.tv_usec);
		mbp->param[0] = MBOX_TIMEOUT;
		isp->isp_osinfo.mboxcmd_done = 1;
	}
}

void
isp_mbox_notify_done(ispsoftc_t *isp)
{
	if (isp->isp_osinfo.mbox_sleeping) {
		wakeup(&isp->isp_mbxworkp);
	}
	isp->isp_osinfo.mboxcmd_done = 1;
}

void
isp_mbox_release(ispsoftc_t *isp)
{
	isp->isp_osinfo.mboxbsy = 0;
}