1 /* $FreeBSD: src/sys/dev/isp/isp_freebsd.c,v 1.32.2.20 2002/10/11 18:49:25 mjacob Exp $ */ 2 /* 3 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 4 * 5 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 #include <dev/isp/isp_freebsd.h> 29 #include <sys/unistd.h> 30 #include <sys/kthread.h> 31 #include <machine/stdarg.h> /* for use by isp_prt below */ 32 #include <sys/conf.h> 33 #include <sys/ioccom.h> 34 #include <dev/isp/isp_ioctl.h> 35 36 37 static d_ioctl_t ispioctl; 38 static void isp_intr_enable(void *); 39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *); 40 static void isp_poll(struct cam_sim *); 41 static timeout_t isp_watchdog; 42 static void isp_kthread(void *); 43 static void isp_action(struct cam_sim *, union ccb *); 44 45 46 #define ISP_CDEV_MAJOR 248 47 static struct cdevsw isp_cdevsw = { 48 /* open */ nullopen, 49 /* close */ nullclose, 50 /* read */ noread, 51 /* write */ nowrite, 52 /* ioctl */ ispioctl, 53 /* poll */ nopoll, 54 /* mmap */ nommap, 55 /* strategy */ nostrategy, 56 /* name */ "isp", 57 /* maj */ ISP_CDEV_MAJOR, 58 /* dump */ nodump, 59 /* psize */ nopsize, 60 /* flags */ D_TAPE, 61 }; 62 63 static struct ispsoftc *isplist = NULL; 64 65 void 66 isp_attach(struct ispsoftc *isp) 67 { 68 int primary, secondary; 69 struct ccb_setasync csa; 70 struct cam_devq *devq; 71 struct cam_sim *sim; 72 struct cam_path *path; 73 74 /* 75 * Establish (in case of 12X0) which bus is the primary. 76 */ 77 78 primary = 0; 79 secondary = 1; 80 81 /* 82 * Create the device queue for our SIM(s). 83 */ 84 devq = cam_simq_alloc(isp->isp_maxcmds); 85 if (devq == NULL) { 86 return; 87 } 88 89 /* 90 * Construct our SIM entry. 
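	 * The SIM (SCSI Interface Module) is CAM's view of one host
	 * adapter bus; a second SIM is registered further down for
	 * dual-bus adapters.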
91 */ 92 ISPLOCK_2_CAMLOCK(isp); 93 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 94 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 95 if (sim == NULL) { 96 cam_simq_free(devq); 97 CAMLOCK_2_ISPLOCK(isp); 98 return; 99 } 100 CAMLOCK_2_ISPLOCK(isp); 101 102 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 103 isp->isp_osinfo.ehook.ich_arg = isp; 104 ISPLOCK_2_CAMLOCK(isp); 105 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 106 cam_sim_free(sim, TRUE); 107 CAMLOCK_2_ISPLOCK(isp); 108 isp_prt(isp, ISP_LOGERR, 109 "could not establish interrupt enable hook"); 110 return; 111 } 112 113 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 114 cam_sim_free(sim, TRUE); 115 CAMLOCK_2_ISPLOCK(isp); 116 return; 117 } 118 119 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 120 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 121 xpt_bus_deregister(cam_sim_path(sim)); 122 cam_sim_free(sim, TRUE); 123 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 124 CAMLOCK_2_ISPLOCK(isp); 125 return; 126 } 127 128 xpt_setup_ccb(&csa.ccb_h, path, 5); 129 csa.ccb_h.func_code = XPT_SASYNC_CB; 130 csa.event_enable = AC_LOST_DEVICE; 131 csa.callback = isp_cam_async; 132 csa.callback_arg = sim; 133 xpt_action((union ccb *)&csa); 134 CAMLOCK_2_ISPLOCK(isp); 135 isp->isp_sim = sim; 136 isp->isp_path = path; 137 /* 138 * Create a kernel thread for fibre channel instances. We 139 * don't have dual channel FC cards. 140 */ 141 if (IS_FC(isp)) { 142 ISPLOCK_2_CAMLOCK(isp); 143 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 144 "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) { 145 xpt_bus_deregister(cam_sim_path(sim)); 146 cam_sim_free(sim, TRUE); 147 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 148 CAMLOCK_2_ISPLOCK(isp); 149 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 150 return; 151 } 152 CAMLOCK_2_ISPLOCK(isp); 153 } 154 155 156 /* 157 * If we have a second channel, construct SIM entry for that. 
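	 * The 12X0-class dual-bus adapters get a second SIM and wildcard
	 * path here, registered on the secondary bus and sharing the same
	 * device queue as the first.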
158 */ 159 if (IS_DUALBUS(isp)) { 160 ISPLOCK_2_CAMLOCK(isp); 161 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 162 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 163 if (sim == NULL) { 164 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 165 xpt_free_path(isp->isp_path); 166 cam_simq_free(devq); 167 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 168 return; 169 } 170 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 171 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 172 xpt_free_path(isp->isp_path); 173 cam_sim_free(sim, TRUE); 174 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 175 CAMLOCK_2_ISPLOCK(isp); 176 return; 177 } 178 179 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 180 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 181 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 182 xpt_free_path(isp->isp_path); 183 xpt_bus_deregister(cam_sim_path(sim)); 184 cam_sim_free(sim, TRUE); 185 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 186 CAMLOCK_2_ISPLOCK(isp); 187 return; 188 } 189 190 xpt_setup_ccb(&csa.ccb_h, path, 5); 191 csa.ccb_h.func_code = XPT_SASYNC_CB; 192 csa.event_enable = AC_LOST_DEVICE; 193 csa.callback = isp_cam_async; 194 csa.callback_arg = sim; 195 xpt_action((union ccb *)&csa); 196 CAMLOCK_2_ISPLOCK(isp); 197 isp->isp_sim2 = sim; 198 isp->isp_path2 = path; 199 } 200 /* 201 * Create device nodes 202 */ 203 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 204 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 205 206 if (isp->isp_role != ISP_ROLE_NONE) { 207 isp->isp_state = ISP_RUNSTATE; 208 } 209 if (isplist == NULL) { 210 isplist = isp; 211 } else { 212 struct ispsoftc *tmp = isplist; 213 while (tmp->isp_osinfo.next) { 214 tmp = tmp->isp_osinfo.next; 215 } 216 tmp->isp_osinfo.next = isp; 217 } 218 219 } 220 221 static INLINE void 222 isp_freeze_loopdown(struct ispsoftc *isp, char *msg) 223 { 224 if (isp->isp_osinfo.simqfrozen == 0) { 225 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 226 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 227 ISPLOCK_2_CAMLOCK(isp); 228 xpt_freeze_simq(isp->isp_sim, 1); 229 CAMLOCK_2_ISPLOCK(isp); 230 } else { 231 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 232 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 233 } 234 } 235 236 static int 237 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) 238 { 239 struct ispsoftc *isp; 240 int retval = ENOTTY; 241 242 isp = isplist; 243 while (isp) { 244 if (minor(dev) == device_get_unit(isp->isp_dev)) { 245 break; 246 } 247 isp = isp->isp_osinfo.next; 248 } 249 if (isp == NULL) 250 return (ENXIO); 251 252 switch (cmd) { 253 #ifdef ISP_FW_CRASH_DUMP 254 case ISP_GET_FW_CRASH_DUMP: 255 { 256 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data; 257 size_t sz; 258 259 retval = 0; 260 if (IS_2200(isp)) 261 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 262 else 263 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 264 ISP_LOCK(isp); 265 if (ptr && *ptr) { 266 void *uaddr = *((void **) addr); 267 if (copyout(ptr, uaddr, sz)) { 268 retval = EFAULT; 269 } else { 270 *ptr = 0; 271 } 272 } else { 273 retval = ENXIO; 274 } 275 ISP_UNLOCK(isp); 276 break; 277 } 278 279 case ISP_FORCE_CRASH_DUMP: 280 ISP_LOCK(isp); 281 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)"); 282 isp_fw_dump(isp); 283 isp_reinit(isp); 284 ISP_UNLOCK(isp); 285 retval = 0; 286 break; 287 #endif 288 case ISP_SDBLEV: 289 { 290 int olddblev = isp->isp_dblev; 291 isp->isp_dblev = *(int *)addr; 292 *(int *)addr = 
olddblev; 293 retval = 0; 294 break; 295 } 296 case ISP_RESETHBA: 297 ISP_LOCK(isp); 298 isp_reinit(isp); 299 ISP_UNLOCK(isp); 300 retval = 0; 301 break; 302 case ISP_RESCAN: 303 if (IS_FC(isp)) { 304 ISP_LOCK(isp); 305 if (isp_fc_runstate(isp, 5 * 1000000)) { 306 retval = EIO; 307 } else { 308 retval = 0; 309 } 310 ISP_UNLOCK(isp); 311 } 312 break; 313 case ISP_FC_LIP: 314 if (IS_FC(isp)) { 315 ISP_LOCK(isp); 316 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 317 retval = EIO; 318 } else { 319 retval = 0; 320 } 321 ISP_UNLOCK(isp); 322 } 323 break; 324 case ISP_FC_GETDINFO: 325 { 326 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 327 struct lportdb *lp; 328 329 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 330 retval = EINVAL; 331 break; 332 } 333 ISP_LOCK(isp); 334 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 335 if (lp->valid) { 336 ifc->loopid = lp->loopid; 337 ifc->portid = lp->portid; 338 ifc->node_wwn = lp->node_wwn; 339 ifc->port_wwn = lp->port_wwn; 340 retval = 0; 341 } else { 342 retval = ENODEV; 343 } 344 ISP_UNLOCK(isp); 345 break; 346 } 347 case ISP_GET_STATS: 348 { 349 isp_stats_t *sp = (isp_stats_t *) addr; 350 351 MEMZERO(sp, sizeof (*sp)); 352 sp->isp_stat_version = ISP_STATS_VERSION; 353 sp->isp_type = isp->isp_type; 354 sp->isp_revision = isp->isp_revision; 355 ISP_LOCK(isp); 356 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 357 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 358 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 359 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 360 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 361 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 362 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 363 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 364 ISP_UNLOCK(isp); 365 retval = 0; 366 break; 367 } 368 case ISP_CLR_STATS: 369 ISP_LOCK(isp); 370 isp->isp_intcnt = 0; 371 isp->isp_intbogus = 0; 372 isp->isp_intmboxc = 0; 373 isp->isp_intoasync = 0; 374 isp->isp_rsltccmplt = 0; 375 isp->isp_fphccmplt = 0; 376 isp->isp_rscchiwater = 0; 377 isp->isp_fpcchiwater = 0; 378 ISP_UNLOCK(isp); 379 retval = 0; 380 break; 381 case ISP_FC_GETHINFO: 382 { 383 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 384 MEMZERO(hba, sizeof (*hba)); 385 ISP_LOCK(isp); 386 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 387 hba->fc_scsi_supported = 1; 388 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 389 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 390 hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn; 391 hba->active_port_wwn = FCPARAM(isp)->isp_portwwn; 392 ISP_UNLOCK(isp); 393 retval = 0; 394 break; 395 } 396 case ISP_GET_FC_PARAM: 397 { 398 struct isp_fc_param *f = (struct isp_fc_param *) addr; 399 400 if (!IS_FC(isp)) { 401 retval = EINVAL; 402 break; 403 } 404 f->parameter = 0; 405 if (strcmp(f->param_name, "framelength") == 0) { 406 f->parameter = FCPARAM(isp)->isp_maxfrmlen; 407 retval = 0; 408 break; 409 } 410 if (strcmp(f->param_name, "exec_throttle") == 0) { 411 f->parameter = FCPARAM(isp)->isp_execthrottle; 412 retval = 0; 413 break; 414 } 415 if (strcmp(f->param_name, "fullduplex") == 0) { 416 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX) 417 f->parameter = 1; 418 retval = 0; 419 break; 420 } 421 if (strcmp(f->param_name, "loopid") == 0) { 422 f->parameter = FCPARAM(isp)->isp_loopid; 423 retval = 0; 424 break; 425 } 426 retval = EINVAL; 427 break; 428 } 429 case ISP_SET_FC_PARAM: 430 { 431 struct isp_fc_param *f = (struct isp_fc_param *) addr; 432 u_int32_t param = f->parameter; 433 434 if 
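		/*
		 * Illustrative sketch only (not part of the driver): a
		 * userland management tool would reach this handler with
		 * something along these lines, using the isp0 node created
		 * in isp_attach() and the definitions from isp_ioctl.h:
		 *
		 *	struct isp_fc_param p;
		 *	int fd = open("/dev/isp0", O_RDWR);
		 *	strcpy(p.param_name, "loopid");
		 *	p.parameter = 100;
		 *	(void) ioctl(fd, ISP_SET_FC_PARAM, (caddr_t) &p);
		 */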
(!IS_FC(isp)) {
			retval = EINVAL;
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			if (param != 512 && param != 1024 && param != 2048) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_maxfrmlen = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			if (param < 16 || param > 255) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_execthrottle = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (param != 0 && param != 1) {
				retval = EINVAL;
				break;
			}
			if (param) {
				FCPARAM(isp)->isp_fwoptions |=
				    ICBOPT_FULL_DUPLEX;
			} else {
				FCPARAM(isp)->isp_fwoptions &=
				    ~ICBOPT_FULL_DUPLEX;
			}
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			if (param < 0 || param > 125) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_loopid = param;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	default:
		break;
	}
	return (retval);
}

static void
isp_intr_enable(void *arg)
{
	struct ispsoftc *isp = arg;
	if (isp->isp_role != ISP_ROLE_NONE) {
		ENABLE_INTS(isp);
	}
	/* Release our hook so that the boot can continue. */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef ISP_TARGET_MODE

static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
static cam_status
create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static void isp_en_lun(struct ispsoftc *, union ccb *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static timeout_t isp_refire_putback_atio;
static void isp_complete_ctio(union ccb *);
static void isp_target_putback_atio(union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);
static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);

static INLINE int
is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr;
	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
	if (tptr == NULL) {
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	return (0);
}

static INLINE int
are_any_luns_enabled(struct ispsoftc *isp, int port)
{
	int lo, hi;
	if
(IS_DUALBUS(isp)) { 552 lo = (port * (LUN_HASH_SIZE >> 1)); 553 hi = lo + (LUN_HASH_SIZE >> 1); 554 } else { 555 lo = 0; 556 hi = LUN_HASH_SIZE; 557 } 558 for (lo = 0; lo < hi; lo++) { 559 if (isp->isp_osinfo.lun_hash[lo]) { 560 return (1); 561 } 562 } 563 return (0); 564 } 565 566 static INLINE tstate_t * 567 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun) 568 { 569 tstate_t *tptr = NULL; 570 571 if (lun == CAM_LUN_WILDCARD) { 572 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 573 tptr = &isp->isp_osinfo.tsdflt[bus]; 574 tptr->hold++; 575 return (tptr); 576 } 577 } else { 578 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 579 if (tptr == NULL) { 580 return (NULL); 581 } 582 } 583 584 do { 585 if (tptr->lun == lun && tptr->bus == bus) { 586 tptr->hold++; 587 return (tptr); 588 } 589 } while ((tptr = tptr->next) != NULL); 590 return (tptr); 591 } 592 593 static __inline void 594 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr) 595 { 596 if (tptr->hold) 597 tptr->hold--; 598 } 599 600 static __inline int 601 isp_psema_sig_rqe(struct ispsoftc *isp, int bus) 602 { 603 while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) { 604 isp->isp_osinfo.tmflags[bus] |= TM_WANTED; 605 if (tsleep(&isp->isp_osinfo.tmflags[bus], 606 PRIBIO|PCATCH, "i0", 0)) { 607 return (-1); 608 } 609 isp->isp_osinfo.tmflags[bus] |= TM_BUSY; 610 } 611 return (0); 612 } 613 614 static __inline int 615 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo) 616 { 617 if (tsleep(&isp->isp_osinfo.rstatus[bus], PRIBIO, "qt1", timo)) { 618 return (-1); 619 } 620 return (0); 621 } 622 623 static __inline void 624 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status) 625 { 626 isp->isp_osinfo.rstatus[bus] = status; 627 wakeup(&isp->isp_osinfo.rstatus[bus]); 628 } 629 630 static __inline void 631 isp_vsema_rqe(struct ispsoftc *isp, int bus) 632 { 633 if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) { 634 isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED; 635 wakeup(&isp->isp_osinfo.tmflags[bus]); 636 } 637 isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY; 638 } 639 640 static __inline atio_private_data_t * 641 isp_get_atpd(struct ispsoftc *isp, int tag) 642 { 643 atio_private_data_t *atp; 644 for (atp = isp->isp_osinfo.atpdp; 645 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 646 if (atp->tag == tag) 647 return (atp); 648 } 649 return (NULL); 650 } 651 652 static cam_status 653 create_lun_state(struct ispsoftc *isp, int bus, 654 struct cam_path *path, tstate_t **rslt) 655 { 656 cam_status status; 657 lun_id_t lun; 658 int hfx; 659 tstate_t *tptr, *new; 660 661 lun = xpt_path_lun_id(path); 662 if (lun < 0) { 663 return (CAM_LUN_INVALID); 664 } 665 if (is_lun_enabled(isp, bus, lun)) { 666 return (CAM_LUN_ALRDY_ENA); 667 } 668 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 669 if (new == NULL) { 670 return (CAM_RESRC_UNAVAIL); 671 } 672 673 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 674 xpt_path_target_id(path), xpt_path_lun_id(path)); 675 if (status != CAM_REQ_CMP) { 676 free(new, M_DEVBUF); 677 return (status); 678 } 679 new->bus = bus; 680 new->lun = lun; 681 SLIST_INIT(&new->atios); 682 SLIST_INIT(&new->inots); 683 new->hold = 1; 684 685 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 686 tptr = isp->isp_osinfo.lun_hash[hfx]; 687 if (tptr == NULL) { 688 isp->isp_osinfo.lun_hash[hfx] = new; 689 } else { 690 while (tptr->next) 691 tptr = tptr->next; 692 tptr->next = new; 693 } 694 *rslt = new; 695 return (CAM_REQ_CMP); 696 } 697 698 static INLINE void 
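/*
 * Unhook a lun state structure from its hash chain and free it.
 * Holders of a reference (tptr->hold != 0) keep it alive.
 */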
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	int hfx;
	tstate_t *lw, *pw;

	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
	if (tptr->hold) {
		return;
	}
	pw = isp->isp_osinfo.lun_hash[hfx];
	if (pw == NULL) {
		return;
	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
		isp->isp_osinfo.lun_hash[hfx] = pw->next;
	} else {
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			return;
		}
	}
	free(tptr, M_DEVBUF);
}

/*
 * We enter with our locks held.
 */
static void
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int16_t rstat;
	int bus, cmd, av, wildcard;
	lun_id_t lun;
	target_id_t tgt;


	bus = XS_CHANNEL(ccb) & 0x1;
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	/*
	 * Do some sanity checking first.
	 */

	if ((lun != CAM_LUN_WILDCARD) &&
	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return;
	}

	if (IS_SCSI(isp)) {
		sdparam *sdp = isp->isp_param;
		sdp += bus;
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != sdp->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	} else {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != FCPARAM(isp)->isp_iid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
		/*
		 * This is as good a place as any to check f/w capabilities.
		 */
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "firmware does not support target mode");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return;
		}
		/*
		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
		 * XXX: dork with our already fragile enable/disable code.
		 */
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "firmware not SCCLUN capable");
		}
	}

	if (tgt == CAM_TARGET_WILDCARD) {
		if (lun == CAM_LUN_WILDCARD) {
			wildcard = 1;
		} else {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	} else {
		wildcard = 0;
	}

	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we know that we can accept commands for luns that haven't
	 * been enabled yet and send them upstream. Otherwise, we have to
	 * handle them locally (if we see them at all).
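	 *
	 * A wildcard enable (both target and lun wildcards) arms the
	 * per-bus default state (tsdflt) instead of a per-lun tstate_t,
	 * which is how commands for not-yet-enabled luns can be passed
	 * upstream.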
810 */ 811 812 if (wildcard) { 813 tptr = &isp->isp_osinfo.tsdflt[bus]; 814 if (cel->enable) { 815 if (isp->isp_osinfo.tmflags[bus] & 816 TM_WILDCARD_ENABLED) { 817 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 818 return; 819 } 820 ccb->ccb_h.status = 821 xpt_create_path(&tptr->owner, NULL, 822 xpt_path_path_id(ccb->ccb_h.path), 823 xpt_path_target_id(ccb->ccb_h.path), 824 xpt_path_lun_id(ccb->ccb_h.path)); 825 if (ccb->ccb_h.status != CAM_REQ_CMP) { 826 return; 827 } 828 SLIST_INIT(&tptr->atios); 829 SLIST_INIT(&tptr->inots); 830 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 831 } else { 832 if ((isp->isp_osinfo.tmflags[bus] & 833 TM_WILDCARD_ENABLED) == 0) { 834 ccb->ccb_h.status = CAM_REQ_CMP; 835 return; 836 } 837 if (tptr->hold) { 838 ccb->ccb_h.status = CAM_SCSI_BUSY; 839 return; 840 } 841 xpt_free_path(tptr->owner); 842 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 843 } 844 } 845 846 /* 847 * Now check to see whether this bus needs to be 848 * enabled/disabled with respect to target mode. 849 */ 850 av = bus << 31; 851 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) { 852 av |= ENABLE_TARGET_FLAG; 853 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 854 if (av) { 855 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 856 if (wildcard) { 857 isp->isp_osinfo.tmflags[bus] &= 858 ~TM_WILDCARD_ENABLED; 859 xpt_free_path(tptr->owner); 860 } 861 return; 862 } 863 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 864 isp_prt(isp, ISP_LOGINFO, 865 "Target Mode enabled on channel %d", bus); 866 } else if (cel->enable == 0 && 867 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) { 868 if (are_any_luns_enabled(isp, bus)) { 869 ccb->ccb_h.status = CAM_SCSI_BUSY; 870 return; 871 } 872 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 873 if (av) { 874 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 875 return; 876 } 877 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 878 isp_prt(isp, ISP_LOGINFO, 879 "Target Mode disabled on channel %d", bus); 880 } 881 882 if (wildcard) { 883 ccb->ccb_h.status = CAM_REQ_CMP; 884 return; 885 } 886 887 if (cel->enable) { 888 ccb->ccb_h.status = 889 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 890 if (ccb->ccb_h.status != CAM_REQ_CMP) { 891 return; 892 } 893 } else { 894 tptr = get_lun_statep(isp, bus, lun); 895 if (tptr == NULL) { 896 ccb->ccb_h.status = CAM_LUN_INVALID; 897 return; 898 } 899 } 900 901 if (isp_psema_sig_rqe(isp, bus)) { 902 rls_lun_statep(isp, tptr); 903 if (cel->enable) 904 destroy_lun_state(isp, tptr); 905 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 906 return; 907 } 908 909 if (cel->enable) { 910 u_int32_t seq = isp->isp_osinfo.rollinfo++; 911 int c, n, ulun = lun; 912 913 cmd = RQSTYPE_ENABLE_LUN; 914 c = DFLT_CMND_CNT; 915 n = DFLT_INOT_CNT; 916 if (IS_FC(isp) && lun != 0) { 917 cmd = RQSTYPE_MODIFY_LUN; 918 n = 0; 919 /* 920 * For SCC firmware, we only deal with setting 921 * (enabling or modifying) lun 0. 
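			 * (SCC firmware manages the lun space itself, so lun
			 * 0 effectively stands in for all of the others here.)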
922 */ 923 ulun = 0; 924 } 925 rstat = LUN_ERR; 926 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 927 xpt_print_path(ccb->ccb_h.path); 928 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed"); 929 goto out; 930 } 931 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 932 xpt_print_path(ccb->ccb_h.path); 933 isp_prt(isp, ISP_LOGERR, 934 "wait for ENABLE/MODIFY LUN timed out"); 935 goto out; 936 } 937 rstat = isp->isp_osinfo.rstatus[bus]; 938 if (rstat != LUN_OK) { 939 xpt_print_path(ccb->ccb_h.path); 940 isp_prt(isp, ISP_LOGERR, 941 "ENABLE/MODIFY LUN returned 0x%x", rstat); 942 goto out; 943 } 944 } else { 945 int c, n, ulun = lun; 946 u_int32_t seq; 947 948 rstat = LUN_ERR; 949 seq = isp->isp_osinfo.rollinfo++; 950 cmd = -RQSTYPE_MODIFY_LUN; 951 952 c = DFLT_CMND_CNT; 953 n = DFLT_INOT_CNT; 954 if (IS_FC(isp) && lun != 0) { 955 n = 0; 956 /* 957 * For SCC firmware, we only deal with setting 958 * (enabling or modifying) lun 0. 959 */ 960 ulun = 0; 961 } 962 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 963 xpt_print_path(ccb->ccb_h.path); 964 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 965 goto out; 966 } 967 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 968 xpt_print_path(ccb->ccb_h.path); 969 isp_prt(isp, ISP_LOGERR, 970 "wait for MODIFY LUN timed out"); 971 goto out; 972 } 973 rstat = isp->isp_osinfo.rstatus[bus]; 974 if (rstat != LUN_OK) { 975 xpt_print_path(ccb->ccb_h.path); 976 isp_prt(isp, ISP_LOGERR, 977 "MODIFY LUN returned 0x%x", rstat); 978 goto out; 979 } 980 if (IS_FC(isp) && lun) { 981 goto out; 982 } 983 984 seq = isp->isp_osinfo.rollinfo++; 985 986 rstat = LUN_ERR; 987 cmd = -RQSTYPE_ENABLE_LUN; 988 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) { 989 xpt_print_path(ccb->ccb_h.path); 990 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 991 goto out; 992 } 993 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 994 xpt_print_path(ccb->ccb_h.path); 995 isp_prt(isp, ISP_LOGERR, 996 "wait for DISABLE LUN timed out"); 997 goto out; 998 } 999 rstat = isp->isp_osinfo.rstatus[bus]; 1000 if (rstat != LUN_OK) { 1001 xpt_print_path(ccb->ccb_h.path); 1002 isp_prt(isp, ISP_LOGWARN, 1003 "DISABLE LUN returned 0x%x", rstat); 1004 goto out; 1005 } 1006 if (are_any_luns_enabled(isp, bus) == 0) { 1007 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1008 if (av) { 1009 isp_prt(isp, ISP_LOGWARN, 1010 "disable target mode on channel %d failed", 1011 bus); 1012 goto out; 1013 } 1014 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1015 xpt_print_path(ccb->ccb_h.path); 1016 isp_prt(isp, ISP_LOGINFO, 1017 "Target Mode disabled on channel %d", bus); 1018 } 1019 } 1020 1021 out: 1022 isp_vsema_rqe(isp, bus); 1023 1024 if (rstat != LUN_OK) { 1025 xpt_print_path(ccb->ccb_h.path); 1026 isp_prt(isp, ISP_LOGWARN, 1027 "lun %sable failed", (cel->enable) ? "en" : "dis"); 1028 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1029 rls_lun_statep(isp, tptr); 1030 if (cel->enable) 1031 destroy_lun_state(isp, tptr); 1032 } else { 1033 xpt_print_path(ccb->ccb_h.path); 1034 isp_prt(isp, ISP_LOGINFO, lfmt, 1035 (cel->enable) ? 
"en" : "dis", bus); 1036 rls_lun_statep(isp, tptr); 1037 if (cel->enable == 0) { 1038 destroy_lun_state(isp, tptr); 1039 } 1040 ccb->ccb_h.status = CAM_REQ_CMP; 1041 } 1042 } 1043 1044 static cam_status 1045 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) 1046 { 1047 tstate_t *tptr; 1048 struct ccb_hdr_slist *lp; 1049 struct ccb_hdr *curelm; 1050 int found; 1051 union ccb *accb = ccb->cab.abort_ccb; 1052 1053 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 1054 if (IS_FC(isp) && (accb->ccb_h.target_id != 1055 ((fcparam *) isp->isp_param)->isp_loopid)) { 1056 return (CAM_PATH_INVALID); 1057 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1058 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1059 return (CAM_PATH_INVALID); 1060 } 1061 } 1062 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 1063 if (tptr == NULL) { 1064 return (CAM_PATH_INVALID); 1065 } 1066 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1067 lp = &tptr->atios; 1068 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 1069 lp = &tptr->inots; 1070 } else { 1071 rls_lun_statep(isp, tptr); 1072 return (CAM_UA_ABORT); 1073 } 1074 curelm = SLIST_FIRST(lp); 1075 found = 0; 1076 if (curelm == &accb->ccb_h) { 1077 found = 1; 1078 SLIST_REMOVE_HEAD(lp, sim_links.sle); 1079 } else { 1080 while(curelm != NULL) { 1081 struct ccb_hdr *nextelm; 1082 1083 nextelm = SLIST_NEXT(curelm, sim_links.sle); 1084 if (nextelm == &accb->ccb_h) { 1085 found = 1; 1086 SLIST_NEXT(curelm, sim_links.sle) = 1087 SLIST_NEXT(nextelm, sim_links.sle); 1088 break; 1089 } 1090 curelm = nextelm; 1091 } 1092 } 1093 rls_lun_statep(isp, tptr); 1094 if (found) { 1095 accb->ccb_h.status = CAM_REQ_ABORTED; 1096 return (CAM_REQ_CMP); 1097 } 1098 return(CAM_PATH_INVALID); 1099 } 1100 1101 static cam_status 1102 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) 1103 { 1104 void *qe; 1105 struct ccb_scsiio *cso = &ccb->csio; 1106 u_int16_t *hp, save_handle; 1107 u_int16_t nxti, optr; 1108 u_int8_t local[QENTRY_LEN]; 1109 1110 1111 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1112 xpt_print_path(ccb->ccb_h.path); 1113 printf("Request Queue Overflow in isp_target_start_ctio\n"); 1114 return (CAM_RESRC_UNAVAIL); 1115 } 1116 bzero(local, QENTRY_LEN); 1117 1118 /* 1119 * We're either moving data or completing a command here. 
1120 */ 1121 1122 if (IS_FC(isp)) { 1123 atio_private_data_t *atp; 1124 ct2_entry_t *cto = (ct2_entry_t *) local; 1125 1126 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1127 cto->ct_header.rqs_entry_count = 1; 1128 cto->ct_iid = cso->init_id; 1129 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 1130 cto->ct_lun = ccb->ccb_h.target_lun; 1131 } 1132 1133 atp = isp_get_atpd(isp, cso->tag_id); 1134 if (atp == NULL) { 1135 isp_prt(isp, ISP_LOGERR, 1136 "cannot find private data adjunct for tag %x", 1137 cso->tag_id); 1138 return (-1); 1139 } 1140 1141 cto->ct_rxid = cso->tag_id; 1142 if (cso->dxfer_len == 0) { 1143 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1144 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1145 cto->ct_flags |= CT2_SENDSTATUS; 1146 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1147 cto->ct_resid = 1148 atp->orig_datalen - atp->bytes_xfered; 1149 if (cto->ct_resid < 0) { 1150 cto->rsp.m1.ct_scsi_status |= 1151 CT2_DATA_OVER; 1152 } else if (cto->ct_resid > 0) { 1153 cto->rsp.m1.ct_scsi_status |= 1154 CT2_DATA_UNDER; 1155 } 1156 } 1157 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1158 int m = min(cso->sense_len, MAXRESPLEN); 1159 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m); 1160 cto->rsp.m1.ct_senselen = m; 1161 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1162 } 1163 } else { 1164 cto->ct_flags |= CT2_FLAG_MODE0; 1165 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1166 cto->ct_flags |= CT2_DATA_IN; 1167 } else { 1168 cto->ct_flags |= CT2_DATA_OUT; 1169 } 1170 cto->ct_reloff = atp->bytes_xfered; 1171 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1172 cto->ct_flags |= CT2_SENDSTATUS; 1173 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1174 cto->ct_resid = 1175 atp->orig_datalen - 1176 (atp->bytes_xfered + cso->dxfer_len); 1177 if (cto->ct_resid < 0) { 1178 cto->rsp.m0.ct_scsi_status |= 1179 CT2_DATA_OVER; 1180 } else if (cto->ct_resid > 0) { 1181 cto->rsp.m0.ct_scsi_status |= 1182 CT2_DATA_UNDER; 1183 } 1184 } else { 1185 atp->last_xframt = cso->dxfer_len; 1186 } 1187 /* 1188 * If we're sending data and status back together, 1189 * we can't also send back sense data as well. 
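			 * (A mode 0 CTIO2 has no room for sense data; that
			 * would need a separate mode 1 CTIO2.)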
1190 */ 1191 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1192 } 1193 1194 if (cto->ct_flags & CT2_SENDSTATUS) { 1195 isp_prt(isp, ISP_LOGTDEBUG0, 1196 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1197 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1198 cso->dxfer_len, cto->ct_resid); 1199 cto->ct_flags |= CT2_CCINCR; 1200 atp->state = ATPD_STATE_LAST_CTIO; 1201 } else 1202 atp->state = ATPD_STATE_CTIO; 1203 cto->ct_timeout = 10; 1204 hp = &cto->ct_syshandle; 1205 } else { 1206 ct_entry_t *cto = (ct_entry_t *) local; 1207 1208 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1209 cto->ct_header.rqs_entry_count = 1; 1210 cto->ct_iid = cso->init_id; 1211 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1212 cto->ct_tgt = ccb->ccb_h.target_id; 1213 cto->ct_lun = ccb->ccb_h.target_lun; 1214 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1215 if (AT_HAS_TAG(cso->tag_id)) { 1216 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id); 1217 cto->ct_flags |= CT_TQAE; 1218 } 1219 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1220 cto->ct_flags |= CT_NODISC; 1221 } 1222 if (cso->dxfer_len == 0) { 1223 cto->ct_flags |= CT_NO_DATA; 1224 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1225 cto->ct_flags |= CT_DATA_IN; 1226 } else { 1227 cto->ct_flags |= CT_DATA_OUT; 1228 } 1229 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1230 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1231 cto->ct_scsi_status = cso->scsi_status; 1232 cto->ct_resid = cso->resid; 1233 isp_prt(isp, ISP_LOGTDEBUG0, 1234 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1235 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1236 cso->tag_id); 1237 } 1238 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1239 cto->ct_timeout = 10; 1240 hp = &cto->ct_syshandle; 1241 } 1242 1243 if (isp_save_xs(isp, (XS_T *)ccb, hp)) { 1244 xpt_print_path(ccb->ccb_h.path); 1245 printf("No XFLIST pointers for isp_target_start_ctio\n"); 1246 return (CAM_RESRC_UNAVAIL); 1247 } 1248 1249 1250 /* 1251 * Call the dma setup routines for this entry (and any subsequent 1252 * CTIOs) if there's data to move, and then tell the f/w it's got 1253 * new things to play with. As with isp_start's usage of DMA setup, 1254 * any swizzling is done in the machine dependent layer. Because 1255 * of this, we put the request onto the queue area first in native 1256 * format. 
1257 */ 1258 1259 save_handle = *hp; 1260 1261 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1262 case CMD_QUEUED: 1263 ISP_ADD_REQUEST(isp, nxti); 1264 return (CAM_REQ_INPROG); 1265 1266 case CMD_EAGAIN: 1267 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1268 isp_destroy_handle(isp, save_handle); 1269 return (CAM_RESRC_UNAVAIL); 1270 1271 default: 1272 isp_destroy_handle(isp, save_handle); 1273 return (XS_ERR(ccb)); 1274 } 1275 } 1276 1277 static void 1278 isp_refire_putback_atio(void *arg) 1279 { 1280 int s = splcam(); 1281 isp_target_putback_atio(arg); 1282 splx(s); 1283 } 1284 1285 static void 1286 isp_target_putback_atio(union ccb *ccb) 1287 { 1288 struct ispsoftc *isp; 1289 struct ccb_scsiio *cso; 1290 u_int16_t nxti, optr; 1291 void *qe; 1292 1293 isp = XS_ISP(ccb); 1294 1295 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1296 (void) timeout(isp_refire_putback_atio, ccb, 10); 1297 isp_prt(isp, ISP_LOGWARN, 1298 "isp_target_putback_atio: Request Queue Overflow"); 1299 return; 1300 } 1301 bzero(qe, QENTRY_LEN); 1302 cso = &ccb->csio; 1303 if (IS_FC(isp)) { 1304 at2_entry_t local, *at = &local; 1305 MEMZERO(at, sizeof (at2_entry_t)); 1306 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1307 at->at_header.rqs_entry_count = 1; 1308 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1309 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1310 } else { 1311 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1312 } 1313 at->at_status = CT_OK; 1314 at->at_rxid = cso->tag_id; 1315 at->at_iid = cso->ccb_h.target_id; 1316 isp_put_atio2(isp, at, qe); 1317 } else { 1318 at_entry_t local, *at = &local; 1319 MEMZERO(at, sizeof (at_entry_t)); 1320 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1321 at->at_header.rqs_entry_count = 1; 1322 at->at_iid = cso->init_id; 1323 at->at_iid |= XS_CHANNEL(ccb) << 7; 1324 at->at_tgt = cso->ccb_h.target_id; 1325 at->at_lun = cso->ccb_h.target_lun; 1326 at->at_status = CT_OK; 1327 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1328 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1329 isp_put_atio(isp, at, qe); 1330 } 1331 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1332 ISP_ADD_REQUEST(isp, nxti); 1333 isp_complete_ctio(ccb); 1334 } 1335 1336 static void 1337 isp_complete_ctio(union ccb *ccb) 1338 { 1339 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1340 ccb->ccb_h.status |= CAM_REQ_CMP; 1341 } 1342 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1343 xpt_done(ccb); 1344 } 1345 1346 /* 1347 * Handle ATIO stuff that the generic code can't. 1348 * This means handling CDBs. 1349 */ 1350 1351 static int 1352 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep) 1353 { 1354 tstate_t *tptr; 1355 int status, bus, iswildcard; 1356 struct ccb_accept_tio *atiop; 1357 1358 /* 1359 * The firmware status (except for the QLTM_SVALID bit) 1360 * indicates why this ATIO was sent to us. 1361 * 1362 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1363 * 1364 * If the DISCONNECTS DISABLED bit is set in the flags field, 1365 * we're still connected on the SCSI bus. 1366 */ 1367 status = aep->at_status; 1368 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1369 /* 1370 * Bus Phase Sequence error. We should have sense data 1371 * suggested by the f/w. I'm not sure quite yet what 1372 * to do about this for CAM. 
1373 */ 1374 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1375 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1376 return (0); 1377 } 1378 if ((status & ~QLTM_SVALID) != AT_CDB) { 1379 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1380 status); 1381 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1382 return (0); 1383 } 1384 1385 bus = GET_BUS_VAL(aep->at_iid); 1386 tptr = get_lun_statep(isp, bus, aep->at_lun); 1387 if (tptr == NULL) { 1388 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1389 iswildcard = 1; 1390 } else { 1391 iswildcard = 0; 1392 } 1393 1394 if (tptr == NULL) { 1395 /* 1396 * Because we can't autofeed sense data back with 1397 * a command for parallel SCSI, we can't give back 1398 * a CHECK CONDITION. We'll give back a BUSY status 1399 * instead. This works out okay because the only 1400 * time we should, in fact, get this, is in the 1401 * case that somebody configured us without the 1402 * blackhole driver, so they get what they deserve. 1403 */ 1404 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1405 return (0); 1406 } 1407 1408 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1409 if (atiop == NULL) { 1410 /* 1411 * Because we can't autofeed sense data back with 1412 * a command for parallel SCSI, we can't give back 1413 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1414 * instead. This works out okay because the only time we 1415 * should, in fact, get this, is in the case that we've 1416 * run out of ATIOS. 1417 */ 1418 xpt_print_path(tptr->owner); 1419 isp_prt(isp, ISP_LOGWARN, 1420 "no ATIOS for lun %d from initiator %d on channel %d", 1421 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1422 if (aep->at_flags & AT_TQAE) 1423 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1424 else 1425 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1426 rls_lun_statep(isp, tptr); 1427 return (0); 1428 } 1429 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1430 if (iswildcard) { 1431 atiop->ccb_h.target_id = aep->at_tgt; 1432 atiop->ccb_h.target_lun = aep->at_lun; 1433 } 1434 if (aep->at_flags & AT_NODISC) { 1435 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1436 } else { 1437 atiop->ccb_h.flags = 0; 1438 } 1439 1440 if (status & QLTM_SVALID) { 1441 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1442 atiop->sense_len = amt; 1443 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1444 } else { 1445 atiop->sense_len = 0; 1446 } 1447 1448 atiop->init_id = GET_IID_VAL(aep->at_iid); 1449 atiop->cdb_len = aep->at_cdblen; 1450 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1451 atiop->ccb_h.status = CAM_CDB_RECVD; 1452 /* 1453 * Construct a tag 'id' based upon tag value (which may be 0..255) 1454 * and the handle (which we have to preserve). 1455 */ 1456 AT_MAKE_TAGID(atiop->tag_id, aep); 1457 if (aep->at_flags & AT_TQAE) { 1458 atiop->tag_action = aep->at_tag_type; 1459 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1460 } 1461 xpt_done((union ccb*)atiop); 1462 isp_prt(isp, ISP_LOGTDEBUG0, 1463 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1464 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1465 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1466 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 
1467 "nondisc" : "disconnecting"); 1468 rls_lun_statep(isp, tptr); 1469 return (0); 1470 } 1471 1472 static int 1473 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep) 1474 { 1475 lun_id_t lun; 1476 tstate_t *tptr; 1477 struct ccb_accept_tio *atiop; 1478 atio_private_data_t *atp; 1479 1480 /* 1481 * The firmware status (except for the QLTM_SVALID bit) 1482 * indicates why this ATIO was sent to us. 1483 * 1484 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1485 */ 1486 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1487 isp_prt(isp, ISP_LOGWARN, 1488 "bogus atio (0x%x) leaked to platform", aep->at_status); 1489 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1490 return (0); 1491 } 1492 1493 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1494 lun = aep->at_scclun; 1495 } else { 1496 lun = aep->at_lun; 1497 } 1498 tptr = get_lun_statep(isp, 0, lun); 1499 if (tptr == NULL) { 1500 isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun); 1501 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1502 } 1503 1504 if (tptr == NULL) { 1505 /* 1506 * What we'd like to know is whether or not we have a listener 1507 * upstream that really hasn't configured yet. If we do, then 1508 * we can give a more sensible reply here. If not, then we can 1509 * reject this out of hand. 1510 * 1511 * Choices for what to send were 1512 * 1513 * Not Ready, Unit Not Self-Configured Yet 1514 * (0x2,0x3e,0x00) 1515 * 1516 * for the former and 1517 * 1518 * Illegal Request, Logical Unit Not Supported 1519 * (0x5,0x25,0x00) 1520 * 1521 * for the latter. 1522 * 1523 * We used to decide whether there was at least one listener 1524 * based upon whether the black hole driver was configured. 1525 * However, recent config(8) changes have made this hard to do 1526 * at this time. 1527 * 1528 */ 1529 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1530 return (0); 1531 } 1532 1533 atp = isp_get_atpd(isp, 0); 1534 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1535 if (atiop == NULL || atp == NULL) { 1536 /* 1537 * Because we can't autofeed sense data back with 1538 * a command for parallel SCSI, we can't give back 1539 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1540 * instead. This works out okay because the only time we 1541 * should, in fact, get this, is in the case that we've 1542 * run out of ATIOS. 1543 */ 1544 xpt_print_path(tptr->owner); 1545 isp_prt(isp, ISP_LOGWARN, 1546 "no %s for lun %d from initiator %d", 1547 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1548 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); 1549 rls_lun_statep(isp, tptr); 1550 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1551 return (0); 1552 } 1553 atp->state = ATPD_STATE_ATIO; 1554 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1555 tptr->atio_count--; 1556 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d", 1557 lun, tptr->atio_count); 1558 1559 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1560 atiop->ccb_h.target_id = 1561 ((fcparam *)isp->isp_param)->isp_loopid; 1562 atiop->ccb_h.target_lun = lun; 1563 } 1564 /* 1565 * We don't get 'suggested' sense data as we do with SCSI cards. 
1566 */ 1567 atiop->sense_len = 0; 1568 1569 atiop->init_id = aep->at_iid; 1570 atiop->cdb_len = ATIO2_CDBLEN; 1571 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1572 atiop->ccb_h.status = CAM_CDB_RECVD; 1573 atiop->tag_id = aep->at_rxid; 1574 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1575 case ATIO2_TC_ATTR_SIMPLEQ: 1576 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1577 break; 1578 case ATIO2_TC_ATTR_HEADOFQ: 1579 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1580 break; 1581 case ATIO2_TC_ATTR_ORDERED: 1582 atiop->tag_action = MSG_ORDERED_Q_TAG; 1583 break; 1584 case ATIO2_TC_ATTR_ACAQ: /* ?? */ 1585 case ATIO2_TC_ATTR_UNTAGGED: 1586 default: 1587 atiop->tag_action = 0; 1588 break; 1589 } 1590 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 1591 1592 atp->tag = atiop->tag_id; 1593 atp->lun = lun; 1594 atp->orig_datalen = aep->at_datalen; 1595 atp->last_xframt = 0; 1596 atp->bytes_xfered = 0; 1597 atp->state = ATPD_STATE_CAM; 1598 xpt_done((union ccb*)atiop); 1599 1600 isp_prt(isp, ISP_LOGTDEBUG0, 1601 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", 1602 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, 1603 lun, aep->at_taskflags, aep->at_datalen); 1604 rls_lun_statep(isp, tptr); 1605 return (0); 1606 } 1607 1608 static int 1609 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg) 1610 { 1611 union ccb *ccb; 1612 int sentstatus, ok, notify_cam, resid = 0; 1613 u_int16_t tval; 1614 1615 /* 1616 * CTIO and CTIO2 are close enough.... 1617 */ 1618 1619 ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle); 1620 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); 1621 isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); 1622 1623 if (IS_FC(isp)) { 1624 ct2_entry_t *ct = arg; 1625 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid); 1626 if (atp == NULL) { 1627 isp_prt(isp, ISP_LOGERR, 1628 "cannot find adjunct for %x after I/O", 1629 ct->ct_rxid); 1630 return (0); 1631 } 1632 sentstatus = ct->ct_flags & CT2_SENDSTATUS; 1633 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1634 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { 1635 ccb->ccb_h.status |= CAM_SENT_SENSE; 1636 } 1637 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1638 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { 1639 resid = ct->ct_resid; 1640 atp->bytes_xfered += (atp->last_xframt - resid); 1641 atp->last_xframt = 0; 1642 } 1643 if (sentstatus || !ok) { 1644 atp->tag = 0; 1645 } 1646 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, 1647 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", 1648 ct->ct_rxid, ct->ct_status, ct->ct_flags, 1649 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, 1650 resid, sentstatus? "FIN" : "MID"); 1651 tval = ct->ct_rxid; 1652 1653 /* XXX: should really come after isp_complete_ctio */ 1654 atp->state = ATPD_STATE_PDON; 1655 } else { 1656 ct_entry_t *ct = arg; 1657 sentstatus = ct->ct_flags & CT_SENDSTATUS; 1658 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1659 /* 1660 * We *ought* to be able to get back to the original ATIO 1661 * here, but for some reason this gets lost. It's just as 1662 * well because it's squirrelled away as part of periph 1663 * private data. 1664 * 1665 * We can live without it as long as we continue to use 1666 * the auto-replenish feature for CTIOs. 
1667 */ 1668 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1669 if (ct->ct_status & QLTM_SVALID) { 1670 char *sp = (char *)ct; 1671 sp += CTIO_SENSE_OFFSET; 1672 ccb->csio.sense_len = 1673 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1674 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1675 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1676 } 1677 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1678 resid = ct->ct_resid; 1679 } 1680 isp_prt(isp, ISP_LOGTDEBUG0, 1681 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1682 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1683 ct->ct_status, ct->ct_flags, resid, 1684 sentstatus? "FIN" : "MID"); 1685 tval = ct->ct_fwhandle; 1686 } 1687 ccb->csio.resid += resid; 1688 1689 /* 1690 * We're here either because intermediate data transfers are done 1691 * and/or the final status CTIO (which may have joined with a 1692 * Data Transfer) is done. 1693 * 1694 * In any case, for this platform, the upper layers figure out 1695 * what to do next, so all we do here is collect status and 1696 * pass information along. Any DMA handles have already been 1697 * freed. 1698 */ 1699 if (notify_cam == 0) { 1700 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1701 return (0); 1702 } 1703 1704 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1705 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1706 1707 if (!ok) { 1708 isp_target_putback_atio(ccb); 1709 } else { 1710 isp_complete_ctio(ccb); 1711 1712 } 1713 return (0); 1714 } 1715 1716 static int 1717 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp) 1718 { 1719 return (0); /* XXXX */ 1720 } 1721 1722 static int 1723 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp) 1724 { 1725 1726 switch (inp->in_status) { 1727 case IN_PORT_LOGOUT: 1728 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1729 inp->in_iid); 1730 break; 1731 case IN_PORT_CHANGED: 1732 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1733 inp->in_iid); 1734 break; 1735 case IN_GLOBAL_LOGO: 1736 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1737 break; 1738 case IN_ABORT_TASK: 1739 { 1740 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1741 struct ccb_immed_notify *inot = NULL; 1742 1743 if (atp) { 1744 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1745 if (tptr) { 1746 inot = (struct ccb_immed_notify *) 1747 SLIST_FIRST(&tptr->inots); 1748 if (inot) { 1749 SLIST_REMOVE_HEAD(&tptr->inots, 1750 sim_links.sle); 1751 } 1752 } 1753 isp_prt(isp, ISP_LOGWARN, 1754 "abort task RX_ID %x IID %d state %d", 1755 inp->in_seqid, inp->in_iid, atp->state); 1756 } else { 1757 isp_prt(isp, ISP_LOGWARN, 1758 "abort task RX_ID %x from iid %d, state unknown", 1759 inp->in_seqid, inp->in_iid); 1760 } 1761 if (inot) { 1762 inot->initiator_id = inp->in_iid; 1763 inot->sense_len = 0; 1764 inot->message_args[0] = MSG_ABORT_TAG; 1765 inot->message_args[1] = inp->in_seqid & 0xff; 1766 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1767 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 1768 xpt_done((union ccb *)inot); 1769 } 1770 break; 1771 } 1772 default: 1773 break; 1774 } 1775 return (0); 1776 } 1777 #endif 1778 1779 static void 1780 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg) 1781 { 1782 struct cam_sim *sim; 1783 struct ispsoftc *isp; 1784 1785 sim = (struct cam_sim *)cbarg; 1786 isp = (struct ispsoftc *) cam_sim_softc(sim); 1787 switch (code) { 1788 case AC_LOST_DEVICE: 1789 if (IS_SCSI(isp)) { 1790 u_int16_t oflags, nflags; 
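			/*
			 * On device loss, push the nvram-derived default
			 * negotiation flags back out for this target
			 * (presumably so a replacement device starts from a
			 * clean slate), then restore the previous goal flags.
			 */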
		sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			if (tgt >= 0) {
				sdp += cam_sim_bus(sim);
				ISP_LOCK(isp);
				nflags = sdp->isp_devparam[tgt].nvrm_flags;
#ifndef	ISP_TARGET_MODE
				nflags &= DPARM_SAFE_DFLT;
				if (isp->isp_loaded_fw) {
					nflags |= DPARM_NARROW | DPARM_ASYNC;
				}
#else
				nflags = DPARM_DEFAULT;
#endif
				oflags = sdp->isp_devparam[tgt].goal_flags;
				sdp->isp_devparam[tgt].goal_flags = nflags;
				sdp->isp_devparam[tgt].dev_update = 1;
				isp->isp_update |= (1 << cam_sim_bus(sim));
				(void) isp_control(isp,
				    ISPCTL_UPDATE_PARAMS, NULL);
				sdp->isp_devparam[tgt].goal_flags = oflags;
				ISP_UNLOCK(isp);
			}
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}

static void
isp_poll(struct cam_sim *sim)
{
	struct ispsoftc *isp = cam_sim_softc(sim);
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}


static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;
	int iok;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	iok = isp->isp_osinfo.intsok;
	isp->isp_osinfo.intsok = 0;
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t isr, sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
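			 * Its DMA mappings and handle can be released and
			 * the CCB handed back to CAM as a timed-out command.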
1889 */ 1890 if (XS_XFRLEN(xs)) { 1891 ISP_DMAFREE(isp, xs, handle); 1892 } 1893 isp_destroy_handle(isp, handle); 1894 xpt_print_path(xs->ccb_h.path); 1895 isp_prt(isp, ISP_LOGWARN, 1896 "watchdog timeout for handle 0x%x", handle); 1897 XS_SETERR(xs, CAM_CMD_TIMEOUT); 1898 XS_CMD_C_WDOG(xs); 1899 isp_done(xs); 1900 } else { 1901 u_int16_t nxti, optr; 1902 ispreq_t local, *mp= &local, *qe; 1903 1904 XS_CMD_C_WDOG(xs); 1905 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 1906 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) { 1907 ISP_UNLOCK(isp); 1908 return; 1909 } 1910 XS_CMD_S_GRACE(xs); 1911 MEMZERO((void *) mp, sizeof (*mp)); 1912 mp->req_header.rqs_entry_count = 1; 1913 mp->req_header.rqs_entry_type = RQSTYPE_MARKER; 1914 mp->req_modifier = SYNC_ALL; 1915 mp->req_target = XS_CHANNEL(xs) << 7; 1916 isp_put_request(isp, mp, qe); 1917 ISP_ADD_REQUEST(isp, nxti); 1918 } 1919 } else { 1920 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command"); 1921 } 1922 isp->isp_osinfo.intsok = iok; 1923 ISP_UNLOCK(isp); 1924 } 1925 1926 static void 1927 isp_kthread(void *arg) 1928 { 1929 struct ispsoftc *isp = arg; 1930 int s; 1931 1932 s = splcam(); 1933 isp->isp_osinfo.intsok = 1; 1934 1935 /* 1936 * The first loop is for our usage where we have yet to have 1937 * gotten good fibre channel state. 1938 */ 1939 for (;;) { 1940 int wasfrozen; 1941 1942 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state"); 1943 while (isp_fc_runstate(isp, 2 * 1000000) != 0) { 1944 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood"); 1945 if (FCPARAM(isp)->isp_fwstate != FW_READY || 1946 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) { 1947 if (FCPARAM(isp)->loop_seen_once == 0 || 1948 isp->isp_osinfo.ktmature == 0) { 1949 break; 1950 } 1951 } 1952 tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz); 1953 1954 } 1955 1956 /* 1957 * Even if we didn't get good loop state we may be 1958 * unfreezing the SIMQ so that we can kill off 1959 * commands (if we've never seen loop before, for example). 1960 */ 1961 isp->isp_osinfo.ktmature = 1; 1962 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 1963 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 1964 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 1965 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq"); 1966 ISPLOCK_2_CAMLOCK(isp); 1967 xpt_release_simq(isp->isp_sim, 1); 1968 CAMLOCK_2_ISPLOCK(isp); 1969 } 1970 tsleep(&isp->isp_osinfo.kproc, PRIBIO, "isp_fc_worker", 0); 1971 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called"); 1972 } 1973 } 1974 1975 static void 1976 isp_action(struct cam_sim *sim, union ccb *ccb) 1977 { 1978 int bus, tgt, error; 1979 struct ispsoftc *isp; 1980 struct ccb_trans_settings *cts; 1981 1982 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 1983 1984 isp = (struct ispsoftc *)cam_sim_softc(sim); 1985 ccb->ccb_h.sim_priv.entries[0].field = 0; 1986 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 1987 if (isp->isp_state != ISP_RUNSTATE && 1988 ccb->ccb_h.func_code == XPT_SCSI_IO) { 1989 CAMLOCK_2_ISPLOCK(isp); 1990 isp_init(isp); 1991 if (isp->isp_state != ISP_INITSTATE) { 1992 ISP_UNLOCK(isp); 1993 /* 1994 * Lie. Say it was a selection timeout. 
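			 * A selection timeout is something CAM already knows
			 * how to cope with, unlike a chip that refused to
			 * (re)initialize.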
1995 */ 1996 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 1997 xpt_freeze_devq(ccb->ccb_h.path, 1); 1998 xpt_done(ccb); 1999 return; 2000 } 2001 isp->isp_state = ISP_RUNSTATE; 2002 ISPLOCK_2_CAMLOCK(isp); 2003 } 2004 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2005 2006 2007 switch (ccb->ccb_h.func_code) { 2008 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2009 /* 2010 * Do a couple of preliminary checks... 2011 */ 2012 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2013 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2014 ccb->ccb_h.status = CAM_REQ_INVALID; 2015 xpt_done(ccb); 2016 break; 2017 } 2018 } 2019 #ifdef DIAGNOSTIC 2020 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2021 ccb->ccb_h.status = CAM_PATH_INVALID; 2022 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2023 ccb->ccb_h.status = CAM_PATH_INVALID; 2024 } 2025 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2026 isp_prt(isp, ISP_LOGERR, 2027 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO", 2028 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 2029 xpt_done(ccb); 2030 break; 2031 } 2032 #endif 2033 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2034 CAMLOCK_2_ISPLOCK(isp); 2035 error = isp_start((XS_T *) ccb); 2036 switch (error) { 2037 case CMD_QUEUED: 2038 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2039 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2040 u_int64_t ticks = (u_int64_t) hz; 2041 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) 2042 ticks = 60 * 1000 * ticks; 2043 else 2044 ticks = ccb->ccb_h.timeout * hz; 2045 ticks = ((ticks + 999) / 1000) + hz + hz; 2046 if (ticks >= 0x80000000) { 2047 isp_prt(isp, ISP_LOGERR, 2048 "timeout overflow"); 2049 ticks = 0x7fffffff; 2050 } 2051 ccb->ccb_h.timeout_ch = timeout(isp_watchdog, 2052 (caddr_t)ccb, (int)ticks); 2053 } else { 2054 callout_handle_init(&ccb->ccb_h.timeout_ch); 2055 } 2056 ISPLOCK_2_CAMLOCK(isp); 2057 break; 2058 case CMD_RQLATER: 2059 /* 2060 * This can only happen for Fibre Channel 2061 */ 2062 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2063 if (FCPARAM(isp)->loop_seen_once == 0 && 2064 isp->isp_osinfo.ktmature) { 2065 ISPLOCK_2_CAMLOCK(isp); 2066 XS_SETERR(ccb, CAM_SEL_TIMEOUT); 2067 xpt_done(ccb); 2068 break; 2069 } 2070 wakeup(&isp->isp_osinfo.kproc); 2071 isp_freeze_loopdown(isp, "isp_action(RQLATER)"); 2072 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 2073 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2074 ISPLOCK_2_CAMLOCK(isp); 2075 xpt_done(ccb); 2076 break; 2077 case CMD_EAGAIN: 2078 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2079 ISPLOCK_2_CAMLOCK(isp); 2080 xpt_done(ccb); 2081 break; 2082 case CMD_COMPLETE: 2083 isp_done((struct ccb_scsiio *) ccb); 2084 ISPLOCK_2_CAMLOCK(isp); 2085 break; 2086 default: 2087 isp_prt(isp, ISP_LOGERR, 2088 "What's this? 
0x%x at %d in file %s", 2089 error, __LINE__, __FILE__); 2090 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2091 xpt_done(ccb); 2092 ISPLOCK_2_CAMLOCK(isp); 2093 } 2094 break; 2095 2096 #ifdef ISP_TARGET_MODE 2097 case XPT_EN_LUN: /* Enable LUN as a target */ 2098 { 2099 int iok; 2100 CAMLOCK_2_ISPLOCK(isp); 2101 iok = isp->isp_osinfo.intsok; 2102 isp->isp_osinfo.intsok = 0; 2103 isp_en_lun(isp, ccb); 2104 isp->isp_osinfo.intsok = iok; 2105 ISPLOCK_2_CAMLOCK(isp); 2106 xpt_done(ccb); 2107 break; 2108 } 2109 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2110 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2111 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2112 { 2113 tstate_t *tptr = 2114 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2115 if (tptr == NULL) { 2116 ccb->ccb_h.status = CAM_LUN_INVALID; 2117 xpt_done(ccb); 2118 break; 2119 } 2120 ccb->ccb_h.sim_priv.entries[0].field = 0; 2121 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2122 ccb->ccb_h.flags = 0; 2123 2124 CAMLOCK_2_ISPLOCK(isp); 2125 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2126 /* 2127 * Note that the command itself may not be done- 2128 * it may not even have had the first CTIO sent. 2129 */ 2130 tptr->atio_count++; 2131 isp_prt(isp, ISP_LOGTDEBUG0, 2132 "Put FREE ATIO2, lun %d, count now %d", 2133 ccb->ccb_h.target_lun, tptr->atio_count); 2134 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2135 sim_links.sle); 2136 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2137 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2138 sim_links.sle); 2139 } else { 2140 ; 2141 } 2142 rls_lun_statep(isp, tptr); 2143 ccb->ccb_h.status = CAM_REQ_INPROG; 2144 ISPLOCK_2_CAMLOCK(isp); 2145 break; 2146 } 2147 case XPT_CONT_TARGET_IO: 2148 { 2149 CAMLOCK_2_ISPLOCK(isp); 2150 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb); 2151 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 2152 isp_prt(isp, ISP_LOGWARN, 2153 "XPT_CONT_TARGET_IO: status 0x%x", 2154 ccb->ccb_h.status); 2155 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2156 ISPLOCK_2_CAMLOCK(isp); 2157 xpt_done(ccb); 2158 } else { 2159 ISPLOCK_2_CAMLOCK(isp); 2160 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2161 } 2162 break; 2163 } 2164 #endif 2165 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2166 2167 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2168 tgt = ccb->ccb_h.target_id; 2169 tgt |= (bus << 16); 2170 2171 CAMLOCK_2_ISPLOCK(isp); 2172 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2173 ISPLOCK_2_CAMLOCK(isp); 2174 if (error) { 2175 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2176 } else { 2177 ccb->ccb_h.status = CAM_REQ_CMP; 2178 } 2179 xpt_done(ccb); 2180 break; 2181 case XPT_ABORT: /* Abort the specified CCB */ 2182 { 2183 union ccb *accb = ccb->cab.abort_ccb; 2184 CAMLOCK_2_ISPLOCK(isp); 2185 switch (accb->ccb_h.func_code) { 2186 #ifdef ISP_TARGET_MODE 2187 case XPT_ACCEPT_TARGET_IO: 2188 case XPT_IMMED_NOTIFY: 2189 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2190 break; 2191 case XPT_CONT_TARGET_IO: 2192 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2193 ccb->ccb_h.status = CAM_UA_ABORT; 2194 break; 2195 #endif 2196 case XPT_SCSI_IO: 2197 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2198 if (error) { 2199 ccb->ccb_h.status = CAM_UA_ABORT; 2200 } else { 2201 ccb->ccb_h.status = CAM_REQ_CMP; 2202 } 2203 break; 2204 default: 2205 ccb->ccb_h.status = CAM_REQ_INVALID; 2206 break; 2207 } 2208 ISPLOCK_2_CAMLOCK(isp); 2209 xpt_done(ccb); 2210 break; 2211 } 2212 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2213 case 
XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2214 cts = &ccb->cts;
2215 if (!IS_CURRENT_SETTINGS(cts)) {
2216 ccb->ccb_h.status = CAM_REQ_INVALID;
2217 xpt_done(ccb);
2218 break;
2219 }
2220 tgt = cts->ccb_h.target_id;
2221 CAMLOCK_2_ISPLOCK(isp);
2222 if (IS_SCSI(isp)) {
2223 sdparam *sdp = isp->isp_param;
2224 u_int16_t *dptr;
2225
2226 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2227
2228 sdp += bus;
2229 /*
2230 * We always update (internally) from goal_flags
2231 * so any request to change settings just gets
2232 * vectored to that location.
2233 */
2234 dptr = &sdp->isp_devparam[tgt].goal_flags;
2235
2236 /*
2237 * Note that these operations affect the
2238 * goal flags (goal_flags)- not
2239 * the current state flags. Then we mark
2240 * things so that the next operation to
2241 * this HBA will cause the update to occur.
2242 */
2243 if (cts->valid & CCB_TRANS_DISC_VALID) {
2244 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2245 *dptr |= DPARM_DISC;
2246 } else {
2247 *dptr &= ~DPARM_DISC;
2248 }
2249 }
2250 if (cts->valid & CCB_TRANS_TQ_VALID) {
2251 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2252 *dptr |= DPARM_TQING;
2253 } else {
2254 *dptr &= ~DPARM_TQING;
2255 }
2256 }
2257 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2258 switch (cts->bus_width) {
2259 case MSG_EXT_WDTR_BUS_16_BIT:
2260 *dptr |= DPARM_WIDE;
2261 break;
2262 default:
2263 *dptr &= ~DPARM_WIDE;
2264 }
2265 }
2266 /*
2267 * Any SYNC RATE of nonzero and SYNC_OFFSET
2268 * of nonzero will cause us to go to the
2269 * selected (from NVRAM) maximum value for
2270 * this device. At a later point, we'll
2271 * allow finer control.
2272 */
2273 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2274 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2275 (cts->sync_offset > 0)) {
2276 *dptr |= DPARM_SYNC;
2277 } else {
2278 *dptr &= ~DPARM_SYNC;
2279 }
2280 *dptr |= DPARM_SAFE_DFLT;
2281 isp_prt(isp, ISP_LOGDEBUG0,
2282 "SET bus %d targ %d to flags %x off %x per %x",
2283 bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2284 sdp->isp_devparam[tgt].goal_offset,
2285 sdp->isp_devparam[tgt].goal_period);
2286 sdp->isp_devparam[tgt].dev_update = 1;
2287 isp->isp_update |= (1 << bus);
2288 }
2289 ISPLOCK_2_CAMLOCK(isp);
2290 ccb->ccb_h.status = CAM_REQ_CMP;
2291 xpt_done(ccb);
2292 break;
2293 case XPT_GET_TRAN_SETTINGS:
2294 cts = &ccb->cts;
2295 tgt = cts->ccb_h.target_id;
2296 CAMLOCK_2_ISPLOCK(isp);
2297 if (IS_FC(isp)) {
2298 /*
2299 * a lot of normal SCSI things don't make sense.
2300 */
2301 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2302 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2303 /*
2304 * How do you measure the width of a high
2305 * speed serial bus? Well, in bytes.
2306 *
2307 * Offset and period make no sense, though, so we set
2308 * (above) a 'base' transfer speed to be gigabit.
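 * (The actual 'base' transfer speed is reported to CAM via the
 * XPT_PATH_INQ handler below.)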
2309 */ 2310 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2311 } else { 2312 sdparam *sdp = isp->isp_param; 2313 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2314 u_int16_t dval, pval, oval; 2315 2316 sdp += bus; 2317 2318 if (IS_CURRENT_SETTINGS(cts)) { 2319 sdp->isp_devparam[tgt].dev_refresh = 1; 2320 isp->isp_update |= (1 << bus); 2321 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2322 NULL); 2323 dval = sdp->isp_devparam[tgt].actv_flags; 2324 oval = sdp->isp_devparam[tgt].actv_offset; 2325 pval = sdp->isp_devparam[tgt].actv_period; 2326 } else { 2327 dval = sdp->isp_devparam[tgt].nvrm_flags; 2328 oval = sdp->isp_devparam[tgt].nvrm_offset; 2329 pval = sdp->isp_devparam[tgt].nvrm_period; 2330 } 2331 2332 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2333 2334 if (dval & DPARM_DISC) { 2335 cts->flags |= CCB_TRANS_DISC_ENB; 2336 } 2337 if (dval & DPARM_TQING) { 2338 cts->flags |= CCB_TRANS_TAG_ENB; 2339 } 2340 if (dval & DPARM_WIDE) { 2341 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2342 } else { 2343 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2344 } 2345 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2346 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2347 2348 if ((dval & DPARM_SYNC) && oval != 0) { 2349 cts->sync_period = pval; 2350 cts->sync_offset = oval; 2351 cts->valid |= 2352 CCB_TRANS_SYNC_RATE_VALID | 2353 CCB_TRANS_SYNC_OFFSET_VALID; 2354 } 2355 isp_prt(isp, ISP_LOGDEBUG0, 2356 "GET %s bus %d targ %d to flags %x off %x per %x", 2357 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2358 bus, tgt, dval, oval, pval); 2359 } 2360 ISPLOCK_2_CAMLOCK(isp); 2361 ccb->ccb_h.status = CAM_REQ_CMP; 2362 xpt_done(ccb); 2363 break; 2364 2365 case XPT_CALC_GEOMETRY: 2366 { 2367 struct ccb_calc_geometry *ccg; 2368 u_int32_t secs_per_cylinder; 2369 u_int32_t size_mb; 2370 2371 ccg = &ccb->ccg; 2372 if (ccg->block_size == 0) { 2373 isp_prt(isp, ISP_LOGERR, 2374 "%d.%d XPT_CALC_GEOMETRY block size 0?", 2375 ccg->ccb_h.target_id, ccg->ccb_h.target_lun); 2376 ccb->ccb_h.status = CAM_REQ_INVALID; 2377 xpt_done(ccb); 2378 break; 2379 } 2380 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2381 if (size_mb > 1024) { 2382 ccg->heads = 255; 2383 ccg->secs_per_track = 63; 2384 } else { 2385 ccg->heads = 64; 2386 ccg->secs_per_track = 32; 2387 } 2388 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2389 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2390 ccb->ccb_h.status = CAM_REQ_CMP; 2391 xpt_done(ccb); 2392 break; 2393 } 2394 case XPT_RESET_BUS: /* Reset the specified bus */ 2395 bus = cam_sim_bus(sim); 2396 CAMLOCK_2_ISPLOCK(isp); 2397 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2398 ISPLOCK_2_CAMLOCK(isp); 2399 if (error) 2400 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2401 else { 2402 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2403 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2404 else if (isp->isp_path != NULL) 2405 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2406 ccb->ccb_h.status = CAM_REQ_CMP; 2407 } 2408 xpt_done(ccb); 2409 break; 2410 2411 case XPT_TERM_IO: /* Terminate the I/O process */ 2412 ccb->ccb_h.status = CAM_REQ_INVALID; 2413 xpt_done(ccb); 2414 break; 2415 2416 case XPT_PATH_INQ: /* Path routing inquiry */ 2417 { 2418 struct ccb_pathinq *cpi = &ccb->cpi; 2419 2420 cpi->version_num = 1; 2421 #ifdef ISP_TARGET_MODE 2422 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2423 #else 2424 cpi->target_sprt = 0; 2425 #endif 2426 cpi->hba_eng_cnt = 0; 2427 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2428 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 
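/*
 * Fill in the bus specific fields: Fibre Channel HBAs report an out
 * of range initiator ID and an estimate of the link speed; parallel
 * SCSI HBAs report the NVRAM initiator ID and sync/wide/tag
 * capabilities.
 */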
2429 cpi->bus_id = cam_sim_bus(sim); 2430 if (IS_FC(isp)) { 2431 cpi->hba_misc = PIM_NOBUSRESET; 2432 /* 2433 * Because our loop ID can shift from time to time, 2434 * make our initiator ID out of range of our bus. 2435 */ 2436 cpi->initiator_id = cpi->max_target + 1; 2437 2438 /* 2439 * Set base transfer capabilities for Fibre Channel. 2440 * Technically not correct because we don't know 2441 * what media we're running on top of- but we'll 2442 * look good if we always say 100MB/s. 2443 */ 2444 if (FCPARAM(isp)->isp_gbspeed == 2) 2445 cpi->base_transfer_speed = 200000; 2446 else 2447 cpi->base_transfer_speed = 100000; 2448 cpi->hba_inquiry = PI_TAG_ABLE; 2449 } else { 2450 sdparam *sdp = isp->isp_param; 2451 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2452 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2453 cpi->hba_misc = 0; 2454 cpi->initiator_id = sdp->isp_initiator_id; 2455 cpi->base_transfer_speed = 3300; 2456 } 2457 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2458 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2459 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2460 cpi->unit_number = cam_sim_unit(sim); 2461 cpi->ccb_h.status = CAM_REQ_CMP; 2462 xpt_done(ccb); 2463 break; 2464 } 2465 default: 2466 ccb->ccb_h.status = CAM_REQ_INVALID; 2467 xpt_done(ccb); 2468 break; 2469 } 2470 } 2471 2472 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2473 void 2474 isp_done(struct ccb_scsiio *sccb) 2475 { 2476 struct ispsoftc *isp = XS_ISP(sccb); 2477 2478 if (XS_NOERR(sccb)) 2479 XS_SETERR(sccb, CAM_REQ_CMP); 2480 2481 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2482 (sccb->scsi_status != SCSI_STATUS_OK)) { 2483 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2484 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2485 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2486 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2487 } else { 2488 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2489 } 2490 } 2491 2492 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2493 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2494 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2495 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2496 xpt_freeze_devq(sccb->ccb_h.path, 1); 2497 isp_prt(isp, ISP_LOGDEBUG0, 2498 "freeze devq %d.%d cam sts %x scsi sts %x", 2499 sccb->ccb_h.target_id, sccb->ccb_h.target_lun, 2500 sccb->ccb_h.status, sccb->scsi_status); 2501 } 2502 } 2503 2504 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 2505 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2506 xpt_print_path(sccb->ccb_h.path); 2507 isp_prt(isp, ISP_LOGINFO, 2508 "cam completion status 0x%x", sccb->ccb_h.status); 2509 } 2510 2511 XS_CMD_S_DONE(sccb); 2512 if (XS_CMD_WDOG_P(sccb) == 0) { 2513 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 2514 if (XS_CMD_GRACE_P(sccb)) { 2515 isp_prt(isp, ISP_LOGDEBUG2, 2516 "finished command on borrowed time"); 2517 } 2518 XS_CMD_S_CLEAR(sccb); 2519 ISPLOCK_2_CAMLOCK(isp); 2520 xpt_done((union ccb *) sccb); 2521 CAMLOCK_2_ISPLOCK(isp); 2522 } 2523 } 2524 2525 int 2526 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) 2527 { 2528 int bus, rv = 0; 2529 switch (cmd) { 2530 case ISPASYNC_NEW_TGT_PARAMS: 2531 { 2532 int flags, tgt; 2533 sdparam *sdp = isp->isp_param; 2534 struct ccb_trans_settings cts; 2535 struct cam_path *tmppath; 2536 2537 bzero(&cts, sizeof (struct ccb_trans_settings)); 2538 2539 tgt = *((int *)arg); 2540 bus = (tgt >> 16) & 0xffff; 2541 tgt &= 0xffff; 2542 sdp += bus; 2543 ISPLOCK_2_CAMLOCK(isp); 2544 if 
(xpt_create_path(&tmppath, NULL,
2545 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2546 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2547 CAMLOCK_2_ISPLOCK(isp);
2548 isp_prt(isp, ISP_LOGWARN,
2549 "isp_async cannot make temp path for %d.%d",
2550 tgt, bus);
2551 rv = -1;
2552 break;
2553 }
2554 CAMLOCK_2_ISPLOCK(isp);
2555 flags = sdp->isp_devparam[tgt].actv_flags;
2556 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2557 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2558 if (flags & DPARM_DISC) {
2559 cts.flags |= CCB_TRANS_DISC_ENB;
2560 }
2561 if (flags & DPARM_TQING) {
2562 cts.flags |= CCB_TRANS_TAG_ENB;
2563 }
2564 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2565 cts.bus_width = (flags & DPARM_WIDE)?
2566 MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2567 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2568 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2569 if (flags & DPARM_SYNC) {
2570 cts.valid |=
2571 CCB_TRANS_SYNC_RATE_VALID |
2572 CCB_TRANS_SYNC_OFFSET_VALID;
2573 }
2574 isp_prt(isp, ISP_LOGDEBUG2,
2575 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2576 bus, tgt, sdp->isp_devparam[tgt].actv_period,
2577 sdp->isp_devparam[tgt].actv_offset, flags);
2578 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2579 ISPLOCK_2_CAMLOCK(isp);
2580 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2581 xpt_free_path(tmppath);
2582 CAMLOCK_2_ISPLOCK(isp);
2583 break;
2584 }
2585 case ISPASYNC_BUS_RESET:
2586 bus = *((int *)arg);
2587 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2588 bus);
2589 if (bus > 0 && isp->isp_path2) {
2590 ISPLOCK_2_CAMLOCK(isp);
2591 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2592 CAMLOCK_2_ISPLOCK(isp);
2593 } else if (isp->isp_path) {
2594 ISPLOCK_2_CAMLOCK(isp);
2595 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2596 CAMLOCK_2_ISPLOCK(isp);
2597 }
2598 break;
2599 case ISPASYNC_LIP:
2600 if (isp->isp_path) {
2601 isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2602 }
2603 isp_prt(isp, ISP_LOGINFO, "LIP Received");
2604 break;
2605 case ISPASYNC_LOOP_RESET:
2606 if (isp->isp_path) {
2607 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2608 }
2609 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2610 break;
2611 case ISPASYNC_LOOP_DOWN:
2612 if (isp->isp_path) {
2613 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2614 }
2615 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2616 break;
2617 case ISPASYNC_LOOP_UP:
2618 /*
2619 * Now we just note that Loop has come up. We don't
2620 * actually do anything because we're waiting for a
2621 * Change Notify before activating the FC cleanup
2622 * thread to look at the state of the loop again.
2623 */
2624 isp_prt(isp, ISP_LOGINFO, "Loop UP");
2625 break;
2626 case ISPASYNC_PROMENADE:
2627 {
2628 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2629 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2630 static const char *roles[4] = {
2631 "(none)", "Target", "Initiator", "Target/Initiator"
2632 };
2633 fcparam *fcp = isp->isp_param;
2634 int tgt = *((int *) arg);
2635 struct lportdb *lp = &fcp->portdb[tgt];
2636
2637 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2638 roles[lp->roles & 0x3],
2639 (lp->valid)?
"Arrived" : "Departed", 2640 (u_int32_t) (lp->port_wwn >> 32), 2641 (u_int32_t) (lp->port_wwn & 0xffffffffLL), 2642 (u_int32_t) (lp->node_wwn >> 32), 2643 (u_int32_t) (lp->node_wwn & 0xffffffffLL)); 2644 2645 break; 2646 } 2647 case ISPASYNC_CHANGE_NOTIFY: 2648 if (arg == ISPASYNC_CHANGE_PDB) { 2649 isp_prt(isp, ISP_LOGINFO, 2650 "Port Database Changed"); 2651 } else if (arg == ISPASYNC_CHANGE_SNS) { 2652 isp_prt(isp, ISP_LOGINFO, 2653 "Name Server Database Changed"); 2654 } 2655 wakeup(&isp->isp_osinfo.kproc); 2656 break; 2657 case ISPASYNC_FABRIC_DEV: 2658 { 2659 int target, base, lim; 2660 fcparam *fcp = isp->isp_param; 2661 struct lportdb *lp = NULL; 2662 struct lportdb *clp = (struct lportdb *) arg; 2663 char *pt; 2664 2665 switch (clp->port_type) { 2666 case 1: 2667 pt = " N_Port"; 2668 break; 2669 case 2: 2670 pt = " NL_Port"; 2671 break; 2672 case 3: 2673 pt = "F/NL_Port"; 2674 break; 2675 case 0x7f: 2676 pt = " Nx_Port"; 2677 break; 2678 case 0x81: 2679 pt = " F_port"; 2680 break; 2681 case 0x82: 2682 pt = " FL_Port"; 2683 break; 2684 case 0x84: 2685 pt = " E_port"; 2686 break; 2687 default: 2688 pt = " "; 2689 break; 2690 } 2691 2692 isp_prt(isp, ISP_LOGINFO, 2693 "%s Fabric Device @ PortID 0x%x", pt, clp->portid); 2694 2695 /* 2696 * If we don't have an initiator role we bail. 2697 * 2698 * We just use ISPASYNC_FABRIC_DEV for announcement purposes. 2699 */ 2700 2701 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) { 2702 break; 2703 } 2704 2705 /* 2706 * Is this entry for us? If so, we bail. 2707 */ 2708 2709 if (fcp->isp_portid == clp->portid) { 2710 break; 2711 } 2712 2713 /* 2714 * Else, the default policy is to find room for it in 2715 * our local port database. Later, when we execute 2716 * the call to isp_pdb_sync either this newly arrived 2717 * or already logged in device will be (re)announced. 2718 */ 2719 2720 if (fcp->isp_topo == TOPO_FL_PORT) 2721 base = FC_SNS_ID+1; 2722 else 2723 base = 0; 2724 2725 if (fcp->isp_topo == TOPO_N_PORT) 2726 lim = 1; 2727 else 2728 lim = MAX_FC_TARG; 2729 2730 /* 2731 * Is it already in our list? 
2732 */ 2733 for (target = base; target < lim; target++) { 2734 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2735 continue; 2736 } 2737 lp = &fcp->portdb[target]; 2738 if (lp->port_wwn == clp->port_wwn && 2739 lp->node_wwn == clp->node_wwn) { 2740 lp->fabric_dev = 1; 2741 break; 2742 } 2743 } 2744 if (target < lim) { 2745 break; 2746 } 2747 for (target = base; target < lim; target++) { 2748 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2749 continue; 2750 } 2751 lp = &fcp->portdb[target]; 2752 if (lp->port_wwn == 0) { 2753 break; 2754 } 2755 } 2756 if (target == lim) { 2757 isp_prt(isp, ISP_LOGWARN, 2758 "out of space for fabric devices"); 2759 break; 2760 } 2761 lp->port_type = clp->port_type; 2762 lp->fc4_type = clp->fc4_type; 2763 lp->node_wwn = clp->node_wwn; 2764 lp->port_wwn = clp->port_wwn; 2765 lp->portid = clp->portid; 2766 lp->fabric_dev = 1; 2767 break; 2768 } 2769 #ifdef ISP_TARGET_MODE 2770 case ISPASYNC_TARGET_MESSAGE: 2771 { 2772 tmd_msg_t *mp = arg; 2773 isp_prt(isp, ISP_LOGALL, 2774 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x", 2775 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt, 2776 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval, 2777 mp->nt_msg[0]); 2778 break; 2779 } 2780 case ISPASYNC_TARGET_EVENT: 2781 { 2782 tmd_event_t *ep = arg; 2783 isp_prt(isp, ISP_LOGALL, 2784 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event); 2785 break; 2786 } 2787 case ISPASYNC_TARGET_ACTION: 2788 switch (((isphdr_t *)arg)->rqs_entry_type) { 2789 default: 2790 isp_prt(isp, ISP_LOGWARN, 2791 "event 0x%x for unhandled target action", 2792 ((isphdr_t *)arg)->rqs_entry_type); 2793 break; 2794 case RQSTYPE_NOTIFY: 2795 if (IS_SCSI(isp)) { 2796 rv = isp_handle_platform_notify_scsi(isp, 2797 (in_entry_t *) arg); 2798 } else { 2799 rv = isp_handle_platform_notify_fc(isp, 2800 (in_fcentry_t *) arg); 2801 } 2802 break; 2803 case RQSTYPE_ATIO: 2804 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 2805 break; 2806 case RQSTYPE_ATIO2: 2807 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 2808 break; 2809 case RQSTYPE_CTIO2: 2810 case RQSTYPE_CTIO: 2811 rv = isp_handle_platform_ctio(isp, arg); 2812 break; 2813 case RQSTYPE_ENABLE_LUN: 2814 case RQSTYPE_MODIFY_LUN: 2815 if (IS_DUALBUS(isp)) { 2816 bus = 2817 GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd); 2818 } else { 2819 bus = 0; 2820 } 2821 isp_cv_signal_rqe(isp, bus, 2822 ((lun_entry_t *)arg)->le_status); 2823 break; 2824 } 2825 break; 2826 #endif 2827 case ISPASYNC_FW_CRASH: 2828 { 2829 u_int16_t mbox1, mbox6; 2830 mbox1 = ISP_READ(isp, OUTMAILBOX1); 2831 if (IS_DUALBUS(isp)) { 2832 mbox6 = ISP_READ(isp, OUTMAILBOX6); 2833 } else { 2834 mbox6 = 0; 2835 } 2836 isp_prt(isp, ISP_LOGERR, 2837 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 2838 mbox6, mbox1); 2839 #ifdef ISP_FW_CRASH_DUMP 2840 /* 2841 * XXX: really need a thread to do this right. 2842 */ 2843 if (IS_FC(isp)) { 2844 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 2845 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 2846 isp_freeze_loopdown(isp, "f/w crash"); 2847 isp_fw_dump(isp); 2848 } 2849 isp_reinit(isp); 2850 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 2851 #endif 2852 break; 2853 } 2854 case ISPASYNC_UNHANDLED_RESPONSE: 2855 break; 2856 default: 2857 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 2858 break; 2859 } 2860 return (rv); 2861 } 2862 2863 2864 /* 2865 * Locks are held before coming here. 
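 * All we do is reset the RISC processor and disable interrupts;
 * no CAM teardown happens here.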
2866 */ 2867 void 2868 isp_uninit(struct ispsoftc *isp) 2869 { 2870 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 2871 DISABLE_INTS(isp); 2872 } 2873 2874 void 2875 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...) 2876 { 2877 va_list ap; 2878 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 2879 return; 2880 } 2881 printf("%s: ", device_get_nameunit(isp->isp_dev)); 2882 va_start(ap, fmt); 2883 vprintf(fmt, ap); 2884 va_end(ap); 2885 printf("\n"); 2886 } 2887