/* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
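 * (isp_pci_probe checks the PCI subvendor ID and returns ENXIO for these)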
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	quad_t wwn;
	bus_size_t lim;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	unit = device_get_unit(dev);
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			device_printf(dev, "not configuring\n");
			/*
			 * But return '0' to preserve HBA numbering.
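			 * (returning an error here would shift the unit
			 * numbers of any later instances)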
			 */
			return (0);
		}
	}

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
	if (pcs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}
	bzero(pcs, sizeof (struct isp_pcisoftc));

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
#ifdef	__alpha__
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
#else
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
#endif
	bitmap = 0;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_MEMEN;
			m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_PORTEN;
			m2 = PCIM_CMD_MEMEN;
		}
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	/*
	 * The PCI command register is 16 bits wide; a byte-wide access
	 * would lose PCIM_CMD_SEREN (0x0100), so read all 16 bits.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	bzero(isp->isp_param, psize);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
#ifdef	ISP_TARGET_MODE
	isp->isp_role = ISP_ROLE_BOTH;
#else
	isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	isp->isp_dev = dev;


	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;

	}
	/* Write back all 16 bits of the command register. */
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
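	 * (bit 0 of the expansion ROM base address register is its enable bit)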
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (getenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (getenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp_debug = 0;
	(void) getenv_int("isp_debug", &isp_debug);
	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM, isp_pci_intr,
	    isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

#ifdef	ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    malloc(amt, M_DEVBUF, M_WAITOK);
				bzero(FCPARAM(isp)->isp_dump_data, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif

	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Make sure we're in reset state.
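	 * (isp_reset should leave the chip in ISP_RESETSTATE before isp_init
	 * and isp_attach run below; FC cards may defer full attach)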
	 */
	ISP_LOCK(isp);
	isp_reset(isp);

	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}


	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	/* Re-read the register until two consecutive reads agree. */
	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	/* The mask must apply to the register value, not the offset. */
	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
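		 * (the BIU_CONF1 bank-select bits steer this access to SXP
		 * bank 0 or 1, or to the DMA register block)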
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define	ISP_NSEGS	((MAXPHYS / PAGE_SIZE) + 1)

static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef	ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
#else
	alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	ISP_UNLOCK(isp);
	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode
 * differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code and
 * improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
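		 * (the rsp.m1 union member carries the MODE1 status layout
		 * that is logged just below)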
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}


	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d",
		    cto->ct_seg_count, dm_segs[segcnt].ds_addr,
		    dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%x:%u",
			    cto->ct_header.rqs_entry_count-1, seg,
			    dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
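	 * (segments that don't fit in the first entry go out as
	 * RQSTYPE_DATASEG continuation entries in the second loop below)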
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			/* consume this segment's length before advancing */
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;


	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
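	 * (the mush_t below carries isp, ccb, request entry and queue
	 * indices into the bus_dmamap_load callback)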
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
", ISP_READ(isp, BIU_ICR), 1847 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 1848 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 1849 1850 1851 if (IS_SCSI(isp)) { 1852 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 1853 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 1854 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 1855 ISP_READ(isp, CDMA_FIFO_STS)); 1856 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 1857 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 1858 ISP_READ(isp, DDMA_FIFO_STS)); 1859 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 1860 ISP_READ(isp, SXP_INTERRUPT), 1861 ISP_READ(isp, SXP_GROSS_ERR), 1862 ISP_READ(isp, SXP_PINS_CTRL)); 1863 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 1864 } 1865 printf(" mbox regs: %x %x %x %x %x\n", 1866 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 1867 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 1868 ISP_READ(isp, OUTMAILBOX4)); 1869 printf(" PCI Status Command/Status=%x\n", 1870 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 1871 } 1872