/*	$NetBSD: isp_pci.c,v 1.49 1999/12/20 00:33:17 mjacob Exp $	*/
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * Matthew Jacob (mjacob@nas.nasa.gov)
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_pci.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef	ISP_DISABLE_1080_SUPPORT
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));

#ifndef	ISP_CODE_ORG
#define	ISP_CODE_ORG		0x1000
#endif
#ifndef	ISP_1040_RISC_CODE
#define	ISP_1040_RISC_CODE	NULL
#endif
#ifndef	ISP_1080_RISC_CODE
#define	ISP_1080_RISC_CODE	NULL
#endif
#ifndef	ISP_2100_RISC_CODE
#define	ISP_2100_RISC_CODE	NULL
#endif
#ifndef	ISP_2200_RISC_CODE
#define	ISP_2200_RISC_CODE	NULL
#endif

#ifndef	ISP_DISABLE_1020_SUPPORT
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1040_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef	ISP_DISABLE_1080_SUPPORT
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1080_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef	ISP_DISABLE_2100_SUPPORT
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2100_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef	ISP_DISABLE_2200_SUPPORT
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2200_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14
#define	PCIR_ROMADDR	0x30

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10


static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

struct isp_pcisoftc {
	struct ispsoftc		pci_isp;
	pci_chipset_tag_t	pci_pc;
	pcitag_t		pci_tag;
	bus_space_tag_t		pci_st;
	bus_space_handle_t	pci_sh;
	bus_dma_tag_t		pci_dmat;
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;
	bus_dmamap_t		pci_result_dmap;
	bus_dmamap_t		*pci_xfer_dmap;
	void			*pci_ih;
	int16_t			pci_poff[_NREG_BLKS];
};

struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};

static int
isp_pci_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	switch (pa->pa_id) {
#ifndef	ISP_DISABLE_1020_SUPPORT
	case PCI_QLOGIC_ISP:
		return (1);
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	case PCI_QLOGIC_ISP1080:
	case PCI_QLOGIC_ISP1240:
	case PCI_QLOGIC_ISP1280:
		return (1);
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	case PCI_QLOGIC_ISP2100:
		return (1);
#endif
#ifndef	ISP_DISABLE_2200_SUPPORT
	case PCI_QLOGIC_ISP2200:
		return (1);
#endif
	default:
		return (0);
	}
}


static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef	DEBUG
	static char oneshot = 1;
#endif
	static char *nomem = "%s: no mem for sdparam table\n";
	u_int32_t data, rev, linesz = PCI_DFLT_LNSZ;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	ISP_LOCKVAL_DECL;

	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;

#ifndef	ISP_DISABLE_1020_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1280) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (rev < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef	ISP_DISABLE_2200_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	}
#endif
	isp->isp_revision = rev;

	/*
	 * Make sure the command register is set sanely.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not so sure about these, but I think it's important that they
	 * get enabled.
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Set the latency timer and cache line size, and make sure
	 * the expansion ROM is disabled.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
	data |= (linesz << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
	data &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

#ifdef	DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	if (IS_FC(isp)) {
		long foo;
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs.
		 */
		foo = (long) isp;
		foo >>= 4;
		foo &= 0x7;
		while (version[foo])
			isp->isp_osinfo.seed += (int) version[foo++];
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
	}

	isp->isp_confopts = self->dv_cfdata->cf_flags;
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	/*
	 * Create the DMA maps for the data transfers.
	 */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}

static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
		delay(250);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
		delay(250);
	}
}

#ifndef	ISP_DISABLE_1080_SUPPORT
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (IS_1280(isp)) {
			if (regoff & SXP_BANK1_SELECT)
				tc |= BIU_PCI1080_CONF1_SXP0;
			else
				tc |= BIU_PCI1080_CONF1_SXP1;
		} else {
			tc |= BIU_PCI1080_CONF1_SXP0;
		}
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
		delay(250);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	/*
	 * Okay, because BIU_CONF1 is always nonzero
	 */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
		delay(250);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	u_int16_t oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (IS_1280(isp)) {
			if (regoff & SXP_BANK1_SELECT)
				tc |= BIU_PCI1080_CONF1_SXP0;
			else
				tc |= BIU_PCI1080_CONF1_SXP1;
		} else {
			tc |= BIU_PCI1080_CONF1_SXP0;
		}
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
		delay(250);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	/*
	 * Okay, because BIU_CONF1 is always nonzero
	 */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
		delay(250);
	}
}
#endif

/*
 * Allocate and map DMA-safe memory for the request and result queues
 * (and, for Fibre Channel cards, the scratch area).
 */
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	if (isp->isp_rquest_dma)	/* been here before? */
		return (0);

	len = isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T);
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: cannot malloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, len);
	len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
	pci->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->pci_xfer_dmap == NULL) {
		printf("%s: cannot malloc DMA map array\n", isp->isp_name);
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_rquest_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_rquest_dmap, (caddr_t)isp->isp_rquest, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);

	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_result_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_result_dmap, (caddr_t)isp->isp_result, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	if (IS_SCSI(isp)) {
		return (0);
	}

	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_scratch_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_scratch_dmap, (caddr_t)fcp->isp_scratch, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}

/*
 * Load the DMA map for a data transfer and fill in the request entry's
 * data segment descriptors, using continuation entries if necessary.
 */
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}
	assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
	if (xs->xs_control & XS_CTL_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->xs_control & XS_CTL_NOSLEEP ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < seglim;
	    seg++, rq->req_seg_count++) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	do {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow++\n", isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:
	ISP_SWIZZLE_REQUEST(isp, rq);
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}

static int
isp_pci_intr(arg)
	void *arg;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
	bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
	    pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	return (isp_intr(arg));
}

static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap;
	assert(handle != 0 && handle <= isp->isp_maxcmds);
	dmap = pci->pci_xfer_dmap[handle-1];
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->xs_control & XS_CTL_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}

static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}

static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
}