/* $NetBSD: isp_pci.c,v 1.51 2000/02/19 01:54:42 mjacob Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * Matthew Jacob (mjacob@nas.nasa.gov)
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_pci.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef	ISP_DISABLE_1080_SUPPORT
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
    ispreq_t *, u_int16_t *, u_int16_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
    u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));

#ifndef	ISP_CODE_ORG
#define	ISP_CODE_ORG		0x1000
#endif
#ifndef	ISP_1040_RISC_CODE
#define	ISP_1040_RISC_CODE	NULL
#endif
#ifndef	ISP_1080_RISC_CODE
#define	ISP_1080_RISC_CODE	NULL
#endif
#ifndef	ISP_12160_RISC_CODE
#define	ISP_12160_RISC_CODE	NULL
#endif
#ifndef	ISP_2100_RISC_CODE
#define	ISP_2100_RISC_CODE	NULL
#endif
#ifndef	ISP_2200_RISC_CODE
#define	ISP_2200_RISC_CODE	NULL
#endif

#ifndef	ISP_DISABLE_1020_SUPPORT
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1040_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef	ISP_DISABLE_1080_SUPPORT
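/*
 * The ISP1080/1240/1280 vector uses the bank-switched register accessors
 * (isp_pci_rd_reg_1080/isp_pci_wr_reg_1080) rather than the plain
 * accessors used for the ISP1020.
 */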
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1080_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef	ISP_DISABLE_12160_SUPPORT
static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_12160_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef	ISP_DISABLE_2100_SUPPORT
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2100_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef	ISP_DISABLE_2200_SUPPORT
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2200_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14
#define	PCIR_ROMADDR	0x30

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10


static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

struct isp_pcisoftc {
	struct ispsoftc		pci_isp;
	pci_chipset_tag_t	pci_pc;
	pcitag_t		pci_tag;
	bus_space_tag_t		pci_st;
	bus_space_handle_t	pci_sh;
	bus_dma_tag_t		pci_dmat;
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;
	bus_dmamap_t		pci_result_dmap;
	bus_dmamap_t		*pci_xfer_dmap;
	void *			pci_ih;
	int16_t			pci_poff[_NREG_BLKS];
};

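/*
 * Autoconfiguration glue: match on the QLogic vendor/product IDs and
 * attach via the PCI front end defined below.
 */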
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};

static int
isp_pci_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	switch (pa->pa_id) {
#ifndef	ISP_DISABLE_1020_SUPPORT
	case PCI_QLOGIC_ISP:
		return (1);
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	case PCI_QLOGIC_ISP1080:
	case PCI_QLOGIC_ISP1240:
	case PCI_QLOGIC_ISP1280:
		return (1);
#endif
#ifndef	ISP_DISABLE_12160_SUPPORT
	case PCI_QLOGIC_ISP12160:
		return (1);
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	case PCI_QLOGIC_ISP2100:
		return (1);
#endif
#ifndef	ISP_DISABLE_2200_SUPPORT
	case PCI_QLOGIC_ISP2200:
		return (1);
#endif
	default:
		return (0);
	}
}


static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef	DEBUG
	static char oneshot = 1;
#endif
	static char *nomem = "%s: no mem for sdparam table\n";
	u_int32_t data, rev, linesz = PCI_DFLT_LNSZ;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	ISP_LOCKVAL_DECL;

	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;

#ifndef	ISP_DISABLE_1020_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
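	/*
	 * The ISP1240, ISP1280, and ISP12160 are dual-channel adapters,
	 * so two sdparam structures (one per bus) are allocated for them.
	 */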
	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1280) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef	ISP_DISABLE_12160_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP12160) {
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (rev < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef	ISP_DISABLE_2200_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	}
#endif
	isp->isp_revision = rev;

	/*
	 * Make sure that the command register is set sanely.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not so sure about these, but I think it's important that they
	 * get enabled.
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Make sure that the latency timer and cache line size are set
	 * sanely, and that the expansion ROM is disabled.
	 */
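	/*
	 * PCI_BHLC_REG packs the BIST, header type, latency timer, and
	 * cache line size fields into a single register; only the latency
	 * timer and cache line size are rewritten here.
	 */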
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
	data |= (linesz << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
	data &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

#ifdef	DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	if (IS_FC(isp)) {
		long foo;
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs.
		 */
		foo = (long) isp;
		foo >>= 4;
		foo &= 0x7;
		while (version[foo])
			isp->isp_osinfo.seed += (int) version[foo++];
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
	}

	isp->isp_confopts = self->dv_cfdata->cf_flags;
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	/*
	 * Create the DMA maps for the data transfers.
	 */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}

static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
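		/*
		 * Set the SXP select bit in BIU_CONF1 so that the SXP
		 * register bank is visible; the saved BIU_CONF1 value is
		 * restored after the read.
		 */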
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
		delay(250);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
		delay(250);
	}
}

#if !defined(ISP_DISABLE_1080_SUPPORT) && !defined(ISP_DISABLE_12160_SUPPORT)
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (IS_1280(isp)) {
			if (regoff & SXP_BANK1_SELECT)
				tc |= BIU_PCI1080_CONF1_SXP0;
			else
				tc |= BIU_PCI1080_CONF1_SXP1;
		} else {
			tc |= BIU_PCI1080_CONF1_SXP0;
		}
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
		delay(250);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	/*
	 * Restore BIU_CONF1 if we changed it above; testing for nonzero
	 * works because BIU_CONF1 itself is never zero.
	 */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
		delay(250);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	u_int16_t oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
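		/*
		 * Clear the DMA select bit and set the appropriate SXP bank
		 * select (the dual-channel 1280 has two banks) before the
		 * access; the saved BIU_CONF1 is restored afterwards.
		 */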
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (IS_1280(isp)) {
			if (regoff & SXP_BANK1_SELECT)
				tc |= BIU_PCI1080_CONF1_SXP0;
			else
				tc |= BIU_PCI1080_CONF1_SXP1;
		} else {
			tc |= BIU_PCI1080_CONF1_SXP0;
		}
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
		delay(250);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	/*
	 * Restore BIU_CONF1 if we changed it above; testing for nonzero
	 * works because BIU_CONF1 itself is never zero.
	 */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
		delay(250);
	}
}
#endif

static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	if (isp->isp_rquest_dma)	/* been here before? */
		return (0);

	len = isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T);
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: cannot malloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, len);
	len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
	pci->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->pci_xfer_dmap == NULL) {
		printf("%s: cannot malloc dma map array\n", isp->isp_name);
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_rquest_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_rquest_dmap, (caddr_t)isp->isp_rquest, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);

	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

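	/*
	 * Each queue is a single physically contiguous segment, mapped
	 * BUS_DMA_COHERENT; the loaded map's bus address is what gets
	 * recorded for the chip (isp_rquest_dma above, isp_result_dma
	 * and isp_scdma below).
	 */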
	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_result_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_result_dmap, (caddr_t)isp->isp_result, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	if (IS_SCSI(isp)) {
		return (0);
	}

	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_scratch_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_scratch_dmap, (caddr_t)fcp->isp_scratch, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}

static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}
	assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
	if (xs->xs_control & XS_CTL_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		rq->req_flags |= drq;
		if (XS_CDBLEN(xs) > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
	}
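	/*
	 * Map the data buffer, sleeping for mapping resources only if the
	 * caller allows it (XS_CTL_NOSLEEP clear).
	 */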
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->xs_control & XS_CTL_NOSLEEP ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < seglim;
	    seg++, rq->req_seg_count++) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	do {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow++\n", isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:
	ISP_SWIZZLE_REQUEST(isp, rq);
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}

static int
isp_pci_intr(arg)
	void *arg;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
	bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
	    pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	return (isp_intr(arg));
}

static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap;
	assert(handle != 0 && handle <= isp->isp_maxcmds);
	dmap = pci->pci_xfer_dmap[handle-1];
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->xs_control & XS_CTL_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}

static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}

static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
}