1 /* $NetBSD: pciide_common.c,v 1.37 2007/03/04 06:02:25 christos Exp $ */ 2 3 4 /* 5 * Copyright (c) 1999, 2000, 2001, 2003 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Manuel Bouyer. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34 35 36 /* 37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 
38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed by Christopher G. Demetriou 50 * for the NetBSD Project. 51 * 4. The name of the author may not be used to endorse or promote products 52 * derived from this software without specific prior written permission 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 64 */ 65 66 /* 67 * PCI IDE controller driver. 68 * 69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 70 * sys/dev/pci/ppb.c, revision 1.16). 
71 * 72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 74 * 5/16/94" from the PCI SIG. 75 * 76 */ 77 78 #include <sys/cdefs.h> 79 __KERNEL_RCSID(0, "$NetBSD: pciide_common.c,v 1.37 2007/03/04 06:02:25 christos Exp $"); 80 81 #include <sys/param.h> 82 #include <sys/malloc.h> 83 84 #include <uvm/uvm_extern.h> 85 86 #include <dev/pci/pcireg.h> 87 #include <dev/pci/pcivar.h> 88 #include <dev/pci/pcidevs.h> 89 #include <dev/pci/pciidereg.h> 90 #include <dev/pci/pciidevar.h> 91 92 #include <dev/ic/wdcreg.h> 93 94 #ifdef ATADEBUG 95 int atadebug_pciide_mask = 0; 96 #endif 97 98 #if NATA_DMA 99 static const char dmaerrfmt[] = 100 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n"; 101 #endif 102 103 /* Default product description for devices not known from this controller */ 104 const struct pciide_product_desc default_product_desc = { 105 0, 106 0, 107 "Generic PCI IDE controller", 108 default_chip_map, 109 }; 110 111 const struct pciide_product_desc * 112 pciide_lookup_product(id, pp) 113 pcireg_t id; 114 const struct pciide_product_desc *pp; 115 { 116 for (; pp->chip_map != NULL; pp++) 117 if (PCI_PRODUCT(id) == pp->ide_product) 118 break; 119 120 if (pp->chip_map == NULL) 121 return NULL; 122 return pp; 123 } 124 125 void 126 pciide_common_attach(sc, pa, pp) 127 struct pciide_softc *sc; 128 struct pci_attach_args *pa; 129 const struct pciide_product_desc *pp; 130 { 131 pci_chipset_tag_t pc = pa->pa_pc; 132 pcitag_t tag = pa->pa_tag; 133 #if NATA_DMA 134 pcireg_t csr; 135 #endif 136 char devinfo[256]; 137 const char *displaydev; 138 139 aprint_naive(": disk controller\n"); 140 aprint_normal("\n"); 141 142 sc->sc_pci_id = pa->pa_id; 143 if (pp == NULL) { 144 /* should only happen for generic pciide devices */ 145 sc->sc_pp = &default_product_desc; 146 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo)); 147 displaydev = devinfo; 148 } else { 149 sc->sc_pp = 
pp; 150 displaydev = sc->sc_pp->ide_name; 151 } 152 153 /* if displaydev == NULL, printf is done in chip-specific map */ 154 if (displaydev) 155 aprint_normal("%s: %s (rev. 0x%02x)\n", 156 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, displaydev, 157 PCI_REVISION(pa->pa_class)); 158 159 sc->sc_pc = pa->pa_pc; 160 sc->sc_tag = pa->pa_tag; 161 162 #if NATA_DMA 163 /* Set up DMA defaults; these might be adjusted by chip_map. */ 164 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX; 165 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN; 166 #endif 167 168 #ifdef ATADEBUG 169 if (atadebug_pciide_mask & DEBUG_PROBE) 170 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL); 171 #endif 172 sc->sc_pp->chip_map(sc, pa); 173 174 #if NATA_DMA 175 if (sc->sc_dma_ok) { 176 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 177 csr |= PCI_COMMAND_MASTER_ENABLE; 178 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 179 } 180 #endif 181 ATADEBUG_PRINT(("pciide: command/status register=%x\n", 182 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE); 183 } 184 185 /* tell whether the chip is enabled or not */ 186 int 187 pciide_chipen(sc, pa) 188 struct pciide_softc *sc; 189 struct pci_attach_args *pa; 190 { 191 pcireg_t csr; 192 193 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) { 194 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 195 PCI_COMMAND_STATUS_REG); 196 aprint_normal("%s: device disabled (at %s)\n", 197 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, 198 (csr & PCI_COMMAND_IO_ENABLE) == 0 ? 
199 "device" : "bridge"); 200 return 0; 201 } 202 return 1; 203 } 204 205 void 206 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep) 207 struct pci_attach_args *pa; 208 struct pciide_channel *cp; 209 int compatchan; 210 bus_size_t *cmdsizep, *ctlsizep; 211 { 212 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); 213 struct ata_channel *wdc_cp = &cp->ata_channel; 214 struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp); 215 int i; 216 217 cp->compat = 1; 218 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 219 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 220 221 wdr->cmd_iot = pa->pa_iot; 222 if (bus_space_map(wdr->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 223 PCIIDE_COMPAT_CMD_SIZE, 0, &wdr->cmd_baseioh) != 0) { 224 aprint_error("%s: couldn't map %s channel cmd regs\n", 225 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name); 226 goto bad; 227 } 228 229 wdr->ctl_iot = pa->pa_iot; 230 if (bus_space_map(wdr->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 231 PCIIDE_COMPAT_CTL_SIZE, 0, &wdr->ctl_ioh) != 0) { 232 aprint_error("%s: couldn't map %s channel ctl regs\n", 233 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name); 234 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 235 PCIIDE_COMPAT_CMD_SIZE); 236 goto bad; 237 } 238 239 for (i = 0; i < WDC_NREG; i++) { 240 if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i, 241 i == 0 ? 
4 : 1, &wdr->cmd_iohs[i]) != 0) { 242 aprint_error("%s: couldn't subregion %s channel " 243 "cmd regs\n", 244 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name); 245 goto bad; 246 } 247 } 248 wdc_init_shadow_regs(wdc_cp); 249 wdr->data32iot = wdr->cmd_iot; 250 wdr->data32ioh = wdr->cmd_iohs[0]; 251 return; 252 253 bad: 254 cp->ata_channel.ch_flags |= ATACH_DISABLED; 255 return; 256 } 257 258 void 259 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr) 260 struct pci_attach_args * pa; 261 struct pciide_channel *cp; 262 bus_size_t *cmdsizep, *ctlsizep; 263 int (*pci_intr)(void *); 264 { 265 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); 266 struct ata_channel *wdc_cp = &cp->ata_channel; 267 struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp); 268 const char *intrstr; 269 pci_intr_handle_t intrhandle; 270 int i; 271 272 cp->compat = 0; 273 274 if (sc->sc_pci_ih == NULL) { 275 if (pci_intr_map(pa, &intrhandle) != 0) { 276 aprint_error("%s: couldn't map native-PCI interrupt\n", 277 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname); 278 goto bad; 279 } 280 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 281 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 282 intrhandle, IPL_BIO, pci_intr, sc); 283 if (sc->sc_pci_ih != NULL) { 284 aprint_normal("%s: using %s for native-PCI interrupt\n", 285 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, 286 intrstr ? 
intrstr : "unknown interrupt"); 287 } else { 288 aprint_error( 289 "%s: couldn't establish native-PCI interrupt", 290 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname); 291 if (intrstr != NULL) 292 aprint_normal(" at %s", intrstr); 293 aprint_normal("\n"); 294 goto bad; 295 } 296 } 297 cp->ih = sc->sc_pci_ih; 298 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->ch_channel), 299 PCI_MAPREG_TYPE_IO, 0, 300 &wdr->cmd_iot, &wdr->cmd_baseioh, NULL, cmdsizep) != 0) { 301 aprint_error("%s: couldn't map %s channel cmd regs\n", 302 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name); 303 goto bad; 304 } 305 306 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->ch_channel), 307 PCI_MAPREG_TYPE_IO, 0, 308 &wdr->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) { 309 aprint_error("%s: couldn't map %s channel ctl regs\n", 310 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name); 311 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 312 *cmdsizep); 313 goto bad; 314 } 315 /* 316 * In native mode, 4 bytes of I/O space are mapped for the control 317 * register, the control register is at offset 2. Pass the generic 318 * code a handle for only one byte at the right offset. 319 */ 320 if (bus_space_subregion(wdr->ctl_iot, cp->ctl_baseioh, 2, 1, 321 &wdr->ctl_ioh) != 0) { 322 aprint_error("%s: unable to subregion %s channel ctl regs\n", 323 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name); 324 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 325 *cmdsizep); 326 bus_space_unmap(wdr->cmd_iot, cp->ctl_baseioh, *ctlsizep); 327 goto bad; 328 } 329 330 for (i = 0; i < WDC_NREG; i++) { 331 if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i, 332 i == 0 ? 
4 : 1, &wdr->cmd_iohs[i]) != 0) { 333 aprint_error("%s: couldn't subregion %s channel " 334 "cmd regs\n", 335 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name); 336 goto bad; 337 } 338 } 339 wdc_init_shadow_regs(wdc_cp); 340 wdr->data32iot = wdr->cmd_iot; 341 wdr->data32ioh = wdr->cmd_iohs[0]; 342 return; 343 344 bad: 345 cp->ata_channel.ch_flags |= ATACH_DISABLED; 346 return; 347 } 348 349 #if NATA_DMA 350 void 351 pciide_mapreg_dma(sc, pa) 352 struct pciide_softc *sc; 353 struct pci_attach_args *pa; 354 { 355 pcireg_t maptype; 356 bus_addr_t addr; 357 struct pciide_channel *pc; 358 int reg, chan; 359 bus_size_t size; 360 361 /* 362 * Map DMA registers 363 * 364 * Note that sc_dma_ok is the right variable to test to see if 365 * DMA can be done. If the interface doesn't support DMA, 366 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 367 * be mapped, it'll be zero. I.e., sc_dma_ok will only be 368 * non-zero if the interface supports DMA and the registers 369 * could be mapped. 370 * 371 * XXX Note that despite the fact that the Bus Master IDE specs 372 * XXX say that "The bus master IDE function uses 16 bytes of IO 373 * XXX space," some controllers (at least the United 374 * XXX Microelectronics UM8886BF) place it in memory space. 
375 */ 376 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 377 PCIIDE_REG_BUS_MASTER_DMA); 378 379 switch (maptype) { 380 case PCI_MAPREG_TYPE_IO: 381 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 382 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 383 &addr, NULL, NULL) == 0); 384 if (sc->sc_dma_ok == 0) { 385 aprint_verbose( 386 ", but unused (couldn't query registers)"); 387 break; 388 } 389 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 390 && addr >= 0x10000) { 391 sc->sc_dma_ok = 0; 392 aprint_verbose( 393 ", but unused (registers at unsafe address " 394 "%#lx)", (unsigned long)addr); 395 break; 396 } 397 /* FALLTHROUGH */ 398 399 case PCI_MAPREG_MEM_TYPE_32BIT: 400 sc->sc_dma_ok = (pci_mapreg_map(pa, 401 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 402 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0); 403 sc->sc_dmat = pa->pa_dmat; 404 if (sc->sc_dma_ok == 0) { 405 aprint_verbose(", but unused (couldn't map registers)"); 406 } else { 407 sc->sc_wdcdev.dma_arg = sc; 408 sc->sc_wdcdev.dma_init = pciide_dma_init; 409 sc->sc_wdcdev.dma_start = pciide_dma_start; 410 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 411 } 412 413 if (device_cfdata(&sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags & 414 PCIIDE_OPTIONS_NODMA) { 415 aprint_verbose( 416 ", but unused (forced off by config file)"); 417 sc->sc_dma_ok = 0; 418 } 419 break; 420 421 default: 422 sc->sc_dma_ok = 0; 423 aprint_verbose( 424 ", but unsupported register maptype (0x%x)", maptype); 425 } 426 427 if (sc->sc_dma_ok == 0) 428 return; 429 430 /* 431 * Set up the default handles for the DMA registers. 432 * Just reserve 32 bits for each handle, unless space 433 * doesn't permit it. 
434 */ 435 for (chan = 0; chan < PCIIDE_NUM_CHANNELS; chan++) { 436 pc = &sc->pciide_channels[chan]; 437 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 438 size = 4; 439 if (size > (IDEDMA_SCH_OFFSET - reg)) 440 size = IDEDMA_SCH_OFFSET - reg; 441 if (bus_space_subregion(sc->sc_dma_iot, sc->sc_dma_ioh, 442 IDEDMA_SCH_OFFSET * chan + reg, size, 443 &pc->dma_iohs[reg]) != 0) { 444 sc->sc_dma_ok = 0; 445 aprint_verbose(", but can't subregion offset %d " 446 "size %lu", reg, (u_long)size); 447 return; 448 } 449 } 450 } 451 } 452 #endif /* NATA_DMA */ 453 454 int 455 pciide_compat_intr(arg) 456 void *arg; 457 { 458 struct pciide_channel *cp = arg; 459 460 #ifdef DIAGNOSTIC 461 /* should only be called for a compat channel */ 462 if (cp->compat == 0) 463 panic("pciide compat intr called for non-compat chan %p", cp); 464 #endif 465 return (wdcintr(&cp->ata_channel)); 466 } 467 468 int 469 pciide_pci_intr(arg) 470 void *arg; 471 { 472 struct pciide_softc *sc = arg; 473 struct pciide_channel *cp; 474 struct ata_channel *wdc_cp; 475 int i, rv, crv; 476 477 rv = 0; 478 for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) { 479 cp = &sc->pciide_channels[i]; 480 wdc_cp = &cp->ata_channel; 481 482 /* If a compat channel skip. 
*/ 483 if (cp->compat) 484 continue; 485 /* if this channel not waiting for intr, skip */ 486 if ((wdc_cp->ch_flags & ATACH_IRQ_WAIT) == 0) 487 continue; 488 489 crv = wdcintr(wdc_cp); 490 if (crv == 0) 491 ; /* leave rv alone */ 492 else if (crv == 1) 493 rv = 1; /* claim the intr */ 494 else if (rv == 0) /* crv should be -1 in this case */ 495 rv = crv; /* if we've done no better, take it */ 496 } 497 return (rv); 498 } 499 500 #if NATA_DMA 501 void 502 pciide_channel_dma_setup(cp) 503 struct pciide_channel *cp; 504 { 505 int drive, s; 506 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); 507 struct ata_drive_datas *drvp; 508 509 KASSERT(cp->ata_channel.ch_ndrive != 0); 510 511 for (drive = 0; drive < cp->ata_channel.ch_ndrive; drive++) { 512 drvp = &cp->ata_channel.ch_drive[drive]; 513 /* If no drive, skip */ 514 if ((drvp->drive_flags & DRIVE) == 0) 515 continue; 516 /* setup DMA if needed */ 517 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 518 (drvp->drive_flags & DRIVE_UDMA) == 0) || 519 sc->sc_dma_ok == 0) { 520 s = splbio(); 521 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 522 splx(s); 523 continue; 524 } 525 if (pciide_dma_table_setup(sc, cp->ata_channel.ch_channel, 526 drive) != 0) { 527 /* Abort DMA setup */ 528 s = splbio(); 529 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 530 splx(s); 531 continue; 532 } 533 } 534 } 535 536 #define NIDEDMA_TABLES(sc) \ 537 (MAXPHYS/(min((sc)->sc_dma_maxsegsz, PAGE_SIZE)) + 1) 538 539 int 540 pciide_dma_table_setup(sc, channel, drive) 541 struct pciide_softc *sc; 542 int channel, drive; 543 { 544 bus_dma_segment_t seg; 545 int error, rseg; 546 const bus_size_t dma_table_size = 547 sizeof(struct idedma_table) * NIDEDMA_TABLES(sc); 548 struct pciide_dma_maps *dma_maps = 549 &sc->pciide_channels[channel].dma_maps[drive]; 550 551 /* If table was already allocated, just return */ 552 if (dma_maps->dma_table) 553 return 0; 554 555 /* Allocate memory for the DMA tables and map it */ 556 if ((error = 
bus_dmamem_alloc(sc->sc_dmat, dma_table_size, 557 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg, 558 BUS_DMA_NOWAIT)) != 0) { 559 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, 560 channel, "allocate", drive, error); 561 return error; 562 } 563 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 564 dma_table_size, 565 (void **)&dma_maps->dma_table, 566 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 567 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, 568 channel, "map", drive, error); 569 return error; 570 } 571 ATADEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, " 572 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size, 573 (unsigned long)seg.ds_addr), DEBUG_PROBE); 574 /* Create and load table DMA map for this disk */ 575 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size, 576 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 577 &dma_maps->dmamap_table)) != 0) { 578 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, 579 channel, "create", drive, error); 580 return error; 581 } 582 if ((error = bus_dmamap_load(sc->sc_dmat, 583 dma_maps->dmamap_table, 584 dma_maps->dma_table, 585 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 586 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, 587 channel, "load", drive, error); 588 return error; 589 } 590 ATADEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 591 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr), 592 DEBUG_PROBE); 593 /* Create a xfer DMA map for this drive */ 594 if ((error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 595 NIDEDMA_TABLES(sc), sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 596 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 597 &dma_maps->dmamap_xfer)) != 0) { 598 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, 599 channel, "create xfer", drive, error); 600 return error; 601 } 602 return 0; 603 } 604 605 int 606 pciide_dma_dmamap_setup(sc, channel, drive, databuf, datalen, flags) 607 
struct pciide_softc *sc; 608 int channel, drive; 609 void *databuf; 610 size_t datalen; 611 int flags; 612 { 613 int error, seg; 614 struct pciide_channel *cp = &sc->pciide_channels[channel]; 615 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 616 617 error = bus_dmamap_load(sc->sc_dmat, 618 dma_maps->dmamap_xfer, 619 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 620 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE)); 621 if (error) { 622 printf(dmaerrfmt, sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, 623 channel, "load xfer", drive, error); 624 return error; 625 } 626 627 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 628 dma_maps->dmamap_xfer->dm_mapsize, 629 (flags & WDC_DMA_READ) ? 630 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 631 632 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 633 #ifdef DIAGNOSTIC 634 /* A segment must not cross a 64k boundary */ 635 { 636 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 637 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 638 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 639 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 640 printf("pciide_dma: segment %d physical addr 0x%lx" 641 " len 0x%lx not properly aligned\n", 642 seg, phys, len); 643 panic("pciide_dma: buf align"); 644 } 645 } 646 #endif 647 dma_maps->dma_table[seg].base_addr = 648 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 649 dma_maps->dma_table[seg].byte_count = 650 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 651 IDEDMA_BYTE_COUNT_MASK); 652 ATADEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 653 seg, le32toh(dma_maps->dma_table[seg].byte_count), 654 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 655 656 } 657 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 658 htole32(IDEDMA_BYTE_COUNT_EOT); 659 660 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 661 dma_maps->dmamap_table->dm_mapsize, 662 BUS_DMASYNC_PREWRITE); 663 664 #ifdef DIAGNOSTIC 665 
if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 666 printf("pciide_dma_dmamap_setup: addr 0x%lx " 667 "not properly aligned\n", 668 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr); 669 panic("pciide_dma_init: table align"); 670 } 671 #endif 672 /* remember flags */ 673 dma_maps->dma_flags = flags; 674 675 return 0; 676 } 677 678 int 679 pciide_dma_init(v, channel, drive, databuf, datalen, flags) 680 void *v; 681 int channel, drive; 682 void *databuf; 683 size_t datalen; 684 int flags; 685 { 686 struct pciide_softc *sc = v; 687 int error; 688 struct pciide_channel *cp = &sc->pciide_channels[channel]; 689 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 690 691 if ((error = pciide_dma_dmamap_setup(sc, channel, drive, 692 databuf, datalen, flags)) != 0) 693 return error; 694 /* Maps are ready. Start DMA function */ 695 /* Clear status bits */ 696 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0, 697 bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0)); 698 /* Write table addr */ 699 bus_space_write_4(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_TBL], 0, 700 dma_maps->dmamap_table->dm_segs[0].ds_addr); 701 /* set read/write */ 702 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0, 703 ((flags & WDC_DMA_READ) ? 
IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd); 704 return 0; 705 } 706 707 void 708 pciide_dma_start(void *v, int channel, int drive) 709 { 710 struct pciide_softc *sc = v; 711 struct pciide_channel *cp = &sc->pciide_channels[channel]; 712 713 ATADEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS); 714 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0, 715 bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0) 716 | IDEDMA_CMD_START); 717 } 718 719 int 720 pciide_dma_finish(v, channel, drive, force) 721 void *v; 722 int channel, drive; 723 int force; 724 { 725 struct pciide_softc *sc = v; 726 u_int8_t status; 727 int error = 0; 728 struct pciide_channel *cp = &sc->pciide_channels[channel]; 729 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 730 731 status = bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0); 732 ATADEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 733 DEBUG_XFERS); 734 735 if (force == WDC_DMAEND_END && (status & IDEDMA_CTL_INTR) == 0) 736 return WDC_DMAST_NOIRQ; 737 738 /* stop DMA channel */ 739 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0, 740 bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0) 741 & ~IDEDMA_CMD_START); 742 743 /* Unload the map of the data buffer */ 744 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 745 dma_maps->dmamap_xfer->dm_mapsize, 746 (dma_maps->dma_flags & WDC_DMA_READ) ? 
747 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 748 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 749 750 if ((status & IDEDMA_CTL_ERR) != 0 && force != WDC_DMAEND_ABRT_QUIET) { 751 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 752 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, channel, drive, 753 status); 754 error |= WDC_DMAST_ERR; 755 } 756 757 if ((status & IDEDMA_CTL_INTR) == 0 && force != WDC_DMAEND_ABRT_QUIET) { 758 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 759 "status=0x%x\n", sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, 760 channel, drive, status); 761 error |= WDC_DMAST_NOIRQ; 762 } 763 764 if ((status & IDEDMA_CTL_ACT) != 0 && force != WDC_DMAEND_ABRT_QUIET) { 765 /* data underrun, may be a valid condition for ATAPI */ 766 error |= WDC_DMAST_UNDER; 767 } 768 return error; 769 } 770 771 void 772 pciide_irqack(chp) 773 struct ata_channel *chp; 774 { 775 struct pciide_channel *cp = CHAN_TO_PCHAN(chp); 776 struct pciide_softc *sc = CHAN_TO_PCIIDE(chp); 777 778 /* clear status bits in IDE DMA registers */ 779 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0, 780 bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0)); 781 } 782 #endif /* NATA_DMA */ 783 784 /* some common code used by several chip_map */ 785 int 786 pciide_chansetup(sc, channel, interface) 787 struct pciide_softc *sc; 788 int channel; 789 pcireg_t interface; 790 { 791 struct pciide_channel *cp = &sc->pciide_channels[channel]; 792 sc->wdc_chanarray[channel] = &cp->ata_channel; 793 cp->name = PCIIDE_CHANNEL_NAME(channel); 794 cp->ata_channel.ch_channel = channel; 795 cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac; 796 cp->ata_channel.ch_queue = 797 malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT); 798 if (cp->ata_channel.ch_queue == NULL) { 799 aprint_error("%s %s channel: " 800 "can't allocate memory for command queue", 801 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name); 802 return 0; 803 } 804 cp->ata_channel.ch_ndrive = 2; 805 
aprint_verbose("%s: %s channel %s to %s mode\n", 806 sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name, 807 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 808 "configured" : "wired", 809 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 810 "native-PCI" : "compatibility"); 811 return 1; 812 } 813 814 /* some common code used by several chip channel_map */ 815 void 816 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr) 817 struct pci_attach_args *pa; 818 struct pciide_channel *cp; 819 pcireg_t interface; 820 bus_size_t *cmdsizep, *ctlsizep; 821 int (*pci_intr)(void *); 822 { 823 struct ata_channel *wdc_cp = &cp->ata_channel; 824 825 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->ch_channel)) 826 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr); 827 else { 828 pciide_mapregs_compat(pa, cp, wdc_cp->ch_channel, cmdsizep, 829 ctlsizep); 830 if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0) 831 pciide_map_compat_intr(pa, cp, wdc_cp->ch_channel); 832 } 833 wdcattach(wdc_cp); 834 } 835 836 /* 837 * generic code to map the compat intr. 
 */
void
pciide_map_compat_intr(pa, cp, compatchan)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan;
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);

#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	/* Ask machine-dependent code to hook up the legacy interrupt. */
	cp->ih =
	   pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_atac.atac_dev,
	   pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
#endif
		/*
		 * Without the machdep hook (or when it fails) the channel
		 * has no interrupt source, so it is disabled.
		 */
		aprint_error("%s: no compatibility interrupt for use by %s "
		    "channel\n", sc->sc_wdcdev.sc_atac.atac_dev.dv_xname,
		    cp->name);
		cp->ata_channel.ch_flags |= ATACH_DISABLED;
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	}
#endif
}

/*
 * chip_map routine for controllers without a chip-specific driver:
 * report/enable bus-master DMA when possible, map and probe each
 * channel, attach the ones that respond, then allocate DMA maps.
 */
void
default_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t csr;
	int channel;
#if NATA_DMA
	int drive;
	u_int8_t idedma_ctl;
#endif
	bus_size_t cmdsize, ctlsize;
	const char *failreason;
	struct wdc_regs *wdr;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
#if NATA_DMA
		aprint_verbose("%s: bus-master DMA support present",
		    sc->sc_wdcdev.sc_atac.atac_dev.dv_xname);
		/*
		 * For the generic match, DMA must be explicitly requested
		 * via the PCIIDE_OPTIONS_DMA config-file flag.
		 */
		if (sc->sc_pp == &default_product_desc &&
		    (device_cfdata(&sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
		    PCIIDE_OPTIONS_DMA) == 0) {
			aprint_verbose(", but unused (no driver support)");
			sc->sc_dma_ok = 0;
		} else {
			pciide_mapreg_dma(sc, pa);
			if (sc->sc_dma_ok != 0)
				aprint_verbose(", used without full driver "
				    "support");
		}
#else
		aprint_verbose("%s: bus-master DMA support present, but unused (no driver support)",
		    sc->sc_wdcdev.sc_atac.atac_dev.dv_xname);
#endif	/* NATA_DMA */
	} else {
		aprint_verbose("%s: hardware does not support DMA",
		    sc->sc_wdcdev.sc_atac.atac_dev.dv_xname);
#if NATA_DMA
		sc->sc_dma_ok = 0;
#endif
	}
	aprint_verbose("\n");
#if NATA_DMA
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
#endif
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
#if NATA_DMA
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;
#endif

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		wdr = CHAN_TO_WDC_REGS(&cp->ata_channel);
		if (interface & PCIIDE_INTERFACE_PCI(channel))
			pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
			    pciide_pci_intr);
		else
			pciide_mapregs_compat(pa, cp,
			    cp->ata_channel.ch_channel, &cmdsize, &ctlsize);
		if (cp->ata_channel.ch_flags & ATACH_DISABLED)
			continue;
		/*
		 * Check to see if something appears to be there.
		 */
		failreason = NULL;
		/*
		 * In native mode, always enable the controller. It's
		 * not possible to have an ISA board using the same address
		 * anyway.
		 */
		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
			wdcattach(&cp->ata_channel);
			continue;
		}
		if (!wdcprobe(&cp->ata_channel)) {
			failreason = "not responding; disabled or no drives?";
			goto next;
		}
		/*
		 * Now, make sure it's actually attributable to this PCI IDE
		 * channel by trying to access the channel again while the
		 * PCI IDE controller's I/O space is disabled. (If the
		 * channel no longer appears to be there, it belongs to
		 * this controller.) YUCK!
		 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
		    csr & ~PCI_COMMAND_IO_ENABLE);
		if (wdcprobe(&cp->ata_channel))
			failreason = "other hardware responding at addresses";
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, csr);
next:
		if (failreason) {
			aprint_error("%s: %s channel ignored (%s)\n",
			    sc->sc_wdcdev.sc_atac.atac_dev.dv_xname, cp->name,
			    failreason);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
			    cmdsize);
			bus_space_unmap(wdr->ctl_iot, wdr->ctl_ioh, ctlsize);
		} else {
			pciide_map_compat_intr(pa, cp,
			    cp->ata_channel.ch_channel);
			wdcattach(&cp->ata_channel);
		}
	}

#if NATA_DMA
	if (sc->sc_dma_ok == 0)
		return;

	/* Allocate DMA maps */
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		idedma_ctl = 0;
		cp = &sc->pciide_channels[channel];
		for (drive = 0; drive < cp->ata_channel.ch_ndrive; drive++) {
			/*
			 * we have not probed the drives yet, allocate
			 * resources for all of them.
			 */
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				aprint_error(
				    "%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_atac.atac_dev.dv_xname,
				    channel, drive);
				sc->sc_dma_ok = 0;
				sc->sc_wdcdev.sc_atac.atac_cap &= ~ATAC_CAP_DMA;
				sc->sc_wdcdev.irqack = NULL;
				break;
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		if (idedma_ctl != 0) {
			/* Add software bits in status register */
			bus_space_write_1(sc->sc_dma_iot,
			    cp->dma_iohs[IDEDMA_CTL], 0, idedma_ctl);
		}
	}
#endif	/* NATA_DMA */
}

/*
 * Generic channel setup for S-ATA controllers: transfer modes are not
 * programmed here (see comment below); only the per-drive DMA bits in
 * the bus-master status register are set.
 */
void
sata_setup_channel(chp)
	struct ata_channel *chp;
{
#if NATA_DMA
	struct ata_drive_datas *drvp;
	int drive;
#if NATA_UDMA
	int s;
#endif
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;

	for (drive = 0; drive < cp->ata_channel.ch_ndrive; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
#if NATA_UDMA
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~DRIVE_DMA;
			splx(s);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else
#endif	/* NATA_UDMA */
		if (drvp->drive_flags & DRIVE_DMA) {
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
	}

	/*
	 * Nothing to do to setup modes; it is meaningless in S-ATA
	 * (but many S-ATA drives still want to get the SET_FEATURE
	 * command).
	 */
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
#endif	/* NATA_DMA */
}