1 /* $NetBSD: pciide_common.c,v 1.41 2009/03/15 21:28:09 cegger Exp $ */ 2 3 4 /* 5 * Copyright (c) 1999, 2000, 2001, 2003 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Manuel Bouyer. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34 35 36 /* 37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 
38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed by Christopher G. Demetriou 50 * for the NetBSD Project. 51 * 4. The name of the author may not be used to endorse or promote products 52 * derived from this software without specific prior written permission 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 64 */ 65 66 /* 67 * PCI IDE controller driver. 68 * 69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 70 * sys/dev/pci/ppb.c, revision 1.16). 
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pciide_common.c,v 1.41 2009/03/15 21:28:09 cegger Exp $");

#include <sys/param.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>

#include <dev/ic/wdcreg.h>

#ifdef ATADEBUG
/* Bitmask of DEBUG_* flags enabling ATADEBUG_PRINT() output in this file. */
int atadebug_pciide_mask = 0;
#endif

#if NATA_DMA
/* Shared format string for the DMA table-map error messages below. */
static const char dmaerrfmt[] =
    "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
#endif

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};

/*
 * Look up a PCI product id in a chip-specific product table.
 * The table is terminated by an entry whose chip_map is NULL.
 * Returns the matching entry, or NULL if the product is unknown.
 */
const struct pciide_product_desc *
pciide_lookup_product(pcireg_t id, const struct pciide_product_desc *pp)
{
	for (; pp->chip_map != NULL; pp++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (pp->chip_map == NULL)
		return NULL;
	return pp;
}

/*
 * Attach code common to all pciide front-ends: print the device banner,
 * record the PCI handles in the softc, set DMA defaults, run the
 * chip-specific chip_map, and finally enable PCI bus mastering if the
 * chip_map determined that DMA is usable.
 *
 * pp may be NULL for generic (unrecognized) controllers, in which case
 * the default product description is used.
 */
void
pciide_common_attach(struct pciide_softc *sc, struct pci_attach_args *pa, const struct pciide_product_desc *pp)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
#if NATA_DMA
	pcireg_t csr;
#endif
	char devinfo[256];
	const char *displaydev;

	aprint_naive(": disk controller\n");
	aprint_normal("\n");

	sc->sc_pci_id = pa->pa_id;
	if (pp == NULL) {
		/* should only happen for generic pciide devices */
		sc->sc_pp = &default_product_desc;
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
		displaydev = devinfo;
	} else {
		sc->sc_pp = pp;
		displaydev = sc->sc_pp->ide_name;
	}

	/* if displaydev == NULL, printf is done in chip-specific map */
	if (displaydev)
		aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "%s (rev. 0x%02x)\n", displaydev,
		    PCI_REVISION(pa->pa_class));

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

#if NATA_DMA
	/* Set up DMA defaults; these might be adjusted by chip_map. */
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
#endif

#ifdef ATADEBUG
	if (atadebug_pciide_mask & DEBUG_PROBE)
		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
	/* Hand off to the chip-specific mapping/attach routine. */
	sc->sc_pp->chip_map(sc, pa);

#if NATA_DMA
	if (sc->sc_dma_ok) {
		/* DMA is usable: turn on PCI bus mastering for the device. */
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}
#endif
	ATADEBUG_PRINT(("pciide: command/status register=%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t csr;

	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		/*
		 * Report whether the device itself or an upstream bridge
		 * disabled I/O access.
		 */
		aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "device disabled (at %s)\n",
		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
		    "device" : "bridge");
		return 0;
	}
	return 1;
}

/*
 * Map the compatibility-mode (legacy ISA I/O) command and control
 * register banks for a channel.  The reported sizes are the fixed
 * compat sizes.  On any failure the channel is marked ATACH_DISABLED.
 */
void
pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp, int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep)
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	struct ata_channel *wdc_cp = &cp->ata_channel;
	struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp);
	int i;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	wdr->cmd_iot = pa->pa_iot;
	if (bus_space_map(wdr->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdr->cmd_baseioh) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel cmd regs\n", cp->name);
		goto bad;
	}

	wdr->ctl_iot = pa->pa_iot;
	if (bus_space_map(wdr->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdr->ctl_ioh) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel ctl regs\n", cp->name);
		/* undo the cmd-register mapping done above */
		bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		goto bad;
	}

	/*
	 * Carve one subregion handle per command register; register 0
	 * (the data port) is 4 bytes wide, all others are 1 byte.
	 */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't subregion %s channel cmd regs\n",
			    cp->name);
			goto bad;
		}
	}
	wdc_init_shadow_regs(wdc_cp);
	/* 32-bit data transfers go through the same (data) register. */
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];
	return;

bad:
	cp->ata_channel.ch_flags |= ATACH_DISABLED;
	return;
}

/*
 * Map the native-PCI mode register banks (PCI BARs) for a channel and,
 * on first use, establish the interrupt handler shared by all native
 * channels of the controller (cached in sc->sc_pci_ih).  On any failure
 * the channel is marked ATACH_DISABLED.
 */
void
pciide_mapregs_native(struct pci_attach_args *pa,
	struct pciide_channel *cp, bus_size_t *cmdsizep,
	bus_size_t *ctlsizep, int (*pci_intr)(void *))
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	struct ata_channel *wdc_cp = &cp->ata_channel;
	struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp);
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	int i;

	cp->compat = 0;

	/* The PCI interrupt is shared; only establish it once. */
	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map native-PCI interrupt\n");
			goto bad;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc);
		if (sc->sc_pci_ih != NULL) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "using %s for native-PCI interrupt\n",
			    intrstr ?
			    intrstr : "unknown interrupt");
		} else {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't establish native-PCI interrupt");
			if (intrstr != NULL)
				aprint_error(" at %s", intrstr);
			aprint_error("\n");
			goto bad;
		}
	}
	cp->ih = sc->sc_pci_ih;
	/* Map the command-block registers from their PCI BAR. */
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->ch_channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdr->cmd_iot, &wdr->cmd_baseioh, NULL, cmdsizep) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel cmd regs\n", cp->name);
		goto bad;
	}

	/* Map the control-block registers from their PCI BAR. */
	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->ch_channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdr->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel ctl regs\n", cp->name);
		bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
		    *cmdsizep);
		goto bad;
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register, the control register is at offset 2. Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
	if (bus_space_subregion(wdr->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdr->ctl_ioh) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "unable to subregion %s channel ctl regs\n", cp->name);
		bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
		    *cmdsizep);
		bus_space_unmap(wdr->cmd_iot, cp->ctl_baseioh, *ctlsizep);
		goto bad;
	}

	/*
	 * Carve one subregion handle per command register; register 0
	 * (the data port) is 4 bytes wide, all others are 1 byte.
	 */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't subregion %s channel cmd regs\n",
			    cp->name);
			goto bad;
		}
	}
	wdc_init_shadow_regs(wdc_cp);
	/* 32-bit data transfers go through the same (data) register. */
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];
	return;

bad:
	cp->ata_channel.ch_flags |= ATACH_DISABLED;
	return;
}

#if NATA_DMA
/*
 * Map the bus-master DMA registers and, on success, install the generic
 * pciide DMA entry points in the wdc device.  Sets sc->sc_dma_ok, and
 * carves per-channel subregion handles for the DMA registers.
 */
void
pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;
	struct pciide_channel *pc;
	int reg, chan;
	bus_size_t size;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done. If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
	 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			aprint_verbose(
			    ", but unused (couldn't query registers)");
			break;
		}
		/*
		 * Some chips can only decode 16-bit I/O addresses; refuse
		 * registers above 64K for those.
		 */
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			aprint_verbose(
			    ", but unused (registers at unsafe address "
			    "%#lx)", (unsigned long)addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			aprint_verbose(", but unused (couldn't map registers)");
		} else {
			/* Install the generic pciide DMA entry points. */
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}

		/* Allow the user to disable DMA from the kernel config. */
		if (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
		    PCIIDE_OPTIONS_NODMA) {
			aprint_verbose(
			    ", but unused (forced off by config file)");
			sc->sc_dma_ok = 0;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		aprint_verbose(
		    ", but unsupported register maptype (0x%x)", maptype);
	}

	if (sc->sc_dma_ok == 0)
		return;

	/*
	 * Set up the default handles for the DMA registers.
	 * Just reserve 32 bits for each handle, unless space
	 * doesn't permit it.
	 */
	for (chan = 0; chan < PCIIDE_NUM_CHANNELS; chan++) {
		pc = &sc->pciide_channels[chan];
		for (reg = 0; reg < IDEDMA_NREGS; reg++) {
			size = 4;
			/* clamp so a handle never crosses into the next channel */
			if (size > (IDEDMA_SCH_OFFSET - reg))
				size = IDEDMA_SCH_OFFSET - reg;
			if (bus_space_subregion(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_SCH_OFFSET * chan + reg, size,
			    &pc->dma_iohs[reg]) != 0) {
				sc->sc_dma_ok = 0;
				aprint_verbose(", but can't subregion offset %d "
				    "size %lu", reg, (u_long)size);
				return;
			}
		}
	}
}
#endif /* NATA_DMA */

/*
 * Interrupt handler for a channel using a legacy (compatibility)
 * interrupt line; simply forwards to wdcintr() for that channel.
 */
int
pciide_compat_intr(void *arg)
{
	struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p", cp);
#endif
	return (wdcintr(&cp->ata_channel));
}

/*
 * Shared interrupt handler for native-PCI mode: poll every non-compat
 * channel that is waiting for an interrupt and combine the results.
 */
int
pciide_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->ata_channel;

		/* If a compat channel skip.
		 */
		if (cp->compat)
			continue;
		/* if this channel not waiting for intr, skip */
		if ((wdc_cp->ch_flags & ATACH_IRQ_WAIT) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;	/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

#if NATA_DMA
/*
 * Prepare per-drive DMA state for a channel: clear the DMA/UDMA drive
 * flags for drives that can't do DMA (or if DMA is globally off), and
 * allocate the DMA tables for those that can.
 */
void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive, s;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	struct ata_drive_datas *drvp;

	KASSERT(cp->ata_channel.ch_ndrive != 0);

	for (drive = 0; drive < cp->ata_channel.ch_ndrive; drive++) {
		drvp = &cp->ata_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			/* drive_flags is also touched at interrupt time */
			s = splbio();
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			splx(s);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->ata_channel.ch_channel,
		    drive) != 0) {
			/* Abort DMA setup */
			s = splbio();
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			splx(s);
			continue;
		}
	}
}

/*
 * Number of physical-region-descriptor entries needed to cover a
 * MAXPHYS transfer, plus one spare.
 */
#define NIDEDMA_TABLES(sc) \
	(MAXPHYS/(min((sc)->sc_dma_maxsegsz, PAGE_SIZE)) + 1)

/*
 * Allocate and load the bus-master DMA descriptor table for one drive,
 * and create the data-transfer DMA map.  Idempotent: returns 0 at once
 * if the table already exists.  Returns 0 on success or a bus_dma error.
 *
 * NOTE(review): on a failure part-way through, resources acquired by the
 * earlier steps are not released here; callers react by falling back to
 * PIO for the drive — confirm this is intentional.
 */
int
pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive)
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES(sc);
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return 0;

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "allocate", drive, error);
		return error;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (void **)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "map", drive, error);
		return error;
	}
	ATADEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
	    (unsigned long)seg.ds_addr), DEBUG_PROBE);
	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "create", drive, error);
		return error;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "load", drive, error);
		return error;
	}
	ATADEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
	    DEBUG_PROBE);
	/* Create a xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, MAXPHYS,
	    NIDEDMA_TABLES(sc), sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "create xfer", drive, error);
		return error;
	}
	return 0;
}

/*
 * Load the data buffer into the drive's transfer DMA map and fill in
 * the bus-master descriptor table from the resulting segments.
 */
int
pciide_dma_dmamap_setup(struct pciide_softc *sc, int
    channel, int drive, void *databuf, size_t datalen, int flags)
{
	int error, seg;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	/* Load the data buffer; direction comes from WDC_DMA_READ. */
	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "load xfer", drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/* Build one little-endian descriptor per DMA segment. */
	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		ATADEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	/* Mark the last descriptor as end-of-table. */
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	/* Flush the descriptor table before the device reads it. */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_dmamap_setup: addr 0x%lx "
		    "not properly aligned\n",
		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif
	/* remember flags */
	dma_maps->dma_flags = flags;

	return 0;
}

/*
 * wdc dma_init entry point: set up the maps for a transfer, then program
 * the bus-master engine (clear status, write the table address, set the
 * transfer direction).  The engine is started later by pciide_dma_start.
 */
int
pciide_dma_init(void *v, int channel, int drive, void *databuf, size_t datalen, int flags)
{
	struct pciide_softc *sc = v;
	int error;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	if ((error = pciide_dma_dmamap_setup(sc, channel, drive,
	    databuf, datalen, flags)) != 0)
		return error;
	/* Maps are ready. Start DMA function */
	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_TBL], 0,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    ((flags & WDC_DMA_READ) ?
	    /* a disk read means the bus master writes to memory */
	    IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd);
	return 0;
}

/*
 * wdc dma_start entry point: set the start bit in the bus-master
 * command register to begin the prepared transfer.
 */
void
pciide_dma_start(void *v, int channel, int drive)
{
	struct pciide_softc *sc = v;
	struct pciide_channel *cp = &sc->pciide_channels[channel];

	ATADEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0)
	    | IDEDMA_CMD_START);
}

/*
 * wdc dma_finish entry point: stop the bus-master engine, unload the
 * data map and report the outcome.  Returns 0 on clean completion or a
 * combination of WDC_DMAST_* bits; WDC_DMAST_NOIRQ alone (without
 * stopping the engine) if called with WDC_DMAEND_END and no interrupt
 * is pending.
 */
int
pciide_dma_finish(void *v, int channel, int drive, int force)
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0);
	ATADEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	/* Not our interrupt: leave the engine running. */
	if (force == WDC_DMAEND_END && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0)
	    & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	if ((status & IDEDMA_CTL_ERR) != 0 && force != WDC_DMAEND_ABRT_QUIET) {
		aprint_error("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0 && force != WDC_DMAEND_ABRT_QUIET) {
		aprint_error("%s:%d:%d: bus-master DMA error: missing "
		    "interrupt, status=0x%x\n",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev),
		    channel, drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0 && force != WDC_DMAEND_ABRT_QUIET) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}

/*
 * Interrupt-acknowledge hook: write the IDE DMA status register back to
 * itself to clear its (write-1-to-clear) status bits.
 */
void
pciide_irqack(struct ata_channel *chp)
{
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	/* clear status bits in IDE DMA registers */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0));
}
#endif /* NATA_DMA */

/*
 * some common code used by several chip_map: initialize one channel's
 * structures (name, back pointers, command queue).  Returns 1 on
 * success, 0 if the queue could not be allocated.
 */
int
pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	sc->wdc_chanarray[channel] = &cp->ata_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->ata_channel.ch_channel = channel;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	cp->ata_channel.ch_queue =
	    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
	if (cp->ata_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name);
		return 0;
	}
	cp->ata_channel.ch_ndrive = 2;
	/* Report whether the mode is strapped or software-selectable. */
	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "%s channel %s to %s mode\n", cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");
	return 1;
}

/*
 * some common code used by several chip channel_map: map the channel's
 * registers in whichever mode the interface byte reports, hook up the
 * interrupt, and attach the wdc channel.
 */
void
pciide_mapchan(struct pci_attach_args *pa,
	struct pciide_channel *cp,
	pcireg_t interface, bus_size_t *cmdsizep,
	bus_size_t *ctlsizep, int (*pci_intr)(void *))
{
	struct ata_channel *wdc_cp = &cp->ata_channel;

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->ch_channel))
		pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr);
	else {
		pciide_mapregs_compat(pa, cp, wdc_cp->ch_channel, cmdsizep,
		    ctlsizep);
		if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0)
			pciide_map_compat_intr(pa, cp, wdc_cp->ch_channel);
	}
	wdcattach(wdc_cp);
}

/*
 * generic code to map the compat intr.
 *
 * On platforms without a machine-dependent compat-interrupt hook the
 * channel is unconditionally disabled.
 */
void
pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, int compatchan)
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);

#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	cp->ih =
	    pciide_machdep_compat_intr_establish(sc->sc_wdcdev.sc_atac.atac_dev,
	    pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
#endif
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "no compatibility interrupt for use by %s "
		    "channel\n", cp->name);
		cp->ata_channel.ch_flags |= ATACH_DISABLED;
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	}
#endif
}

/*
 * chip_map for controllers with no chip-specific driver support: map the
 * channels, verify that compat channels really belong to this controller,
 * and set up generic bus-master DMA if present.
 */
void
default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t csr;
	int channel;
#if NATA_DMA
	int drive;
	u_int8_t idedma_ctl;
#endif
	bus_size_t cmdsize, ctlsize;
	const char *failreason;
	struct wdc_regs *wdr;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
#if NATA_DMA
		aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "bus-master DMA support present");
		/*
		 * For the generic driver, DMA must be explicitly enabled
		 * via the PCIIDE_OPTIONS_DMA config-file flag.
		 */
		if (sc->sc_pp == &default_product_desc &&
		    (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
		    PCIIDE_OPTIONS_DMA) == 0) {
			aprint_verbose(", but unused (no driver support)");
			sc->sc_dma_ok = 0;
		} else {
			pciide_mapreg_dma(sc, pa);
			if (sc->sc_dma_ok != 0)
				aprint_verbose(", used without full driver "
				    "support");
		}
#else
		aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "bus-master DMA support present, but unused (no driver "
		    "support)");
#endif	/* NATA_DMA */
	} else {
		aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "hardware does not support DMA");
#if NATA_DMA
		sc->sc_dma_ok = 0;
#endif
	}
	aprint_verbose("\n");
#if NATA_DMA
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
#endif
	/* No timing tuning in the generic driver: mode 0 only. */
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
#if NATA_DMA
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;
#endif

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		wdr = CHAN_TO_WDC_REGS(&cp->ata_channel);
		if (interface & PCIIDE_INTERFACE_PCI(channel))
			pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
			    pciide_pci_intr);
		else
			pciide_mapregs_compat(pa, cp,
			    cp->ata_channel.ch_channel, &cmdsize, &ctlsize);
		if (cp->ata_channel.ch_flags & ATACH_DISABLED)
			continue;
		/*
		 * Check to see if something appears to be there.
		 */
		failreason = NULL;
		/*
		 * In native mode, always enable the controller. It's
		 * not possible to have an ISA board using the same address
		 * anyway.
		 */
		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
			wdcattach(&cp->ata_channel);
			continue;
		}
		if (!wdcprobe(&cp->ata_channel)) {
			failreason = "not responding; disabled or no drives?";
			goto next;
		}
		/*
		 * Now, make sure it's actually attributable to this PCI IDE
		 * channel by trying to access the channel again while the
		 * PCI IDE controller's I/O space is disabled. (If the
		 * channel no longer appears to be there, it belongs to
		 * this controller.) YUCK!
		 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
		    csr & ~PCI_COMMAND_IO_ENABLE);
		if (wdcprobe(&cp->ata_channel))
			failreason = "other hardware responding at addresses";
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, csr);
next:
		if (failreason) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (%s)\n", cp->name, failreason);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			/* release the register mappings made above */
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
			    cmdsize);
			bus_space_unmap(wdr->ctl_iot, wdr->ctl_ioh, ctlsize);
		} else {
			pciide_map_compat_intr(pa, cp,
			    cp->ata_channel.ch_channel);
			wdcattach(&cp->ata_channel);
		}
	}

#if NATA_DMA
	if (sc->sc_dma_ok == 0)
		return;

	/* Allocate DMA maps */
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		idedma_ctl = 0;
		cp = &sc->pciide_channels[channel];
		for (drive = 0; drive < cp->ata_channel.ch_ndrive; drive++) {
			/*
			 * we have not probed the drives yet, allocate
			 * ressources for all of them.
			 */
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				aprint_error(
				    "%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    device_xname(
				      sc->sc_wdcdev.sc_atac.atac_dev),
				    channel, drive);
				sc->sc_dma_ok = 0;
				sc->sc_wdcdev.sc_atac.atac_cap &= ~ATAC_CAP_DMA;
				sc->sc_wdcdev.irqack = NULL;
				break;
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		if (idedma_ctl != 0) {
			/* Add software bits in status register */
			bus_space_write_1(sc->sc_dma_iot,
			    cp->dma_iohs[IDEDMA_CTL], 0, idedma_ctl);
		}
	}
#endif	/* NATA_DMA */
}

/*
 * Common "set modes" routine for SATA controllers: transfer-mode tuning
 * is meaningless on SATA, so just mark the DMA-capable drives in the
 * bus-master status register.
 */
void
sata_setup_channel(struct ata_channel *chp)
{
#if NATA_DMA
	struct ata_drive_datas *drvp;
	int drive;
#if NATA_UDMA
	int s;
#endif
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;

	for (drive = 0; drive < cp->ata_channel.ch_ndrive; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
#if NATA_UDMA
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~DRIVE_DMA;
			splx(s);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else
#endif	/* NATA_UDMA */
		if (drvp->drive_flags & DRIVE_DMA) {
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
	}

	/*
	 * Nothing to do to setup modes; it is meaningless in S-ATA
	 * (but many S-ATA drives still want to get the SET_FEATURE
	 * command).
	 */
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
#endif	/* NATA_DMA */
}