/*	$NetBSD: pciide.c,v 1.79 2000/07/07 13:54:25 bouyer Exp $	*/


/*
 * Copyright (c) 1999 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christopher G. Demetriou
 *	for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#ifndef WDCDEBUG
#define WDCDEBUG
#endif

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10
#ifdef WDCDEBUG
int wdcdebug_pciide_mask = 0;
#define WDCDEBUG_PRINT(args, level) \
	if (wdcdebug_pciide_mask & (level)) printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <machine/endian.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/cy82c693var.h>
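
/*
 * PCI configuration space is only addressable in aligned 32-bit words
 * through pci_conf_read()/pci_conf_write(), so byte-wide chip registers
 * are reached by reading the containing dword and masking/merging the
 * byte of interest; that is all the two helpers below do.
 */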

/* inlines for reading/writing 8-bit PCI registers */
static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
	    int));
static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
	    int, u_int8_t));

static __inline u_int8_t
pciide_pci_read(pc, pa, reg)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
{

	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

static __inline void
pciide_pci_write(pc, pa, reg, val)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
	u_int8_t val;
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}

void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));

void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void piix_setup_channel __P((struct channel_softc*));
void piix3_4_setup_channel __P((struct channel_softc*));
static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));

void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void amd756_setup_channel __P((struct channel_softc*));

void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void apollo_setup_channel __P((struct channel_softc*));

void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_setup_channel __P((struct channel_softc*));
void cmd_channel_map __P((struct pci_attach_args *,
	    struct pciide_softc *, int));
int cmd_pci_intr __P((void *));
void cmd646_9_irqack __P((struct channel_softc *));

void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cy693_setup_channel __P((struct channel_softc*));

void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sis_setup_channel __P((struct channel_softc*));

void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acer_setup_channel __P((struct channel_softc*));
int acer_pci_intr __P((void *));

void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void pdc202xx_setup_channel __P((struct channel_softc*));
int pdc202xx_pci_intr __P((void *));

void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void opti_setup_channel __P((struct channel_softc*));

void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void hpt_setup_channel __P((struct channel_softc*));
int hpt_pci_intr __P((void *));

void pciide_channel_dma_setup __P((struct pciide_channel *));
int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
int pciide_dma_init __P((void*, int, int, void *, size_t, int));
void pciide_dma_start __P((void*, int, int));
int pciide_dma_finish __P((void*, int, int, int));
void pciide_irqack __P((struct channel_softc *));
void pciide_print_modes __P((struct pciide_channel *));

struct pciide_product_desc {
	u_int32_t ide_product;
	int ide_flags;
	const char *ide_name;
	/* map and setup chip, probe drives */
	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};

const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_82092AA,
	  0,
	  "Intel 82092AA IDE controller",
	  default_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371FB_IDE,
	  0,
	  "Intel 82371FB IDE controller (PIIX)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371SB_IDE,
	  0,
	  "Intel 82371SB IDE Interface (PIIX3)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371AB_IDE,
	  0,
	  "Intel 82371AB IDE controller (PIIX4)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AA_IDE,
	  0,
	  "Intel 82801AA IDE Controller (ICH)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AB_IDE,
	  0,
	  "Intel 82801AB IDE Controller (ICH0)",
	  piix_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,
	  0,
	  "Advanced Micro Devices AMD756 IDE Controller",
	  amd756_chip_map
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
	  0,
	  "VIA Tech VT82C586 IDE Controller",
	  apollo_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
	  0,
	  "VIA Tech VT82C586A IDE Controller",
	  apollo_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,
	  0,
	  "Cypress 82C693 IDE Controller",
	  cy693_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5597_IDE,
	  0,
	  "Silicon Integrated System 5597/5598 IDE controller",
	  sis_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_ULTRA33,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA66,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621,
	  0,
	  "OPTi 82c621 PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82C568,
	  0,
	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82D568,
	  0,
	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Triones/Highpoint HPT366/370 IDE Controller",
	  hpt_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products },
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
	{ PCI_VENDOR_VIATECH, pciide_via_products },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
	{ PCI_VENDOR_SIS, pciide_sis_products },
	{ PCI_VENDOR_ALI, pciide_acer_products },
	{ PCI_VENDOR_PROMISE, pciide_promise_products },
	{ PCI_VENDOR_AMD, pciide_amd_products },
	{ PCI_VENDOR_OPTI, pciide_opti_products },
	{ PCI_VENDOR_TRIONES, pciide_triones_products },
	{ 0, NULL }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01

int pciide_match __P((struct device *, struct cfdata *, void *));
void pciide_attach __P((struct device *, struct device *, void *));

struct cfattach pciide_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};

int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
int pciide_mapregs_compat __P(( struct pci_attach_args *,
	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
int pciide_mapregs_native __P((struct pci_attach_args *,
	    struct pciide_channel *, bus_size_t *, bus_size_t *,
	    int (*pci_intr) __P((void *))));
void pciide_mapreg_dma __P((struct pciide_softc *,
	    struct pci_attach_args *));
int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
void pciide_mapchan __P((struct pci_attach_args *,
	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
	    int (*pci_intr) __P((void *))));
int pciide_chan_candisable __P((struct pciide_channel *));
void pciide_map_compat_intr __P(( struct pci_attach_args *,
	    struct pciide_channel *, int, int));
int pciide_print __P((void *, const char *pnp));
int pciide_compat_intr __P((void *));
int pciide_pci_intr __P((void *));
const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));

const struct pciide_product_desc *
pciide_lookup_product(id)
	u_int32_t id;
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;

	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if ((pp = vp->ide_products) == NULL)
		return NULL;

	for (; pp->ide_name != NULL; pp++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (pp->ide_name == NULL)
		return NULL;
	return pp;
}

int
pciide_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		return (1);
	}

	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
		return (1);
	}

	return (0);
}

void
pciide_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	pcireg_t csr;
	char devinfo[256];
	const char *displaydev;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL) {
		sc->sc_pp = &default_product_desc;
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
		displaydev = devinfo;
	} else
		displaydev = sc->sc_pp->ide_name;

	printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
#ifdef WDCDEBUG
	if (wdcdebug_pciide_mask & DEBUG_PROBE)
		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
	sc->sc_pp->chip_map(sc, pa);

	if (sc->sc_dma_ok) {
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}
	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t csr;

	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		printf("%s: device disabled (at %s)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
		    "device" : "bridge");
		return 0;
	}
	return 1;
}

int
pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan;
	bus_size_t *cmdsizep, *ctlsizep;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	wdc_cp->cmd_iot = pa->pa_iot;
	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s channel cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;
	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}

	return (1);
}
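
/*
 * Native-PCI register mapping.  Per the PCI IDE controller specification
 * cited at the top of this file, a channel in compatibility mode (handled
 * above) decodes the fixed legacy addresses (0x1f0/0x3f6 for the primary
 * channel, 0x170/0x376 for the secondary) and interrupts on ISA IRQ 14/15,
 * whereas a native-PCI channel takes its command and control blocks from
 * the controller's PCI base address registers and shares the function's
 * PCI interrupt pin, which is established once below and reused for both
 * channels.
 */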
int
pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args * pa;
	struct pciide_channel *cp;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr) __P((void *));
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
		    pa->pa_intrline, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return 0;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return 0;
		}
	}
	cp->ih = sc->sc_pci_ih;
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
		printf("%s: couldn't map %s channel cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}

	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
		printf("%s: couldn't map %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return 0;
	}
	return (1);
}

void
pciide_mapreg_dma(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t maptype;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", but unsupported register maptype (0x%x)", maptype);
	}
}

int
pciide_compat_intr(arg)
	void *arg;
{
	struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p\n", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

int
pciide_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		/* if this channel not waiting for intr, skip */
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

void
pciide_channel_dma_setup(cp)
	struct pciide_channel *cp;
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

int
pciide_dma_table_setup(sc, channel, drive)
	struct pciide_softc *sc;
	int channel, drive;
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return 0;

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
	    seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		printf("%s:%d: unable to create table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to load table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
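
	/*
	 * The xfer map below describes the data buffer itself.  It is
	 * created with at most NIDEDMA_TABLES segments so that every
	 * segment fits in one physical region descriptor of the table
	 * just loaded, and with IDEDMA_BYTE_COUNT_ALIGN as the boundary
	 * so that no segment crosses the 64KB limit that the bus-master
	 * IDE programming interface (cited at the top of this file)
	 * places on a single descriptor.
	 */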
	/* Create an xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		printf("%s:%d: unable to create xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	return 0;
}

int
pciide_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready. Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0);
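	/*
	 * IDEDMA_CMD_WRITE is named from the bus-master's point of view:
	 * a disk read means the DMA engine writes into host memory, so
	 * the bit is set when the caller asked for WDC_DMA_READ.
	 */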
	/* remember flags */
	dma_maps->dma_flags = flags;
	return 0;
}

void
pciide_dma_start(v, channel, drive)
	void *v;
	int channel, drive;
{
	struct pciide_softc *sc = v;

	WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
}

int
pciide_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}

void
pciide_irqack(chp)
	struct channel_softc *chp;
{
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* clear status bits in IDE DMA registers */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
}

/* some common code used by several chip_map */
int
pciide_chansetup(sc, channel, interface)
	struct pciide_softc *sc;
	int channel;
	pcireg_t interface;
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];

	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.channel = channel;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;
	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		printf("%s %s channel: "
		    "can't allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}
	printf("%s: %s channel %s to %s mode\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");
	return 1;
}

/* some common code used by several chip channel_map */
void
pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	pcireg_t interface;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr) __P((void *));
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
		    pci_intr);
	else
		cp->hw_ok = pciide_mapregs_compat(pa, cp,
		    wdc_cp->channel, cmdsizep, ctlsizep);

	if (cp->hw_ok == 0)
		return;
	wdc_cp->data32iot = wdc_cp->cmd_iot;
	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
	wdcattach(wdc_cp);
}

/*
 * Generic code to call to know if a channel can be disabled. Return 1
 * if channel can be disabled, 0 if not
 */
int
pciide_chan_candisable(cp)
	struct pciide_channel *cp;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
		printf("%s: disabling %s channel (no drives)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
		return 1;
	}
	return 0;
}

/*
 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
 * Set hw_ok=0 on failure
 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan, interface;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (cp->hw_ok == 0)
		return;
	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
		return;

	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
	    pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
		printf("%s: no compatibility interrupt for use by %s "
		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
	}
}

void
pciide_print_modes(cp)
	struct pciide_channel *cp;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	struct channel_softc *chp;
	struct ata_drive_datas *drvp;

	chp = &cp->wdc_channel;
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		printf("%s(%s:%d:%d): using PIO mode %d",
		    drvp->drv_softc->dv_xname,
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    chp->channel, drive, drvp->PIO_mode);
		if (drvp->drive_flags & DRIVE_DMA)
			printf(", DMA mode %d", drvp->DMA_mode);
		if (drvp->drive_flags & DRIVE_UDMA)
			printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
			printf(" (using DMA data transfers)");
		printf("\n");
	}
}

void
default_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t csr;
	int channel, drive;
	struct ata_drive_datas *drvp;
	u_int8_t idedma_ctl;
	bus_size_t cmdsize, ctlsize;
	char *failreason;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
		printf("%s: bus-master DMA support present",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pp == &default_product_desc &&
		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
		    PCIIDE_OPTIONS_DMA) == 0) {
			printf(", but unused (no driver support)");
			sc->sc_dma_ok = 0;
		} else {
			pciide_mapreg_dma(sc, pa);
			if (sc->sc_dma_ok != 0)
				printf(", used without full driver "
				    "support");
		}
	} else {
		printf("%s: hardware does not support DMA",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		sc->sc_dma_ok = 0;
	}
	printf("\n");
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 0;
	sc->sc_wdcdev.DMA_cap = 0;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, pciide_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp,
			    channel, &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			continue;
		/*
		 * Check to see if something appears to be there.
		 */
		failreason = NULL;
		if (!wdcprobe(&cp->wdc_channel)) {
			failreason = "not responding; disabled or no drives?";
			goto next;
		}
		/*
		 * Now, make sure it's actually attributable to this PCI IDE
		 * channel by trying to access the channel again while the
		 * PCI IDE controller's I/O space is disabled.  (If the
		 * channel no longer appears to be there, it belongs to
		 * this controller.)  YUCK!
		 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
		    csr & ~PCI_COMMAND_IO_ENABLE);
		if (wdcprobe(&cp->wdc_channel))
			failreason = "other hardware responding at addresses";
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, csr);
next:
		if (failreason) {
			printf("%s: %s channel ignored (%s)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
			    failreason);
			cp->hw_ok = 0;
			bus_space_unmap(cp->wdc_channel.cmd_iot,
			    cp->wdc_channel.cmd_ioh, cmdsize);
			bus_space_unmap(cp->wdc_channel.ctl_iot,
			    cp->wdc_channel.ctl_ioh, ctlsize);
		} else {
			pciide_map_compat_intr(pa, cp, channel, interface);
		}
		if (cp->hw_ok) {
			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
			wdcattach(&cp->wdc_channel);
		}
	}

	if (sc->sc_dma_ok == 0)
		return;

	/* Allocate DMA maps */
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		idedma_ctl = 0;
		cp = &sc->pciide_channels[channel];
		for (drive = 0; drive < 2; drive++) {
			drvp = &cp->wdc_channel.ch_drive[drive];
			/* If no drive, skip */
			if ((drvp->drive_flags & DRIVE) == 0)
				continue;
			if ((drvp->drive_flags & DRIVE_DMA) == 0)
				continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				printf("%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
			}
			printf("%s:%d:%d: using DMA data transfers\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    channel, drive);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		if (idedma_ctl != 0) {
			/* Add software bits in status register */
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
			    idedma_ctl);
		}
	}
}
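
/*
 * Intel PIIX/PIIX3/PIIX4/ICH support.  Timings live in PCI configuration
 * space: PIIX_IDETIM holds the per-channel enable bit plus the ISP/RTC
 * cycle timings and the per-drive DTE/PPE/IE/TIME bits, PIIX_SIDETIM
 * (PIIX3 and later, enabled through SITRE) supplies separate slave
 * timings, PIIX_UDMAREG (PIIX4/ICH) enables and times Ultra-DMA, and
 * PIIX_CONFIG carries the ICH ping-pong buffer and 80-wire cable
 * detection bits used below.
 */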
void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap =
	    (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		/* PIIX is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}

void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess with the drives' modes: the PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives support DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		drvp[0].DMA_mode = mode[0];
		drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if mode not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If neither drive uses DMA, take the lower PIO mode, unless
	 * one of them is PIO mode < 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are setup */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we get here, neither drive is using DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller.  Enable
	 * it per-drive
	 */
	for (drive = 0; drive < 2; drive++) {
		/* If no drive, skip */
		if ((drvp[drive].drive_flags & DRIVE) == 0)
			continue;
		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
		if (drvp[drive].drive_flags & DRIVE_DMA)
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pciide_print_modes(cp);
}

void
piix3_4_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	int channel = chp->channel;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
	    PIIX_SIDETIM_RTC_MASK(channel));

	idedma_ctl = 0;
	/* If channel disabled, no need to go further */
	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
		return;
	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
		    PIIX_UDMATIM_SET(0x3, channel, drive));
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0))
			goto pio;

		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
			ideconf |= PIIX_CONFIG_PINGPONG;
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
			/* setup Ultra/66 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 2)
				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
			else
				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
			udmareg |= PIIX_UDMATIM_SET(
			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (drive == 0) {
				idetim |= piix_setup_idetim_timings(
				    drvp->DMA_mode, 1, channel);
			} else {
				sidetim |= piix_setup_sidetim_timings(
				    drvp->DMA_mode, 1, channel);
				idetim = PIIX_IDETIM_SET(idetim,
				    PIIX_IDETIM_SITRE, channel);
			}
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* use PIO mode */
		idetim |= piix_setup_idetim_drvs(drvp);
		if (drive == 0) {
			idetim |= piix_setup_idetim_timings(
			    drvp->PIO_mode, 0, channel);
		} else {
			sidetim |= piix_setup_sidetim_timings(
			    drvp->PIO_mode, 0, channel);
			idetim = PIIX_IDETIM_SET(idetim,
			    PIIX_IDETIM_SITRE, channel);
		}
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
	pciide_print_modes(cp);
}


/* setup ISP and RTC fields, based on mode */
static u_int32_t
piix_setup_idetim_timings(mode, dma, channel)
	u_int8_t mode;
	u_int8_t dma;
	u_int8_t channel;
{

	if (dma)
		return PIIX_IDETIM_SET(0,
		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
		    channel);
	else
		return PIIX_IDETIM_SET(0,
		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
		    channel);
}

/* setup DTE, PPE, IE and TIME field based on PIO mode */
static u_int32_t
piix_setup_idetim_drvs(drvp)
	struct ata_drive_datas *drvp;
{
	u_int32_t ret = 0;
	struct channel_softc *chp = drvp->chnl_softc;
	u_int8_t channel = chp->channel;
	u_int8_t drive = drvp->drive;

	/*
	 * If the drive is using UDMA, the timing setup is independent,
	 * so just check DMA and PIO here.
	 */
	if (drvp->drive_flags & DRIVE_DMA) {
		/* if mode = DMA mode 0, use compatible timings */
		if ((drvp->drive_flags & DRIVE_DMA) &&
		    drvp->DMA_mode == 0) {
			drvp->PIO_mode = 0;
			return ret;
		}
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
		/*
		 * If PIO and DMA timings are the same, use fast timings
		 * for PIO too, else use compat timings.
		 */
		if ((piix_isp_pio[drvp->PIO_mode] !=
		    piix_isp_dma[drvp->DMA_mode]) ||
		    (piix_rtc_pio[drvp->PIO_mode] !=
		    piix_rtc_dma[drvp->DMA_mode]))
			drvp->PIO_mode = 0;
		/* if PIO mode <= 2, use compat timings for PIO */
		if (drvp->PIO_mode <= 2) {
			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
			    channel);
			return ret;
		}
	}

	/*
	 * Now setup PIO modes. If mode < 2, use compat timings.
	 * Else enable fast timings. Enable IORDY and prefetch/post
	 * if PIO mode >= 3.
	 */

	if (drvp->PIO_mode < 2)
		return ret;

	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
	if (drvp->PIO_mode >= 3) {
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
	}
	return ret;
}

/* setup values in SIDETIM registers, based on mode */
static u_int32_t
piix_setup_sidetim_timings(mode, dma, channel)
	u_int8_t mode;
	u_int8_t dma;
	u_int8_t channel;
{
	if (dma)
		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
	else
		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
}

void
amd756_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;
	pcireg_t chanenable;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 4;
	sc->sc_wdcdev.set_modes = amd756_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);

	WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
	    DEBUG_PROBE);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);

		if (pciide_chan_candisable(cp))
			chanenable &= ~AMD756_CHAN_EN(channel);
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;

		amd756_setup_channel(&cp->wdc_channel);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
	    chanenable);
	return;
}

void
amd756_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));

	idedma_ctl = 0;
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
	datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
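
	/*
	 * The AMD756 keeps a single pulse/recovery data timing per drive
	 * that covers both PIO and multi-word DMA cycles; Ultra-DMA is
	 * timed through the separate AMD756_UDMA register, so a UDMA
	 * drive simply keeps its PIO timing here.  The loop below picks
	 * one timing index per drive as min(PIO mode, DMA mode + 2),
	 * treating MW DMA mode n as timing-equivalent to PIO mode n + 2.
	 */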
pciide_channel_dma_setup(cp); 1791 1792 for (drive = 0; drive < 2; drive++) { 1793 drvp = &chp->ch_drive[drive]; 1794 /* If no drive, skip */ 1795 if ((drvp->drive_flags & DRIVE) == 0) 1796 continue; 1797 /* add timing values, setup DMA if needed */ 1798 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1799 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 1800 mode = drvp->PIO_mode; 1801 goto pio; 1802 } 1803 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1804 (drvp->drive_flags & DRIVE_UDMA)) { 1805 /* use Ultra/DMA */ 1806 drvp->drive_flags &= ~DRIVE_DMA; 1807 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 1808 AMD756_UDMA_EN_MTH(chp->channel, drive) | 1809 AMD756_UDMA_TIME(chp->channel, drive, 1810 amd756_udma_tim[drvp->UDMA_mode]); 1811 /* can use PIO timings, MW DMA unused */ 1812 mode = drvp->PIO_mode; 1813 } else { 1814 /* use Multiword DMA, but only if revision is OK */ 1815 drvp->drive_flags &= ~DRIVE_UDMA; 1816 #ifndef PCIIDE_AMD756_ENABLEDMA 1817 /* 1818 * The workaround doesn't seem to be necessary 1819 * with all drives, so it can be disabled by 1820 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 1821 * triggered. 1822 */ 1823 if (AMD756_CHIPREV_DISABLEDMA(rev)) { 1824 printf("%s:%d:%d: multi-word DMA disabled due " 1825 "to chip revision\n", 1826 sc->sc_wdcdev.sc_dev.dv_xname, 1827 chp->channel, drive); 1828 mode = drvp->PIO_mode; 1829 drvp->drive_flags &= ~DRIVE_DMA; 1830 goto pio; 1831 } 1832 #endif 1833 /* mode = min(pio, dma+2) */ 1834 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 1835 mode = drvp->PIO_mode; 1836 else 1837 mode = drvp->DMA_mode + 2; 1838 } 1839 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1840 1841 pio: /* setup PIO mode */ 1842 if (mode <= 2) { 1843 drvp->DMA_mode = 0; 1844 drvp->PIO_mode = 0; 1845 mode = 0; 1846 } else { 1847 drvp->PIO_mode = mode; 1848 drvp->DMA_mode = mode - 2; 1849 } 1850 datatim_reg |= 1851 AMD756_DATATIM_PULSE(chp->channel, drive, 1852 amd756_pio_set[mode]) | 1853 AMD756_DATATIM_RECOV(chp->channel, drive, 1854 amd756_pio_rec[mode]); 1855 } 1856 if (idedma_ctl != 0) { 1857 /* Add software bits in status register */ 1858 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1859 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 1860 idedma_ctl); 1861 } 1862 pciide_print_modes(cp); 1863 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 1864 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 1865 } 1866 1867 void 1868 apollo_chip_map(sc, pa) 1869 struct pciide_softc *sc; 1870 struct pci_attach_args *pa; 1871 { 1872 struct pciide_channel *cp; 1873 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1874 int channel; 1875 u_int32_t ideconf; 1876 bus_size_t cmdsize, ctlsize; 1877 1878 if (pciide_chipen(sc, pa) == 0) 1879 return; 1880 printf("%s: bus-master DMA support present", 1881 sc->sc_wdcdev.sc_dev.dv_xname); 1882 pciide_mapreg_dma(sc, pa); 1883 printf("\n"); 1884 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1885 WDC_CAPABILITY_MODE; 1886 if (sc->sc_dma_ok) { 1887 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1888 sc->sc_wdcdev.irqack = pciide_irqack; 1889 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE) 1890 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1891 } 1892 sc->sc_wdcdev.PIO_cap = 4; 1893 sc->sc_wdcdev.DMA_cap = 2; 1894 sc->sc_wdcdev.UDMA_cap = 2; 1895 sc->sc_wdcdev.set_modes = apollo_setup_channel; 1896 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1897 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1898 1899 WDCDEBUG_PRINT(("apollo_chip_map: old 
APO_IDECONF=0x%x, "
1900 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1901 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1902 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1903 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1904 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1905 DEBUG_PROBE);
1906
1907 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1908 cp = &sc->pciide_channels[channel];
1909 if (pciide_chansetup(sc, channel, interface) == 0)
1910 continue;
1911
1912 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1913 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1914 printf("%s: %s channel ignored (disabled)\n",
1915 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1916 continue;
1917 }
1918 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1919 pciide_pci_intr);
1920 if (cp->hw_ok == 0)
1921 continue;
1922 if (pciide_chan_candisable(cp)) {
1923 ideconf &= ~APO_IDECONF_EN(channel);
1924 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1925 ideconf);
1926 }
1927 pciide_map_compat_intr(pa, cp, channel, interface);
1928
1929 if (cp->hw_ok == 0)
1930 continue;
1931 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1932 }
1933 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1934 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1935 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1936 }
1937
1938 void
1939 apollo_setup_channel(chp)
1940 struct channel_softc *chp;
1941 {
1942 u_int32_t udmatim_reg, datatim_reg;
1943 u_int8_t idedma_ctl;
1944 int mode, drive;
1945 struct ata_drive_datas *drvp;
1946 struct pciide_channel *cp = (struct pciide_channel*)chp;
1947 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1948
1949 idedma_ctl = 0;
1950 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1951 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1952 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1953 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1954
1955 /* setup DMA if needed */
1956 pciide_channel_dma_setup(cp);
1957
1958 for (drive = 0; drive < 2; drive++) {
1959 drvp = &chp->ch_drive[drive];
1960 /* If no drive, skip */
1961 if ((drvp->drive_flags & DRIVE) == 0)
1962 continue;
1963 /* add timing values, setup DMA if needed */
1964 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1965 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1966 mode = drvp->PIO_mode;
1967 goto pio;
1968 }
1969 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1970 (drvp->drive_flags & DRIVE_UDMA)) {
1971 /* use Ultra/DMA */
1972 drvp->drive_flags &= ~DRIVE_DMA;
1973 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1974 APO_UDMA_EN_MTH(chp->channel, drive) |
1975 APO_UDMA_TIME(chp->channel, drive,
1976 apollo_udma_tim[drvp->UDMA_mode]);
1977 /* can use PIO timings, MW DMA unused */
1978 mode = drvp->PIO_mode;
1979 } else {
1980 /* use Multiword DMA */
1981 drvp->drive_flags &= ~DRIVE_UDMA;
1982 /* mode = min(pio, dma+2) */
1983 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1984 mode = drvp->PIO_mode;
1985 else
1986 mode = drvp->DMA_mode + 2;
1987 }
1988 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1989
1990 pio: /* setup PIO mode */
1991 if (mode <= 2) {
1992 drvp->DMA_mode = 0;
1993 drvp->PIO_mode = 0;
1994 mode = 0;
1995 } else {
1996 drvp->PIO_mode = mode;
1997 drvp->DMA_mode = mode - 2;
1998 }
1999 datatim_reg |=
2000 APO_DATATIM_PULSE(chp->channel, drive,
2001 apollo_pio_set[mode]) |
2002 APO_DATATIM_RECOV(chp->channel, drive,
2003 apollo_pio_rec[mode]);
2004 }
2005 if
(idedma_ctl != 0) { 2006 /* Add software bits in status register */ 2007 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2008 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2009 idedma_ctl); 2010 } 2011 pciide_print_modes(cp); 2012 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2013 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2014 } 2015 2016 void 2017 cmd_channel_map(pa, sc, channel) 2018 struct pci_attach_args *pa; 2019 struct pciide_softc *sc; 2020 int channel; 2021 { 2022 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2023 bus_size_t cmdsize, ctlsize; 2024 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2025 int interface; 2026 2027 /* 2028 * The 0648/0649 can be told to identify as a RAID controller. 2029 * In this case, we have to fake interface 2030 */ 2031 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2032 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2033 PCIIDE_INTERFACE_SETTABLE(1); 2034 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2035 CMD_CONF_DSA1) 2036 interface |= PCIIDE_INTERFACE_PCI(0) | 2037 PCIIDE_INTERFACE_PCI(1); 2038 } else { 2039 interface = PCI_INTERFACE(pa->pa_class); 2040 } 2041 2042 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2043 cp->name = PCIIDE_CHANNEL_NAME(channel); 2044 cp->wdc_channel.channel = channel; 2045 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2046 2047 if (channel > 0) { 2048 cp->wdc_channel.ch_queue = 2049 sc->pciide_channels[0].wdc_channel.ch_queue; 2050 } else { 2051 cp->wdc_channel.ch_queue = 2052 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2053 } 2054 if (cp->wdc_channel.ch_queue == NULL) { 2055 printf("%s %s channel: " 2056 "can't allocate memory for command queue", 2057 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2058 return; 2059 } 2060 2061 printf("%s: %s channel %s to %s mode\n", 2062 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2063 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 2064 "configured" : "wired", 2065 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 2066 "native-PCI" : "compatibility"); 2067 2068 /* 2069 * with a CMD PCI64x, if we get here, the first channel is enabled: 2070 * there's no way to disable the first channel without disabling 2071 * the whole device 2072 */ 2073 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 2074 printf("%s: %s channel ignored (disabled)\n", 2075 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2076 return; 2077 } 2078 2079 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 2080 if (cp->hw_ok == 0) 2081 return; 2082 if (channel == 1) { 2083 if (pciide_chan_candisable(cp)) { 2084 ctrl &= ~CMD_CTRL_2PORT; 2085 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2086 CMD_CTRL, ctrl); 2087 } 2088 } 2089 pciide_map_compat_intr(pa, cp, channel, interface); 2090 } 2091 2092 int 2093 cmd_pci_intr(arg) 2094 void *arg; 2095 { 2096 struct pciide_softc *sc = arg; 2097 struct pciide_channel *cp; 2098 struct channel_softc *wdc_cp; 2099 int i, rv, crv; 2100 u_int32_t priirq, secirq; 2101 2102 rv = 0; 2103 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2104 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2105 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2106 cp = &sc->pciide_channels[i]; 2107 wdc_cp = &cp->wdc_channel; 2108 /* If a compat channel skip. 
*/
2109 if (cp->compat)
2110 continue;
2111 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2112 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2113 crv = wdcintr(wdc_cp);
2114 if (crv == 0)
2115 printf("%s:%d: bogus intr\n",
2116 sc->sc_wdcdev.sc_dev.dv_xname, i);
2117 else
2118 rv = 1;
2119 }
2120 }
2121 return rv;
2122 }
2123
2124 void
2125 cmd_chip_map(sc, pa)
2126 struct pciide_softc *sc;
2127 struct pci_attach_args *pa;
2128 {
2129 int channel;
2130
2131 /*
2132 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2133 * and the base address registers can be disabled at the
2134 * hardware level. In this case, the device is wired
2135 * in compat mode and its first channel is always enabled,
2136 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2137 * In fact, it seems that the first channel of the CMD PCI0640
2138 * can't be disabled.
2139 */
2140
2141 #ifdef PCIIDE_CMD064x_DISABLE
2142 if (pciide_chipen(sc, pa) == 0)
2143 return;
2144 #endif
2145
2146 printf("%s: hardware does not support DMA\n",
2147 sc->sc_wdcdev.sc_dev.dv_xname);
2148 sc->sc_dma_ok = 0;
2149
2150 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2151 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2152 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2153
2154 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2155 cmd_channel_map(pa, sc, channel);
2156 }
2157 }
2158
2159 void
2160 cmd0643_9_chip_map(sc, pa)
2161 struct pciide_softc *sc;
2162 struct pci_attach_args *pa;
2163 {
2164 struct pciide_channel *cp;
2165 int channel;
2166
2167 /*
2168 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2169 * and the base address registers can be disabled at the
2170 * hardware level. In this case, the device is wired
2171 * in compat mode and its first channel is always enabled,
2172 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2173 * In fact, it seems that the first channel of the CMD PCI0640
2174 * can't be disabled.
2175 */ 2176 2177 #ifdef PCIIDE_CMD064x_DISABLE 2178 if (pciide_chipen(sc, pa) == 0) 2179 return; 2180 #endif 2181 printf("%s: bus-master DMA support present", 2182 sc->sc_wdcdev.sc_dev.dv_xname); 2183 pciide_mapreg_dma(sc, pa); 2184 printf("\n"); 2185 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2186 WDC_CAPABILITY_MODE; 2187 if (sc->sc_dma_ok) { 2188 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2189 switch (sc->sc_pp->ide_product) { 2190 case PCI_PRODUCT_CMDTECH_649: 2191 case PCI_PRODUCT_CMDTECH_648: 2192 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2193 sc->sc_wdcdev.UDMA_cap = 4; 2194 case PCI_PRODUCT_CMDTECH_646: 2195 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2196 break; 2197 default: 2198 sc->sc_wdcdev.irqack = pciide_irqack; 2199 } 2200 } 2201 2202 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2203 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2204 sc->sc_wdcdev.PIO_cap = 4; 2205 sc->sc_wdcdev.DMA_cap = 2; 2206 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2207 2208 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2209 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2210 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2211 DEBUG_PROBE); 2212 2213 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2214 cp = &sc->pciide_channels[channel]; 2215 cmd_channel_map(pa, sc, channel); 2216 if (cp->hw_ok == 0) 2217 continue; 2218 cmd0643_9_setup_channel(&cp->wdc_channel); 2219 } 2220 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2221 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2222 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2223 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2224 DEBUG_PROBE); 2225 } 2226 2227 void 2228 cmd0643_9_setup_channel(chp) 2229 struct channel_softc *chp; 2230 { 2231 struct ata_drive_datas *drvp; 2232 u_int8_t tim; 2233 u_int32_t idedma_ctl, udma_reg; 2234 int drive; 2235 struct pciide_channel *cp = (struct pciide_channel*)chp; 2236 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2237 2238 idedma_ctl = 0; 2239 /* setup DMA if needed */ 2240 pciide_channel_dma_setup(cp); 2241 2242 for (drive = 0; drive < 2; drive++) { 2243 drvp = &chp->ch_drive[drive]; 2244 /* If no drive, skip */ 2245 if ((drvp->drive_flags & DRIVE) == 0) 2246 continue; 2247 /* add timing values, setup DMA if needed */ 2248 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 2249 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 2250 if (drvp->drive_flags & DRIVE_UDMA) { 2251 /* UltraDMA on a 0648 or 0649 */ 2252 udma_reg = pciide_pci_read(sc->sc_pc, 2253 sc->sc_tag, CMD_UDMATIM(chp->channel)); 2254 if (drvp->UDMA_mode > 2 && 2255 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 2256 CMD_BICSR) & 2257 CMD_BICSR_80(chp->channel)) == 0) 2258 drvp->UDMA_mode = 2; 2259 if (drvp->UDMA_mode > 2) 2260 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 2261 else 2262 udma_reg |= CMD_UDMATIM_UDMA33(drive); 2263 udma_reg |= CMD_UDMATIM_UDMA(drive); 2264 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 2265 CMD_UDMATIM_TIM_OFF(drive)); 2266 udma_reg |= 2267 (cmd0648_9_tim_udma[drvp->UDMA_mode] << 2268 CMD_UDMATIM_TIM_OFF(drive)); 2269 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2270 CMD_UDMATIM(chp->channel), udma_reg); 2271 } else { 2272 /* 2273 * use Multiword DMA. 
2274 * Timings will be used for both PIO and DMA, 2275 * so adjust DMA mode if needed 2276 * if we have a 0648/9, turn off UDMA 2277 */ 2278 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2279 udma_reg = pciide_pci_read(sc->sc_pc, 2280 sc->sc_tag, 2281 CMD_UDMATIM(chp->channel)); 2282 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 2283 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2284 CMD_UDMATIM(chp->channel), 2285 udma_reg); 2286 } 2287 if (drvp->PIO_mode >= 3 && 2288 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2289 drvp->DMA_mode = drvp->PIO_mode - 2; 2290 } 2291 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 2292 } 2293 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2294 } 2295 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2296 CMD_DATA_TIM(chp->channel, drive), tim); 2297 } 2298 if (idedma_ctl != 0) { 2299 /* Add software bits in status register */ 2300 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2301 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2302 idedma_ctl); 2303 } 2304 pciide_print_modes(cp); 2305 } 2306 2307 void 2308 cmd646_9_irqack(chp) 2309 struct channel_softc *chp; 2310 { 2311 u_int32_t priirq, secirq; 2312 struct pciide_channel *cp = (struct pciide_channel*)chp; 2313 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2314 2315 if (chp->channel == 0) { 2316 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2317 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 2318 } else { 2319 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2320 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 2321 } 2322 pciide_irqack(chp); 2323 } 2324 2325 void 2326 cy693_chip_map(sc, pa) 2327 struct pciide_softc *sc; 2328 struct pci_attach_args *pa; 2329 { 2330 struct pciide_channel *cp; 2331 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2332 bus_size_t cmdsize, ctlsize; 2333 2334 if (pciide_chipen(sc, pa) == 0) 2335 return; 2336 /* 2337 * this chip has 2 PCI IDE functions, one for primary and one for 2338 * secondary. 
So we need to call pciide_mapregs_compat() with 2339 * the real channel 2340 */ 2341 if (pa->pa_function == 1) { 2342 sc->sc_cy_compatchan = 0; 2343 } else if (pa->pa_function == 2) { 2344 sc->sc_cy_compatchan = 1; 2345 } else { 2346 printf("%s: unexpected PCI function %d\n", 2347 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2348 return; 2349 } 2350 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2351 printf("%s: bus-master DMA support present", 2352 sc->sc_wdcdev.sc_dev.dv_xname); 2353 pciide_mapreg_dma(sc, pa); 2354 } else { 2355 printf("%s: hardware does not support DMA", 2356 sc->sc_wdcdev.sc_dev.dv_xname); 2357 sc->sc_dma_ok = 0; 2358 } 2359 printf("\n"); 2360 2361 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 2362 if (sc->sc_cy_handle == NULL) { 2363 printf("%s: unable to map hyperCache control registers\n", 2364 sc->sc_wdcdev.sc_dev.dv_xname); 2365 sc->sc_dma_ok = 0; 2366 } 2367 2368 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2369 WDC_CAPABILITY_MODE; 2370 if (sc->sc_dma_ok) { 2371 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2372 sc->sc_wdcdev.irqack = pciide_irqack; 2373 } 2374 sc->sc_wdcdev.PIO_cap = 4; 2375 sc->sc_wdcdev.DMA_cap = 2; 2376 sc->sc_wdcdev.set_modes = cy693_setup_channel; 2377 2378 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2379 sc->sc_wdcdev.nchannels = 1; 2380 2381 /* Only one channel for this chip; if we are here it's enabled */ 2382 cp = &sc->pciide_channels[0]; 2383 sc->wdc_chanarray[0] = &cp->wdc_channel; 2384 cp->name = PCIIDE_CHANNEL_NAME(0); 2385 cp->wdc_channel.channel = 0; 2386 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2387 cp->wdc_channel.ch_queue = 2388 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2389 if (cp->wdc_channel.ch_queue == NULL) { 2390 printf("%s primary channel: " 2391 "can't allocate memory for command queue", 2392 sc->sc_wdcdev.sc_dev.dv_xname); 2393 return; 2394 } 2395 printf("%s: primary channel %s to ", 2396 sc->sc_wdcdev.sc_dev.dv_xname, 2397 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
2398 "configured" : "wired"); 2399 if (interface & PCIIDE_INTERFACE_PCI(0)) { 2400 printf("native-PCI"); 2401 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 2402 pciide_pci_intr); 2403 } else { 2404 printf("compatibility"); 2405 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 2406 &cmdsize, &ctlsize); 2407 } 2408 printf(" mode\n"); 2409 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2410 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2411 wdcattach(&cp->wdc_channel); 2412 if (pciide_chan_candisable(cp)) { 2413 pci_conf_write(sc->sc_pc, sc->sc_tag, 2414 PCI_COMMAND_STATUS_REG, 0); 2415 } 2416 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 2417 if (cp->hw_ok == 0) 2418 return; 2419 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 2420 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 2421 cy693_setup_channel(&cp->wdc_channel); 2422 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 2423 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 2424 } 2425 2426 void 2427 cy693_setup_channel(chp) 2428 struct channel_softc *chp; 2429 { 2430 struct ata_drive_datas *drvp; 2431 int drive; 2432 u_int32_t cy_cmd_ctrl; 2433 u_int32_t idedma_ctl; 2434 struct pciide_channel *cp = (struct pciide_channel*)chp; 2435 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2436 int dma_mode = -1; 2437 2438 cy_cmd_ctrl = idedma_ctl = 0; 2439 2440 /* setup DMA if needed */ 2441 pciide_channel_dma_setup(cp); 2442 2443 for (drive = 0; drive < 2; drive++) { 2444 drvp = &chp->ch_drive[drive]; 2445 /* If no drive, skip */ 2446 if ((drvp->drive_flags & DRIVE) == 0) 2447 continue; 2448 /* add timing values, setup DMA if needed */ 2449 if (drvp->drive_flags & DRIVE_DMA) { 2450 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2451 /* use Multiword DMA */ 2452 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 2453 dma_mode = drvp->DMA_mode; 2454 } 2455 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2456 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 2457 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2458 CY_CMD_CTRL_IOW_REC_OFF(drive)); 2459 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2460 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 2461 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2462 CY_CMD_CTRL_IOR_REC_OFF(drive)); 2463 } 2464 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 2465 chp->ch_drive[0].DMA_mode = dma_mode; 2466 chp->ch_drive[1].DMA_mode = dma_mode; 2467 2468 if (dma_mode == -1) 2469 dma_mode = 0; 2470 2471 if (sc->sc_cy_handle != NULL) { 2472 /* Note: `multiple' is implied. */ 2473 cy82c693_write(sc->sc_cy_handle, 2474 (sc->sc_cy_compatchan == 0) ? 
2475 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 2476 } 2477 2478 pciide_print_modes(cp); 2479 2480 if (idedma_ctl != 0) { 2481 /* Add software bits in status register */ 2482 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2483 IDEDMA_CTL, idedma_ctl); 2484 } 2485 } 2486 2487 void 2488 sis_chip_map(sc, pa) 2489 struct pciide_softc *sc; 2490 struct pci_attach_args *pa; 2491 { 2492 struct pciide_channel *cp; 2493 int channel; 2494 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 2495 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2496 pcireg_t rev = PCI_REVISION(pa->pa_class); 2497 bus_size_t cmdsize, ctlsize; 2498 2499 if (pciide_chipen(sc, pa) == 0) 2500 return; 2501 printf("%s: bus-master DMA support present", 2502 sc->sc_wdcdev.sc_dev.dv_xname); 2503 pciide_mapreg_dma(sc, pa); 2504 printf("\n"); 2505 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2506 WDC_CAPABILITY_MODE; 2507 if (sc->sc_dma_ok) { 2508 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2509 sc->sc_wdcdev.irqack = pciide_irqack; 2510 if (rev >= 0xd0) 2511 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2512 } 2513 2514 sc->sc_wdcdev.PIO_cap = 4; 2515 sc->sc_wdcdev.DMA_cap = 2; 2516 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) 2517 sc->sc_wdcdev.UDMA_cap = 2; 2518 sc->sc_wdcdev.set_modes = sis_setup_channel; 2519 2520 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2521 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2522 2523 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 2524 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 2525 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE); 2526 2527 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2528 cp = &sc->pciide_channels[channel]; 2529 if (pciide_chansetup(sc, channel, interface) == 0) 2530 continue; 2531 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 2532 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 2533 printf("%s: %s channel ignored (disabled)\n", 2534 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2535 continue; 2536 } 2537 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2538 pciide_pci_intr); 2539 if (cp->hw_ok == 0) 2540 continue; 2541 if (pciide_chan_candisable(cp)) { 2542 if (channel == 0) 2543 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 2544 else 2545 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 2546 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 2547 sis_ctr0); 2548 } 2549 pciide_map_compat_intr(pa, cp, channel, interface); 2550 if (cp->hw_ok == 0) 2551 continue; 2552 sis_setup_channel(&cp->wdc_channel); 2553 } 2554 } 2555 2556 void 2557 sis_setup_channel(chp) 2558 struct channel_softc *chp; 2559 { 2560 struct ata_drive_datas *drvp; 2561 int drive; 2562 u_int32_t sis_tim; 2563 u_int32_t idedma_ctl; 2564 struct pciide_channel *cp = (struct pciide_channel*)chp; 2565 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2566 2567 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 2568 "channel %d 0x%x\n", chp->channel, 2569 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 2570 DEBUG_PROBE); 2571 sis_tim = 0; 2572 idedma_ctl = 0; 2573 /* setup DMA if needed */ 2574 pciide_channel_dma_setup(cp); 2575 2576 for (drive = 0; drive < 2; drive++) { 2577 drvp = &chp->ch_drive[drive]; 2578 /* If no drive, skip */ 2579 if ((drvp->drive_flags & DRIVE) == 0) 2580 continue; 2581 /* add timing values, setup DMA if needed */ 2582 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2583 (drvp->drive_flags & DRIVE_UDMA) == 0) 2584 goto pio; 2585 2586 if (drvp->drive_flags & 
DRIVE_UDMA) { 2587 /* use Ultra/DMA */ 2588 drvp->drive_flags &= ~DRIVE_DMA; 2589 sis_tim |= sis_udma_tim[drvp->UDMA_mode] << 2590 SIS_TIM_UDMA_TIME_OFF(drive); 2591 sis_tim |= SIS_TIM_UDMA_EN(drive); 2592 } else { 2593 /* 2594 * use Multiword DMA 2595 * Timings will be used for both PIO and DMA, 2596 * so adjust DMA mode if needed 2597 */ 2598 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2599 drvp->PIO_mode = drvp->DMA_mode + 2; 2600 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2601 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 2602 drvp->PIO_mode - 2 : 0; 2603 if (drvp->DMA_mode == 0) 2604 drvp->PIO_mode = 0; 2605 } 2606 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2607 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] << 2608 SIS_TIM_ACT_OFF(drive); 2609 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 2610 SIS_TIM_REC_OFF(drive); 2611 } 2612 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 2613 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 2614 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 2615 if (idedma_ctl != 0) { 2616 /* Add software bits in status register */ 2617 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2618 IDEDMA_CTL, idedma_ctl); 2619 } 2620 pciide_print_modes(cp); 2621 } 2622 2623 void 2624 acer_chip_map(sc, pa) 2625 struct pciide_softc *sc; 2626 struct pci_attach_args *pa; 2627 { 2628 struct pciide_channel *cp; 2629 int channel; 2630 pcireg_t cr, interface; 2631 bus_size_t cmdsize, ctlsize; 2632 2633 if (pciide_chipen(sc, pa) == 0) 2634 return; 2635 printf("%s: bus-master DMA support present", 2636 sc->sc_wdcdev.sc_dev.dv_xname); 2637 pciide_mapreg_dma(sc, pa); 2638 printf("\n"); 2639 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2640 WDC_CAPABILITY_MODE; 2641 if (sc->sc_dma_ok) { 2642 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2643 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2644 sc->sc_wdcdev.irqack = pciide_irqack; 2645 } 2646 2647 sc->sc_wdcdev.PIO_cap = 4; 2648 sc->sc_wdcdev.DMA_cap = 2; 2649 sc->sc_wdcdev.UDMA_cap = 2; 2650 sc->sc_wdcdev.set_modes = acer_setup_channel; 2651 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2652 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2653 2654 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 2655 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 2656 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 2657 2658 /* Enable "microsoft register bits" R/W. 
*/ 2659 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 2660 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 2661 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 2662 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 2663 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 2664 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 2665 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 2666 ~ACER_CHANSTATUSREGS_RO); 2667 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 2668 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 2669 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 2670 /* Don't use cr, re-read the real register content instead */ 2671 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 2672 PCI_CLASS_REG)); 2673 2674 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2675 cp = &sc->pciide_channels[channel]; 2676 if (pciide_chansetup(sc, channel, interface) == 0) 2677 continue; 2678 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 2679 printf("%s: %s channel ignored (disabled)\n", 2680 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2681 continue; 2682 } 2683 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2684 acer_pci_intr); 2685 if (cp->hw_ok == 0) 2686 continue; 2687 if (pciide_chan_candisable(cp)) { 2688 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 2689 pci_conf_write(sc->sc_pc, sc->sc_tag, 2690 PCI_CLASS_REG, cr); 2691 } 2692 pciide_map_compat_intr(pa, cp, channel, interface); 2693 acer_setup_channel(&cp->wdc_channel); 2694 } 2695 } 2696 2697 void 2698 acer_setup_channel(chp) 2699 struct channel_softc *chp; 2700 { 2701 struct ata_drive_datas *drvp; 2702 int drive; 2703 u_int32_t acer_fifo_udma; 2704 u_int32_t idedma_ctl; 2705 struct pciide_channel *cp = (struct pciide_channel*)chp; 2706 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2707 2708 idedma_ctl = 0; 2709 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 2710 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 2711 acer_fifo_udma), DEBUG_PROBE); 2712 /* setup DMA if needed */ 2713 pciide_channel_dma_setup(cp); 2714 2715 for (drive = 0; drive < 2; drive++) { 2716 drvp = &chp->ch_drive[drive]; 2717 /* If no drive, skip */ 2718 if ((drvp->drive_flags & DRIVE) == 0) 2719 continue; 2720 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 2721 "channel %d drive %d 0x%x\n", chp->channel, drive, 2722 pciide_pci_read(sc->sc_pc, sc->sc_tag, 2723 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 2724 /* clear FIFO/DMA mode */ 2725 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 2726 ACER_UDMA_EN(chp->channel, drive) | 2727 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 2728 2729 /* add timing values, setup DMA if needed */ 2730 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2731 (drvp->drive_flags & DRIVE_UDMA) == 0) { 2732 acer_fifo_udma |= 2733 ACER_FTH_OPL(chp->channel, drive, 0x1); 2734 goto pio; 2735 } 2736 2737 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 2738 if (drvp->drive_flags & DRIVE_UDMA) { 2739 /* use Ultra/DMA */ 2740 drvp->drive_flags &= ~DRIVE_DMA; 2741 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 2742 acer_fifo_udma |= 2743 ACER_UDMA_TIM(chp->channel, drive, 2744 acer_udma[drvp->UDMA_mode]); 2745 } else { 2746 /* 2747 * use Multiword DMA 2748 * Timings will be used for both PIO and DMA, 2749 * so adjust DMA mode if needed 2750 */ 2751 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2752 drvp->PIO_mode = drvp->DMA_mode + 2; 2753 
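/*
 * Conversely, if PIO is the slower of the two, pull the DMA mode down
 * so that DMA mode + 2 never exceeds the PIO mode; if that leaves us
 * at multiword DMA 0, fall all the way back to compatible (mode 0)
 * PIO timings.
 */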
if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2754 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 2755 drvp->PIO_mode - 2 : 0; 2756 if (drvp->DMA_mode == 0) 2757 drvp->PIO_mode = 0; 2758 } 2759 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2760 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 2761 ACER_IDETIM(chp->channel, drive), 2762 acer_pio[drvp->PIO_mode]); 2763 } 2764 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 2765 acer_fifo_udma), DEBUG_PROBE); 2766 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 2767 if (idedma_ctl != 0) { 2768 /* Add software bits in status register */ 2769 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2770 IDEDMA_CTL, idedma_ctl); 2771 } 2772 pciide_print_modes(cp); 2773 } 2774 2775 int 2776 acer_pci_intr(arg) 2777 void *arg; 2778 { 2779 struct pciide_softc *sc = arg; 2780 struct pciide_channel *cp; 2781 struct channel_softc *wdc_cp; 2782 int i, rv, crv; 2783 u_int32_t chids; 2784 2785 rv = 0; 2786 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 2787 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2788 cp = &sc->pciide_channels[i]; 2789 wdc_cp = &cp->wdc_channel; 2790 /* If a compat channel skip. */ 2791 if (cp->compat) 2792 continue; 2793 if (chids & ACER_CHIDS_INT(i)) { 2794 crv = wdcintr(wdc_cp); 2795 if (crv == 0) 2796 printf("%s:%d: bogus intr\n", 2797 sc->sc_wdcdev.sc_dev.dv_xname, i); 2798 else 2799 rv = 1; 2800 } 2801 } 2802 return rv; 2803 } 2804 2805 void 2806 hpt_chip_map(sc, pa) 2807 struct pciide_softc *sc; 2808 struct pci_attach_args *pa; 2809 { 2810 struct pciide_channel *cp; 2811 int i, compatchan, revision; 2812 pcireg_t interface; 2813 bus_size_t cmdsize, ctlsize; 2814 2815 if (pciide_chipen(sc, pa) == 0) 2816 return; 2817 revision = PCI_REVISION(pa->pa_class); 2818 2819 /* 2820 * when the chip is in native mode it identifies itself as a 2821 * 'misc mass storage'. Fake interface in this case. 2822 */ 2823 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 2824 interface = PCI_INTERFACE(pa->pa_class); 2825 } else { 2826 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2827 PCIIDE_INTERFACE_PCI(0); 2828 if (revision == HPT370_REV) 2829 interface |= PCIIDE_INTERFACE_PCI(1); 2830 } 2831 2832 printf("%s: bus-master DMA support present", 2833 sc->sc_wdcdev.sc_dev.dv_xname); 2834 pciide_mapreg_dma(sc, pa); 2835 printf("\n"); 2836 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2837 WDC_CAPABILITY_MODE; 2838 if (sc->sc_dma_ok) { 2839 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2840 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2841 sc->sc_wdcdev.irqack = pciide_irqack; 2842 } 2843 sc->sc_wdcdev.PIO_cap = 4; 2844 sc->sc_wdcdev.DMA_cap = 2; 2845 sc->sc_wdcdev.UDMA_cap = 4; 2846 2847 sc->sc_wdcdev.set_modes = hpt_setup_channel; 2848 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2849 if (revision == HPT366_REV) { 2850 /* 2851 * The 366 has 2 PCI IDE functions, one for primary and one 2852 * for secondary. 
So we need to call pciide_mapregs_compat() 2853 * with the real channel 2854 */ 2855 if (pa->pa_function == 0) { 2856 compatchan = 0; 2857 } else if (pa->pa_function == 1) { 2858 compatchan = 1; 2859 } else { 2860 printf("%s: unexpected PCI function %d\n", 2861 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2862 return; 2863 } 2864 sc->sc_wdcdev.nchannels = 1; 2865 } else { 2866 sc->sc_wdcdev.nchannels = 2; 2867 } 2868 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2869 cp = &sc->pciide_channels[i]; 2870 if (sc->sc_wdcdev.nchannels > 1) { 2871 compatchan = i; 2872 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 2873 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 2874 printf("%s: %s channel ignored (disabled)\n", 2875 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2876 continue; 2877 } 2878 } 2879 if (pciide_chansetup(sc, i, interface) == 0) 2880 continue; 2881 if (interface & PCIIDE_INTERFACE_PCI(i)) { 2882 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2883 &ctlsize, hpt_pci_intr); 2884 } else { 2885 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 2886 &cmdsize, &ctlsize); 2887 } 2888 if (cp->hw_ok == 0) 2889 return; 2890 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2891 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2892 wdcattach(&cp->wdc_channel); 2893 hpt_setup_channel(&cp->wdc_channel); 2894 } 2895 2896 return; 2897 } 2898 2899 2900 void 2901 hpt_setup_channel(chp) 2902 struct channel_softc *chp; 2903 { 2904 struct ata_drive_datas *drvp; 2905 int drive; 2906 int cable; 2907 u_int32_t before, after; 2908 u_int32_t idedma_ctl; 2909 struct pciide_channel *cp = (struct pciide_channel*)chp; 2910 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2911 2912 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 2913 2914 /* setup DMA if needed */ 2915 pciide_channel_dma_setup(cp); 2916 2917 idedma_ctl = 0; 2918 2919 /* Per drive settings */ 2920 for (drive = 0; drive < 2; drive++) { 2921 drvp = &chp->ch_drive[drive]; 2922 /* If no drive, skip */ 2923 if ((drvp->drive_flags & DRIVE) == 0) 2924 continue; 2925 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 2926 HPT_IDETIM(chp->channel, drive)); 2927 2928 /* add timing values, setup DMA if needed */ 2929 if (drvp->drive_flags & DRIVE_UDMA) { 2930 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 2931 drvp->UDMA_mode > 2) 2932 drvp->UDMA_mode = 2; 2933 after = (sc->sc_wdcdev.nchannels == 2) ? 2934 hpt370_udma[drvp->UDMA_mode] : 2935 hpt366_udma[drvp->UDMA_mode]; 2936 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2937 } else if (drvp->drive_flags & DRIVE_DMA) { 2938 /* 2939 * use Multiword DMA. 2940 * Timings will be used for both PIO and DMA, so adjust 2941 * DMA mode if needed 2942 */ 2943 if (drvp->PIO_mode >= 3 && 2944 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2945 drvp->DMA_mode = drvp->PIO_mode - 2; 2946 } 2947 after = (sc->sc_wdcdev.nchannels == 2) ? 2948 hpt370_dma[drvp->DMA_mode] : 2949 hpt366_dma[drvp->DMA_mode]; 2950 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2951 } else { 2952 /* PIO only */ 2953 after = (sc->sc_wdcdev.nchannels == 2) ? 
2954 hpt370_pio[drvp->PIO_mode] : 2955 hpt366_pio[drvp->PIO_mode]; 2956 } 2957 pci_conf_write(sc->sc_pc, sc->sc_tag, 2958 HPT_IDETIM(chp->channel, drive), after); 2959 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 2960 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname, 2961 after, before), DEBUG_PROBE); 2962 } 2963 if (idedma_ctl != 0) { 2964 /* Add software bits in status register */ 2965 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2966 IDEDMA_CTL, idedma_ctl); 2967 } 2968 pciide_print_modes(cp); 2969 } 2970 2971 int 2972 hpt_pci_intr(arg) 2973 void *arg; 2974 { 2975 struct pciide_softc *sc = arg; 2976 struct pciide_channel *cp; 2977 struct channel_softc *wdc_cp; 2978 int rv = 0; 2979 int dmastat, i, crv; 2980 2981 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2982 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2983 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 2984 if((dmastat & IDEDMA_CTL_INTR) == 0) 2985 continue; 2986 cp = &sc->pciide_channels[i]; 2987 wdc_cp = &cp->wdc_channel; 2988 crv = wdcintr(wdc_cp); 2989 if (crv == 0) { 2990 printf("%s:%d: bogus intr\n", 2991 sc->sc_wdcdev.sc_dev.dv_xname, i); 2992 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2993 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 2994 } else 2995 rv = 1; 2996 } 2997 return rv; 2998 } 2999 3000 3001 /* A macro to test product */ 3002 #define PDC_IS_262(sc) (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66) 3003 3004 void 3005 pdc202xx_chip_map(sc, pa) 3006 struct pciide_softc *sc; 3007 struct pci_attach_args *pa; 3008 { 3009 struct pciide_channel *cp; 3010 int channel; 3011 pcireg_t interface, st, mode; 3012 bus_size_t cmdsize, ctlsize; 3013 3014 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3015 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st), 3016 DEBUG_PROBE); 3017 if (pciide_chipen(sc, pa) == 0) 3018 return; 3019 3020 /* turn off RAID mode */ 3021 st &= ~PDC2xx_STATE_IDERAID; 3022 3023 /* 3024 * can't rely on the PCI_CLASS_REG content if the chip was in raid 3025 * mode. 
We have to fake interface 3026 */ 3027 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 3028 if (st & PDC2xx_STATE_NATIVE) 3029 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3030 3031 printf("%s: bus-master DMA support present", 3032 sc->sc_wdcdev.sc_dev.dv_xname); 3033 pciide_mapreg_dma(sc, pa); 3034 printf("\n"); 3035 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3036 WDC_CAPABILITY_MODE; 3037 if (sc->sc_dma_ok) { 3038 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3039 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3040 sc->sc_wdcdev.irqack = pciide_irqack; 3041 } 3042 sc->sc_wdcdev.PIO_cap = 4; 3043 sc->sc_wdcdev.DMA_cap = 2; 3044 if (PDC_IS_262(sc)) 3045 sc->sc_wdcdev.UDMA_cap = 4; 3046 else 3047 sc->sc_wdcdev.UDMA_cap = 2; 3048 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel; 3049 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3050 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3051 3052 /* setup failsafe defaults */ 3053 mode = 0; 3054 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 3055 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 3056 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 3057 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 3058 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3059 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 " 3060 "initial timings 0x%x, now 0x%x\n", channel, 3061 pci_conf_read(sc->sc_pc, sc->sc_tag, 3062 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 3063 DEBUG_PROBE); 3064 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0), 3065 mode | PDC2xx_TIM_IORDYp); 3066 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 " 3067 "initial timings 0x%x, now 0x%x\n", channel, 3068 pci_conf_read(sc->sc_pc, sc->sc_tag, 3069 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 3070 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1), 3071 mode); 3072 } 3073 3074 mode = PDC2xx_SCR_DMA; 3075 if (PDC_IS_262(sc)) { 3076 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 3077 } else { 3078 /* the BIOS set it up this way */ 3079 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 3080 } 3081 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 3082 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 3083 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n", 3084 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode), 3085 DEBUG_PROBE); 3086 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode); 3087 3088 /* controller initial state register is OK even without BIOS */ 3089 /* Set DMA mode to IDE DMA compatibility */ 3090 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 3091 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ), 3092 DEBUG_PROBE); 3093 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 3094 mode | 0x1); 3095 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 3096 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 3097 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 3098 mode | 0x1); 3099 3100 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3101 cp = &sc->pciide_channels[channel]; 3102 if (pciide_chansetup(sc, channel, interface) == 0) 3103 continue; 3104 if ((st & (PDC_IS_262(sc) ? 
3105 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 3106 printf("%s: %s channel ignored (disabled)\n", 3107 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3108 continue; 3109 } 3110 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3111 pdc202xx_pci_intr); 3112 if (cp->hw_ok == 0) 3113 continue; 3114 if (pciide_chan_candisable(cp)) 3115 st &= ~(PDC_IS_262(sc) ? 3116 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 3117 pciide_map_compat_intr(pa, cp, channel, interface); 3118 pdc202xx_setup_channel(&cp->wdc_channel); 3119 } 3120 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st), 3121 DEBUG_PROBE); 3122 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 3123 return; 3124 } 3125 3126 void 3127 pdc202xx_setup_channel(chp) 3128 struct channel_softc *chp; 3129 { 3130 struct ata_drive_datas *drvp; 3131 int drive; 3132 pcireg_t mode, st; 3133 u_int32_t idedma_ctl, scr, atapi; 3134 struct pciide_channel *cp = (struct pciide_channel*)chp; 3135 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3136 int channel = chp->channel; 3137 3138 /* setup DMA if needed */ 3139 pciide_channel_dma_setup(cp); 3140 3141 idedma_ctl = 0; 3142 3143 /* Per channel settings */ 3144 if (PDC_IS_262(sc)) { 3145 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3146 PDC262_U66); 3147 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3148 /* Trimm UDMA mode */ 3149 if ((st & PDC262_STATE_80P(channel)) != 0 || 3150 (chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3151 chp->ch_drive[0].UDMA_mode <= 2) || 3152 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3153 chp->ch_drive[1].UDMA_mode <= 2)) { 3154 if (chp->ch_drive[0].UDMA_mode > 2) 3155 chp->ch_drive[0].UDMA_mode = 2; 3156 if (chp->ch_drive[1].UDMA_mode > 2) 3157 chp->ch_drive[1].UDMA_mode = 2; 3158 } 3159 /* Set U66 if needed */ 3160 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3161 chp->ch_drive[0].UDMA_mode > 2) || 3162 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3163 chp->ch_drive[1].UDMA_mode > 2)) 3164 scr |= PDC262_U66_EN(channel); 3165 else 3166 scr &= ~PDC262_U66_EN(channel); 3167 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3168 PDC262_U66, scr); 3169 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 3170 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 3171 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3172 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3173 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 3174 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3175 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3176 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 3177 atapi = 0; 3178 else 3179 atapi = PDC262_ATAPI_UDMA; 3180 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3181 PDC262_ATAPI(channel), atapi); 3182 } 3183 } 3184 for (drive = 0; drive < 2; drive++) { 3185 drvp = &chp->ch_drive[drive]; 3186 /* If no drive, skip */ 3187 if ((drvp->drive_flags & DRIVE) == 0) 3188 continue; 3189 mode = 0; 3190 if (drvp->drive_flags & DRIVE_UDMA) { 3191 mode = PDC2xx_TIM_SET_MB(mode, 3192 pdc2xx_udma_mb[drvp->UDMA_mode]); 3193 mode = PDC2xx_TIM_SET_MC(mode, 3194 pdc2xx_udma_mc[drvp->UDMA_mode]); 3195 drvp->drive_flags &= ~DRIVE_DMA; 3196 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3197 } else if (drvp->drive_flags & DRIVE_DMA) { 3198 mode = PDC2xx_TIM_SET_MB(mode, 3199 pdc2xx_dma_mb[drvp->DMA_mode]); 3200 mode = PDC2xx_TIM_SET_MC(mode, 3201 pdc2xx_dma_mc[drvp->DMA_mode]); 3202 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3203 } else { 3204 mode = PDC2xx_TIM_SET_MB(mode, 3205 pdc2xx_dma_mb[0]); 
3206 mode = PDC2xx_TIM_SET_MC(mode, 3207 pdc2xx_dma_mc[0]); 3208 } 3209 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 3210 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 3211 if (drvp->drive_flags & DRIVE_ATA) 3212 mode |= PDC2xx_TIM_PRE; 3213 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 3214 if (drvp->PIO_mode >= 3) { 3215 mode |= PDC2xx_TIM_IORDY; 3216 if (drive == 0) 3217 mode |= PDC2xx_TIM_IORDYp; 3218 } 3219 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 3220 "timings 0x%x\n", 3221 sc->sc_wdcdev.sc_dev.dv_xname, 3222 chp->channel, drive, mode), DEBUG_PROBE); 3223 pci_conf_write(sc->sc_pc, sc->sc_tag, 3224 PDC2xx_TIM(chp->channel, drive), mode); 3225 } 3226 if (idedma_ctl != 0) { 3227 /* Add software bits in status register */ 3228 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3229 IDEDMA_CTL, idedma_ctl); 3230 } 3231 pciide_print_modes(cp); 3232 } 3233 3234 int 3235 pdc202xx_pci_intr(arg) 3236 void *arg; 3237 { 3238 struct pciide_softc *sc = arg; 3239 struct pciide_channel *cp; 3240 struct channel_softc *wdc_cp; 3241 int i, rv, crv; 3242 u_int32_t scr; 3243 3244 rv = 0; 3245 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 3246 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3247 cp = &sc->pciide_channels[i]; 3248 wdc_cp = &cp->wdc_channel; 3249 /* If a compat channel skip. */ 3250 if (cp->compat) 3251 continue; 3252 if (scr & PDC2xx_SCR_INT(i)) { 3253 crv = wdcintr(wdc_cp); 3254 if (crv == 0) 3255 printf("%s:%d: bogus intr\n", 3256 sc->sc_wdcdev.sc_dev.dv_xname, i); 3257 else 3258 rv = 1; 3259 } 3260 } 3261 return rv; 3262 } 3263 3264 void 3265 opti_chip_map(sc, pa) 3266 struct pciide_softc *sc; 3267 struct pci_attach_args *pa; 3268 { 3269 struct pciide_channel *cp; 3270 bus_size_t cmdsize, ctlsize; 3271 pcireg_t interface; 3272 u_int8_t init_ctrl; 3273 int channel; 3274 3275 if (pciide_chipen(sc, pa) == 0) 3276 return; 3277 printf("%s: bus-master DMA support present", 3278 sc->sc_wdcdev.sc_dev.dv_xname); 3279 pciide_mapreg_dma(sc, pa); 3280 printf("\n"); 3281 3282 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3283 WDC_CAPABILITY_MODE; 3284 sc->sc_wdcdev.PIO_cap = 4; 3285 if (sc->sc_dma_ok) { 3286 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3287 sc->sc_wdcdev.irqack = pciide_irqack; 3288 sc->sc_wdcdev.DMA_cap = 2; 3289 } 3290 sc->sc_wdcdev.set_modes = opti_setup_channel; 3291 3292 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3293 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3294 3295 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 3296 OPTI_REG_INIT_CONTROL); 3297 3298 interface = PCI_INTERFACE(pa->pa_class); 3299 3300 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3301 cp = &sc->pciide_channels[channel]; 3302 if (pciide_chansetup(sc, channel, interface) == 0) 3303 continue; 3304 if (channel == 1 && 3305 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 3306 printf("%s: %s channel ignored (disabled)\n", 3307 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3308 continue; 3309 } 3310 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3311 pciide_pci_intr); 3312 if (cp->hw_ok == 0) 3313 continue; 3314 pciide_map_compat_intr(pa, cp, channel, interface); 3315 if (cp->hw_ok == 0) 3316 continue; 3317 opti_setup_channel(&cp->wdc_channel); 3318 } 3319 } 3320 3321 void 3322 opti_setup_channel(chp) 3323 struct channel_softc *chp; 3324 { 3325 struct ata_drive_datas *drvp; 3326 struct pciide_channel *cp = (struct pciide_channel*)chp; 3327 struct pciide_softc *sc = (struct 
pciide_softc *)cp->wdc_channel.wdc;
3328 int drive, spd;
3329 int mode[2];
3330 u_int8_t rv, mr;
3331
3332 /*
3333 * The `Delay' and `Address Setup Time' fields of the
3334 * Miscellaneous Register are always zero initially.
3335 */
3336 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3337 mr &= ~(OPTI_MISC_DELAY_MASK |
3338 OPTI_MISC_ADDR_SETUP_MASK |
3339 OPTI_MISC_INDEX_MASK);
3340
3341 /* Prime the control register before setting timing values */
3342 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3343
3344 /* Determine the clock rate of the PCI bus the chip is attached to */
3345 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3346 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3347
3348 /* setup DMA if needed */
3349 pciide_channel_dma_setup(cp);
3350
3351 for (drive = 0; drive < 2; drive++) {
3352 drvp = &chp->ch_drive[drive];
3353 /* If no drive, skip */
3354 if ((drvp->drive_flags & DRIVE) == 0) {
3355 mode[drive] = -1;
3356 continue;
3357 }
3358
3359 if ((drvp->drive_flags & DRIVE_DMA)) {
3360 /*
3361 * Timings will be used for both PIO and DMA,
3362 * so adjust DMA mode if needed
3363 */
3364 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3365 drvp->PIO_mode = drvp->DMA_mode + 2;
3366 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3367 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3368 drvp->PIO_mode - 2 : 0;
3369 if (drvp->DMA_mode == 0)
3370 drvp->PIO_mode = 0;
3371
3372 mode[drive] = drvp->DMA_mode + 5;
3373 } else
3374 mode[drive] = drvp->PIO_mode;
3375
3376 if (drive && mode[0] >= 0 &&
3377 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3378 /*
3379 * Can't have two drives using different values
3380 * for `Address Setup Time'.
3381 * Slow down the faster drive to compensate.
3382 */
3383 int d = (opti_tim_as[spd][mode[0]] >
3384 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3385
3386 mode[d] = mode[1-d];
3387 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3388 chp->ch_drive[d].DMA_mode = 0;
3389 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3390 }
3391 }
3392
3393 for (drive = 0; drive < 2; drive++) {
3394 int m;
3395 if ((m = mode[drive]) < 0)
3396 continue;
3397
3398 /* Set the Address Setup Time and select appropriate index */
3399 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3400 rv |= OPTI_MISC_INDEX(drive);
3401 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3402
3403 /* Set the pulse width and recovery timing parameters */
3404 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3405 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3406 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3407 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3408
3409 /* Set the Enhanced Mode register appropriately */
3410 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3411 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3412 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3413 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3414 }
3415
3416 /* Finally, enable the timings */
3417 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3418
3419 pciide_print_modes(cp);
3420 }
3421