1 /* $NetBSD: pciide.c,v 1.96 2000/12/04 20:25:40 fvdl Exp $ */ 2 3 4 /* 5 * Copyright (c) 1999 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by the University of 18 * California, Berkeley and its contributors. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 */ 35 36 37 /* 38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 3. All advertising materials mentioning features or use of this software 49 * must display the following acknowledgement: 50 * This product includes software developed by Christopher G. Demetriou 51 * for the NetBSD Project. 52 * 4. The name of the author may not be used to endorse or promote products 53 * derived from this software without specific prior written permission 54 * 55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 */ 66 67 /* 68 * PCI IDE controller driver. 69 * 70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 71 * sys/dev/pci/ppb.c, revision 1.16). 72 * 73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 75 * 5/16/94" from the PCI SIG. 76 * 77 */ 78 79 #ifndef WDCDEBUG 80 #define WDCDEBUG 81 #endif 82 83 #define DEBUG_DMA 0x01 84 #define DEBUG_XFERS 0x02 85 #define DEBUG_FUNCS 0x08 86 #define DEBUG_PROBE 0x10 87 #ifdef WDCDEBUG 88 int wdcdebug_pciide_mask = 0; 89 #define WDCDEBUG_PRINT(args, level) \ 90 if (wdcdebug_pciide_mask & (level)) printf args 91 #else 92 #define WDCDEBUG_PRINT(args, level) 93 #endif 94 #include <sys/param.h> 95 #include <sys/systm.h> 96 #include <sys/device.h> 97 #include <sys/malloc.h> 98 99 #include <uvm/uvm_extern.h> 100 101 #include <machine/endian.h> 102 103 #include <dev/pci/pcireg.h> 104 #include <dev/pci/pcivar.h> 105 #include <dev/pci/pcidevs.h> 106 #include <dev/pci/pciidereg.h> 107 #include <dev/pci/pciidevar.h> 108 #include <dev/pci/pciide_piix_reg.h> 109 #include <dev/pci/pciide_amd_reg.h> 110 #include <dev/pci/pciide_apollo_reg.h> 111 #include <dev/pci/pciide_cmd_reg.h> 112 #include <dev/pci/pciide_cy693_reg.h> 113 #include <dev/pci/pciide_sis_reg.h> 114 #include <dev/pci/pciide_acer_reg.h> 115 #include <dev/pci/pciide_pdc202xx_reg.h> 116 #include <dev/pci/pciide_opti_reg.h> 117 #include <dev/pci/pciide_hpt_reg.h> 118 #include <dev/pci/cy82c693var.h> 119 120 #include "opt_pciide.h" 121 122 /* inlines for reading/writing 8-bit PCI registers */ 123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t, 124 int)); 125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t, 126 int, u_int8_t)); 127 128 static __inline u_int8_t 129 pciide_pci_read(pc, pa, reg) 130 pci_chipset_tag_t pc; 131 pcitag_t pa; 132 int reg; 133 { 134 135 return (pci_conf_read(pc, pa, (reg & ~0x03)) >> 136 ((reg & 0x03) * 8) & 0xff); 137 } 138 139 static __inline void 140 pciide_pci_write(pc, pa, reg, val) 141 pci_chipset_tag_t pc; 142 pcitag_t pa; 143 int reg; 144 u_int8_t val; 145 { 146 pcireg_t pcival; 147 148 pcival = pci_conf_read(pc, pa, (reg & ~0x03)); 149 pcival &= ~(0xff << ((reg & 0x03) * 8)); 150 pcival |= (val << ((reg & 0x03) * 8)); 151 pci_conf_write(pc, pa, (reg & ~0x03), pcival); 152 } 153 154 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 155 156 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 157 void piix_setup_channel __P((struct channel_softc*)); 158 void piix3_4_setup_channel __P((struct channel_softc*)); 159 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 160 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*)); 161 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t)); 162 163 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 164 void 
amd756_setup_channel __P((struct channel_softc*)); 165 166 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 167 void apollo_setup_channel __P((struct channel_softc*)); 168 169 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 170 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 171 void cmd0643_9_setup_channel __P((struct channel_softc*)); 172 void cmd_channel_map __P((struct pci_attach_args *, 173 struct pciide_softc *, int)); 174 int cmd_pci_intr __P((void *)); 175 void cmd646_9_irqack __P((struct channel_softc *)); 176 177 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 178 void cy693_setup_channel __P((struct channel_softc*)); 179 180 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 181 void sis_setup_channel __P((struct channel_softc*)); 182 183 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 184 void acer_setup_channel __P((struct channel_softc*)); 185 int acer_pci_intr __P((void *)); 186 187 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 188 void pdc202xx_setup_channel __P((struct channel_softc*)); 189 int pdc202xx_pci_intr __P((void *)); 190 191 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 192 void opti_setup_channel __P((struct channel_softc*)); 193 194 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*)); 195 void hpt_setup_channel __P((struct channel_softc*)); 196 int hpt_pci_intr __P((void *)); 197 198 void pciide_channel_dma_setup __P((struct pciide_channel *)); 199 int pciide_dma_table_setup __P((struct pciide_softc*, int, int)); 200 int pciide_dma_init __P((void*, int, int, void *, size_t, int)); 201 void pciide_dma_start __P((void*, int, int)); 202 int pciide_dma_finish __P((void*, int, int, int)); 203 void pciide_irqack __P((struct channel_softc *)); 204 void pciide_print_modes __P((struct pciide_channel *)); 205 206 struct pciide_product_desc { 207 u_int32_t ide_product; 208 int ide_flags; 209 const char *ide_name; 210 /* map and setup chip, probe drives */ 211 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*)); 212 }; 213 214 /* Flags for ide_flags */ 215 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 216 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 217 218 /* Default product description for devices not known from this controller */ 219 const struct pciide_product_desc default_product_desc = { 220 0, 221 0, 222 "Generic PCI IDE controller", 223 default_chip_map, 224 }; 225 226 const struct pciide_product_desc pciide_intel_products[] = { 227 { PCI_PRODUCT_INTEL_82092AA, 228 0, 229 "Intel 82092AA IDE controller", 230 default_chip_map, 231 }, 232 { PCI_PRODUCT_INTEL_82371FB_IDE, 233 0, 234 "Intel 82371FB IDE controller (PIIX)", 235 piix_chip_map, 236 }, 237 { PCI_PRODUCT_INTEL_82371SB_IDE, 238 0, 239 "Intel 82371SB IDE Interface (PIIX3)", 240 piix_chip_map, 241 }, 242 { PCI_PRODUCT_INTEL_82371AB_IDE, 243 0, 244 "Intel 82371AB IDE controller (PIIX4)", 245 piix_chip_map, 246 }, 247 { PCI_PRODUCT_INTEL_82440MX_IDE, 248 0, 249 "Intel 82440MX IDE controller", 250 piix_chip_map 251 }, 252 { PCI_PRODUCT_INTEL_82801AA_IDE, 253 0, 254 "Intel 82801AA IDE Controller (ICH)", 255 piix_chip_map, 256 }, 257 { PCI_PRODUCT_INTEL_82801AB_IDE, 258 0, 259 "Intel 82801AB IDE Controller (ICH0)", 260 piix_chip_map, 261 }, 262 { PCI_PRODUCT_INTEL_82801BA_IDE, 263 0, 264 "Intel 82801BA IDE Controller 
(ICH2)", 265 piix_chip_map, 266 }, 267 { 0, 268 0, 269 NULL, 270 } 271 }; 272 273 const struct pciide_product_desc pciide_amd_products[] = { 274 { PCI_PRODUCT_AMD_PBC756_IDE, 275 0, 276 "Advanced Micro Devices AMD756 IDE Controller", 277 amd756_chip_map 278 }, 279 { 0, 280 0, 281 NULL, 282 } 283 }; 284 285 const struct pciide_product_desc pciide_cmd_products[] = { 286 { PCI_PRODUCT_CMDTECH_640, 287 0, 288 "CMD Technology PCI0640", 289 cmd_chip_map 290 }, 291 { PCI_PRODUCT_CMDTECH_643, 292 0, 293 "CMD Technology PCI0643", 294 cmd0643_9_chip_map, 295 }, 296 { PCI_PRODUCT_CMDTECH_646, 297 0, 298 "CMD Technology PCI0646", 299 cmd0643_9_chip_map, 300 }, 301 { PCI_PRODUCT_CMDTECH_648, 302 IDE_PCI_CLASS_OVERRIDE, 303 "CMD Technology PCI0648", 304 cmd0643_9_chip_map, 305 }, 306 { PCI_PRODUCT_CMDTECH_649, 307 IDE_PCI_CLASS_OVERRIDE, 308 "CMD Technology PCI0649", 309 cmd0643_9_chip_map, 310 }, 311 { 0, 312 0, 313 NULL, 314 } 315 }; 316 317 const struct pciide_product_desc pciide_via_products[] = { 318 { PCI_PRODUCT_VIATECH_VT82C586_IDE, 319 0, 320 "VIA Tech VT82C586 IDE Controller", 321 apollo_chip_map, 322 }, 323 { PCI_PRODUCT_VIATECH_VT82C586A_IDE, 324 0, 325 "VIA Tech VT82C586A IDE Controller", 326 apollo_chip_map, 327 }, 328 { 0, 329 0, 330 NULL, 331 } 332 }; 333 334 const struct pciide_product_desc pciide_cypress_products[] = { 335 { PCI_PRODUCT_CONTAQ_82C693, 336 IDE_16BIT_IOSPACE, 337 "Cypress 82C693 IDE Controller", 338 cy693_chip_map, 339 }, 340 { 0, 341 0, 342 NULL, 343 } 344 }; 345 346 const struct pciide_product_desc pciide_sis_products[] = { 347 { PCI_PRODUCT_SIS_5597_IDE, 348 0, 349 "Silicon Integrated System 5597/5598 IDE controller", 350 sis_chip_map, 351 }, 352 { 0, 353 0, 354 NULL, 355 } 356 }; 357 358 const struct pciide_product_desc pciide_acer_products[] = { 359 { PCI_PRODUCT_ALI_M5229, 360 0, 361 "Acer Labs M5229 UDMA IDE Controller", 362 acer_chip_map, 363 }, 364 { 0, 365 0, 366 NULL, 367 } 368 }; 369 370 const struct pciide_product_desc pciide_promise_products[] = { 371 { PCI_PRODUCT_PROMISE_ULTRA33, 372 IDE_PCI_CLASS_OVERRIDE /* |IDE_16BIT_IOSPACE */, 373 "Promise Ultra33/ATA Bus Master IDE Accelerator", 374 pdc202xx_chip_map, 375 }, 376 { PCI_PRODUCT_PROMISE_ULTRA66, 377 IDE_PCI_CLASS_OVERRIDE|IDE_16BIT_IOSPACE, 378 "Promise Ultra66/ATA Bus Master IDE Accelerator", 379 pdc202xx_chip_map, 380 }, 381 { PCI_PRODUCT_PROMISE_ULTRA100, 382 IDE_PCI_CLASS_OVERRIDE|IDE_16BIT_IOSPACE, 383 "Promise Ultra100/ATA Bus Master IDE Accelerator", 384 pdc202xx_chip_map, 385 }, 386 { PCI_PRODUCT_PROMISE_ULTRA100X, 387 IDE_PCI_CLASS_OVERRIDE|IDE_16BIT_IOSPACE, 388 "Promise Ultra100/ATA Bus Master IDE Accelerator", 389 pdc202xx_chip_map, 390 }, 391 { 0, 392 0, 393 NULL, 394 } 395 }; 396 397 const struct pciide_product_desc pciide_opti_products[] = { 398 { PCI_PRODUCT_OPTI_82C621, 399 0, 400 "OPTi 82c621 PCI IDE controller", 401 opti_chip_map, 402 }, 403 { PCI_PRODUCT_OPTI_82C568, 404 0, 405 "OPTi 82c568 (82c621 compatible) PCI IDE controller", 406 opti_chip_map, 407 }, 408 { PCI_PRODUCT_OPTI_82D568, 409 0, 410 "OPTi 82d568 (82c621 compatible) PCI IDE controller", 411 opti_chip_map, 412 }, 413 { 0, 414 0, 415 NULL, 416 } 417 }; 418 419 const struct pciide_product_desc pciide_triones_products[] = { 420 { PCI_PRODUCT_TRIONES_HPT366, 421 IDE_PCI_CLASS_OVERRIDE, 422 "Triones/Highpoint HPT366/370 IDE Controller", 423 hpt_chip_map, 424 }, 425 { 0, 426 0, 427 NULL, 428 } 429 }; 430 431 struct pciide_vendor_desc { 432 u_int32_t ide_vendor; 433 const struct pciide_product_desc *ide_products; 434 }; 435 
436 const struct pciide_vendor_desc pciide_vendors[] = { 437 { PCI_VENDOR_INTEL, pciide_intel_products }, 438 { PCI_VENDOR_CMDTECH, pciide_cmd_products }, 439 { PCI_VENDOR_VIATECH, pciide_via_products }, 440 { PCI_VENDOR_CONTAQ, pciide_cypress_products }, 441 { PCI_VENDOR_SIS, pciide_sis_products }, 442 { PCI_VENDOR_ALI, pciide_acer_products }, 443 { PCI_VENDOR_PROMISE, pciide_promise_products }, 444 { PCI_VENDOR_AMD, pciide_amd_products }, 445 { PCI_VENDOR_OPTI, pciide_opti_products }, 446 { PCI_VENDOR_TRIONES, pciide_triones_products }, 447 { 0, NULL } 448 }; 449 450 /* options passed via the 'flags' config keyword */ 451 #define PCIIDE_OPTIONS_DMA 0x01 452 453 int pciide_match __P((struct device *, struct cfdata *, void *)); 454 void pciide_attach __P((struct device *, struct device *, void *)); 455 456 struct cfattach pciide_ca = { 457 sizeof(struct pciide_softc), pciide_match, pciide_attach 458 }; 459 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *)); 460 int pciide_mapregs_compat __P(( struct pci_attach_args *, 461 struct pciide_channel *, int, bus_size_t *, bus_size_t*)); 462 int pciide_mapregs_native __P((struct pci_attach_args *, 463 struct pciide_channel *, bus_size_t *, bus_size_t *, 464 int (*pci_intr) __P((void *)))); 465 void pciide_mapreg_dma __P((struct pciide_softc *, 466 struct pci_attach_args *)); 467 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t)); 468 void pciide_mapchan __P((struct pci_attach_args *, 469 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *, 470 int (*pci_intr) __P((void *)))); 471 int pciide_chan_candisable __P((struct pciide_channel *)); 472 void pciide_map_compat_intr __P(( struct pci_attach_args *, 473 struct pciide_channel *, int, int)); 474 int pciide_print __P((void *, const char *pnp)); 475 int pciide_compat_intr __P((void *)); 476 int pciide_pci_intr __P((void *)); 477 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t)); 478 479 const struct pciide_product_desc * 480 pciide_lookup_product(id) 481 u_int32_t id; 482 { 483 const struct pciide_product_desc *pp; 484 const struct pciide_vendor_desc *vp; 485 486 for (vp = pciide_vendors; vp->ide_products != NULL; vp++) 487 if (PCI_VENDOR(id) == vp->ide_vendor) 488 break; 489 490 if ((pp = vp->ide_products) == NULL) 491 return NULL; 492 493 for (; pp->ide_name != NULL; pp++) 494 if (PCI_PRODUCT(id) == pp->ide_product) 495 break; 496 497 if (pp->ide_name == NULL) 498 return NULL; 499 return pp; 500 } 501 502 int 503 pciide_match(parent, match, aux) 504 struct device *parent; 505 struct cfdata *match; 506 void *aux; 507 { 508 struct pci_attach_args *pa = aux; 509 const struct pciide_product_desc *pp; 510 511 /* 512 * Check the ID register to see that it's a PCI IDE controller. 513 * If it is, we assume that we can deal with it; it _should_ 514 * work in a standardized way... 515 */ 516 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE && 517 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 518 return (1); 519 } 520 521 /* 522 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE 523 * controllers. Let see if we can deal with it anyway. 
524 */ 525 pp = pciide_lookup_product(pa->pa_id); 526 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) { 527 return (1); 528 } 529 530 return (0); 531 } 532 533 void 534 pciide_attach(parent, self, aux) 535 struct device *parent, *self; 536 void *aux; 537 { 538 struct pci_attach_args *pa = aux; 539 pci_chipset_tag_t pc = pa->pa_pc; 540 pcitag_t tag = pa->pa_tag; 541 struct pciide_softc *sc = (struct pciide_softc *)self; 542 pcireg_t csr; 543 char devinfo[256]; 544 const char *displaydev; 545 546 sc->sc_pp = pciide_lookup_product(pa->pa_id); 547 if (sc->sc_pp == NULL) { 548 sc->sc_pp = &default_product_desc; 549 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo); 550 displaydev = devinfo; 551 } else 552 displaydev = sc->sc_pp->ide_name; 553 554 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class)); 555 556 sc->sc_pc = pa->pa_pc; 557 sc->sc_tag = pa->pa_tag; 558 #ifdef WDCDEBUG 559 if (wdcdebug_pciide_mask & DEBUG_PROBE) 560 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL); 561 #endif 562 sc->sc_pp->chip_map(sc, pa); 563 564 if (sc->sc_dma_ok) { 565 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 566 csr |= PCI_COMMAND_MASTER_ENABLE; 567 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 568 } 569 WDCDEBUG_PRINT(("pciide: command/status register=%x\n", 570 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE); 571 } 572 573 /* tell wether the chip is enabled or not */ 574 int 575 pciide_chipen(sc, pa) 576 struct pciide_softc *sc; 577 struct pci_attach_args *pa; 578 { 579 pcireg_t csr; 580 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) { 581 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 582 PCI_COMMAND_STATUS_REG); 583 printf("%s: device disabled (at %s)\n", 584 sc->sc_wdcdev.sc_dev.dv_xname, 585 (csr & PCI_COMMAND_IO_ENABLE) == 0 ? 
586 "device" : "bridge"); 587 return 0; 588 } 589 return 1; 590 } 591 592 int 593 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep) 594 struct pci_attach_args *pa; 595 struct pciide_channel *cp; 596 int compatchan; 597 bus_size_t *cmdsizep, *ctlsizep; 598 { 599 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 600 struct channel_softc *wdc_cp = &cp->wdc_channel; 601 602 cp->compat = 1; 603 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 604 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 605 606 wdc_cp->cmd_iot = pa->pa_iot; 607 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 608 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 609 printf("%s: couldn't map %s channel cmd regs\n", 610 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 611 return (0); 612 } 613 614 wdc_cp->ctl_iot = pa->pa_iot; 615 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 616 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 617 printf("%s: couldn't map %s channel ctl regs\n", 618 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 619 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 620 PCIIDE_COMPAT_CMD_SIZE); 621 return (0); 622 } 623 624 return (1); 625 } 626 627 int 628 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr) 629 struct pci_attach_args * pa; 630 struct pciide_channel *cp; 631 bus_size_t *cmdsizep, *ctlsizep; 632 int (*pci_intr) __P((void *)); 633 { 634 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 635 struct channel_softc *wdc_cp = &cp->wdc_channel; 636 const char *intrstr; 637 pci_intr_handle_t intrhandle; 638 639 cp->compat = 0; 640 641 if (sc->sc_pci_ih == NULL) { 642 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin, 643 pa->pa_intrline, &intrhandle) != 0) { 644 printf("%s: couldn't map native-PCI interrupt\n", 645 sc->sc_wdcdev.sc_dev.dv_xname); 646 return 0; 647 } 648 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 649 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 650 intrhandle, IPL_BIO, pci_intr, sc); 651 if (sc->sc_pci_ih != NULL) { 652 printf("%s: using %s for native-PCI interrupt\n", 653 sc->sc_wdcdev.sc_dev.dv_xname, 654 intrstr ? intrstr : "unknown interrupt"); 655 } else { 656 printf("%s: couldn't establish native-PCI interrupt", 657 sc->sc_wdcdev.sc_dev.dv_xname); 658 if (intrstr != NULL) 659 printf(" at %s", intrstr); 660 printf("\n"); 661 return 0; 662 } 663 } 664 cp->ih = sc->sc_pci_ih; 665 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 666 PCI_MAPREG_TYPE_IO, 0, 667 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) { 668 printf("%s: couldn't map %s channel cmd regs\n", 669 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 670 return 0; 671 } 672 673 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 674 PCI_MAPREG_TYPE_IO, 0, 675 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) { 676 printf("%s: couldn't map %s channel ctl regs\n", 677 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 678 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 679 return 0; 680 } 681 return (1); 682 } 683 684 void 685 pciide_mapreg_dma(sc, pa) 686 struct pciide_softc *sc; 687 struct pci_attach_args *pa; 688 { 689 pcireg_t maptype; 690 bus_addr_t addr; 691 692 /* 693 * Map DMA registers 694 * 695 * Note that sc_dma_ok is the right variable to test to see if 696 * DMA can be done. If the interface doesn't support DMA, 697 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 698 * be mapped, it'll be zero. 
I.e., sc_dma_ok will only be 699 * non-zero if the interface supports DMA and the registers 700 * could be mapped. 701 * 702 * XXX Note that despite the fact that the Bus Master IDE specs 703 * XXX say that "The bus master IDE function uses 16 bytes of IO 704 * XXX space," some controllers (at least the United 705 * XXX Microelectronics UM8886BF) place it in memory space. 706 */ 707 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 708 PCIIDE_REG_BUS_MASTER_DMA); 709 710 switch (maptype) { 711 case PCI_MAPREG_TYPE_IO: 712 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 713 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 714 &addr, NULL, NULL) == 0); 715 if (sc->sc_dma_ok == 0) { 716 printf(", but unused (couldn't query registers)"); 717 break; 718 } 719 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 720 && addr >= 0x10000) { 721 sc->sc_dma_ok = 0; 722 printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr); 723 break; 724 } 725 /* FALLTHROUGH */ 726 727 case PCI_MAPREG_MEM_TYPE_32BIT: 728 sc->sc_dma_ok = (pci_mapreg_map(pa, 729 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 730 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0); 731 sc->sc_dmat = pa->pa_dmat; 732 if (sc->sc_dma_ok == 0) { 733 printf(", but unused (couldn't map registers)"); 734 } else { 735 sc->sc_wdcdev.dma_arg = sc; 736 sc->sc_wdcdev.dma_init = pciide_dma_init; 737 sc->sc_wdcdev.dma_start = pciide_dma_start; 738 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 739 } 740 break; 741 742 default: 743 sc->sc_dma_ok = 0; 744 printf(", but unsupported register maptype (0x%x)", maptype); 745 } 746 } 747 748 int 749 pciide_compat_intr(arg) 750 void *arg; 751 { 752 struct pciide_channel *cp = arg; 753 754 #ifdef DIAGNOSTIC 755 /* should only be called for a compat channel */ 756 if (cp->compat == 0) 757 panic("pciide compat intr called for non-compat chan %p\n", cp); 758 #endif 759 return (wdcintr(&cp->wdc_channel)); 760 } 761 762 int 763 pciide_pci_intr(arg) 764 void *arg; 765 { 766 struct pciide_softc *sc = arg; 767 struct pciide_channel *cp; 768 struct channel_softc *wdc_cp; 769 int i, rv, crv; 770 771 rv = 0; 772 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 773 cp = &sc->pciide_channels[i]; 774 wdc_cp = &cp->wdc_channel; 775 776 /* If a compat channel skip. 
		 */
		if (cp->compat)
			continue;
		/* if this channel not waiting for intr, skip */
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

void
pciide_channel_dma_setup(cp)
	struct pciide_channel *cp;
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

int
pciide_dma_table_setup(sc, channel, drive)
	struct pciide_softc *sc;
	int channel, drive;
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return 0;

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
	    (unsigned long)seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		printf("%s:%d: unable to create table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to load table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
	    DEBUG_PROBE);
	/* Create a xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		printf("%s:%d: unable to create xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	return 0;
}
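
/*
 * Summary of how the table allocated above is consumed (see
 * pciide_dma_init() below): each idedma_table entry holds a
 * little-endian (base_addr, byte_count) pair for one segment of the
 * transfer, the byte count is masked with IDEDMA_BYTE_COUNT_MASK, the
 * last entry is tagged with IDEDMA_BYTE_COUNT_EOT, and the physical
 * address of the table is then written to the channel's IDEDMA_TBL
 * register.  This comment only restates the code below for quick
 * reference.
 */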

int
pciide_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready. Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    (flags & WDC_DMA_READ) ?
IDEDMA_CMD_WRITE: 0); 977 /* remember flags */ 978 dma_maps->dma_flags = flags; 979 return 0; 980 } 981 982 void 983 pciide_dma_start(v, channel, drive) 984 void *v; 985 int channel, drive; 986 { 987 struct pciide_softc *sc = v; 988 989 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS); 990 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 991 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 992 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 993 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START); 994 } 995 996 int 997 pciide_dma_finish(v, channel, drive, force) 998 void *v; 999 int channel, drive; 1000 int force; 1001 { 1002 struct pciide_softc *sc = v; 1003 u_int8_t status; 1004 int error = 0; 1005 struct pciide_dma_maps *dma_maps = 1006 &sc->pciide_channels[channel].dma_maps[drive]; 1007 1008 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1009 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel); 1010 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 1011 DEBUG_XFERS); 1012 1013 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) 1014 return WDC_DMAST_NOIRQ; 1015 1016 /* stop DMA channel */ 1017 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1018 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel, 1019 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1020 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START); 1021 1022 /* Unload the map of the data buffer */ 1023 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1024 dma_maps->dmamap_xfer->dm_mapsize, 1025 (dma_maps->dma_flags & WDC_DMA_READ) ? 1026 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1027 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 1028 1029 if ((status & IDEDMA_CTL_ERR) != 0) { 1030 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 1031 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 1032 error |= WDC_DMAST_ERR; 1033 } 1034 1035 if ((status & IDEDMA_CTL_INTR) == 0) { 1036 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 1037 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 1038 drive, status); 1039 error |= WDC_DMAST_NOIRQ; 1040 } 1041 1042 if ((status & IDEDMA_CTL_ACT) != 0) { 1043 /* data underrun, may be a valid condition for ATAPI */ 1044 error |= WDC_DMAST_UNDER; 1045 } 1046 return error; 1047 } 1048 1049 void 1050 pciide_irqack(chp) 1051 struct channel_softc *chp; 1052 { 1053 struct pciide_channel *cp = (struct pciide_channel*)chp; 1054 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1055 1056 /* clear status bits in IDE DMA registers */ 1057 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1058 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel, 1059 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1060 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel)); 1061 } 1062 1063 /* some common code used by several chip_map */ 1064 int 1065 pciide_chansetup(sc, channel, interface) 1066 struct pciide_softc *sc; 1067 int channel; 1068 pcireg_t interface; 1069 { 1070 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1071 sc->wdc_chanarray[channel] = &cp->wdc_channel; 1072 cp->name = PCIIDE_CHANNEL_NAME(channel); 1073 cp->wdc_channel.channel = channel; 1074 cp->wdc_channel.wdc = &sc->sc_wdcdev; 1075 cp->wdc_channel.ch_queue = 1076 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 1077 if (cp->wdc_channel.ch_queue == NULL) { 1078 printf("%s %s channel: " 1079 "can't allocate memory for command queue", 1080 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1081 return 0; 1082 } 1083 printf("%s: %s channel %s to %s mode\n", 1084 
sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1085 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 1086 "configured" : "wired", 1087 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 1088 "native-PCI" : "compatibility"); 1089 return 1; 1090 } 1091 1092 /* some common code used by several chip channel_map */ 1093 void 1094 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr) 1095 struct pci_attach_args *pa; 1096 struct pciide_channel *cp; 1097 pcireg_t interface; 1098 bus_size_t *cmdsizep, *ctlsizep; 1099 int (*pci_intr) __P((void *)); 1100 { 1101 struct channel_softc *wdc_cp = &cp->wdc_channel; 1102 1103 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 1104 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 1105 pci_intr); 1106 else 1107 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1108 wdc_cp->channel, cmdsizep, ctlsizep); 1109 1110 if (cp->hw_ok == 0) 1111 return; 1112 wdc_cp->data32iot = wdc_cp->cmd_iot; 1113 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 1114 wdcattach(wdc_cp); 1115 } 1116 1117 /* 1118 * Generic code to call to know if a channel can be disabled. Return 1 1119 * if channel can be disabled, 0 if not 1120 */ 1121 int 1122 pciide_chan_candisable(cp) 1123 struct pciide_channel *cp; 1124 { 1125 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1126 struct channel_softc *wdc_cp = &cp->wdc_channel; 1127 1128 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 1129 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 1130 printf("%s: disabling %s channel (no drives)\n", 1131 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1132 cp->hw_ok = 0; 1133 return 1; 1134 } 1135 return 0; 1136 } 1137 1138 /* 1139 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 1140 * Set hw_ok=0 on failure 1141 */ 1142 void 1143 pciide_map_compat_intr(pa, cp, compatchan, interface) 1144 struct pci_attach_args *pa; 1145 struct pciide_channel *cp; 1146 int compatchan, interface; 1147 { 1148 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1149 struct channel_softc *wdc_cp = &cp->wdc_channel; 1150 1151 if (cp->hw_ok == 0) 1152 return; 1153 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1154 return; 1155 1156 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 1157 pa, compatchan, pciide_compat_intr, cp); 1158 if (cp->ih == NULL) { 1159 printf("%s: no compatibility interrupt for use by %s " 1160 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1161 cp->hw_ok = 0; 1162 } 1163 } 1164 1165 void 1166 pciide_print_modes(cp) 1167 struct pciide_channel *cp; 1168 { 1169 wdc_print_modes(&cp->wdc_channel); 1170 } 1171 1172 void 1173 default_chip_map(sc, pa) 1174 struct pciide_softc *sc; 1175 struct pci_attach_args *pa; 1176 { 1177 struct pciide_channel *cp; 1178 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1179 pcireg_t csr; 1180 int channel, drive; 1181 struct ata_drive_datas *drvp; 1182 u_int8_t idedma_ctl; 1183 bus_size_t cmdsize, ctlsize; 1184 char *failreason; 1185 1186 if (pciide_chipen(sc, pa) == 0) 1187 return; 1188 1189 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 1190 printf("%s: bus-master DMA support present", 1191 sc->sc_wdcdev.sc_dev.dv_xname); 1192 if (sc->sc_pp == &default_product_desc && 1193 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 1194 PCIIDE_OPTIONS_DMA) == 0) { 1195 printf(", but unused (no driver support)"); 1196 sc->sc_dma_ok = 0; 1197 } else { 1198 pciide_mapreg_dma(sc, pa); 1199 if (sc->sc_dma_ok != 0) 1200 printf(", used without full driver " 1201 "support"); 1202 } 1203 
} else { 1204 printf("%s: hardware does not support DMA", 1205 sc->sc_wdcdev.sc_dev.dv_xname); 1206 sc->sc_dma_ok = 0; 1207 } 1208 printf("\n"); 1209 if (sc->sc_dma_ok) { 1210 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1211 sc->sc_wdcdev.irqack = pciide_irqack; 1212 } 1213 sc->sc_wdcdev.PIO_cap = 0; 1214 sc->sc_wdcdev.DMA_cap = 0; 1215 1216 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1217 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1218 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 1219 1220 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1221 cp = &sc->pciide_channels[channel]; 1222 if (pciide_chansetup(sc, channel, interface) == 0) 1223 continue; 1224 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 1225 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 1226 &ctlsize, pciide_pci_intr); 1227 } else { 1228 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1229 channel, &cmdsize, &ctlsize); 1230 } 1231 if (cp->hw_ok == 0) 1232 continue; 1233 /* 1234 * Check to see if something appears to be there. 1235 */ 1236 failreason = NULL; 1237 if (!wdcprobe(&cp->wdc_channel)) { 1238 failreason = "not responding; disabled or no drives?"; 1239 goto next; 1240 } 1241 /* 1242 * Now, make sure it's actually attributable to this PCI IDE 1243 * channel by trying to access the channel again while the 1244 * PCI IDE controller's I/O space is disabled. (If the 1245 * channel no longer appears to be there, it belongs to 1246 * this controller.) YUCK! 1247 */ 1248 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 1249 PCI_COMMAND_STATUS_REG); 1250 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1251 csr & ~PCI_COMMAND_IO_ENABLE); 1252 if (wdcprobe(&cp->wdc_channel)) 1253 failreason = "other hardware responding at addresses"; 1254 pci_conf_write(sc->sc_pc, sc->sc_tag, 1255 PCI_COMMAND_STATUS_REG, csr); 1256 next: 1257 if (failreason) { 1258 printf("%s: %s channel ignored (%s)\n", 1259 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1260 failreason); 1261 cp->hw_ok = 0; 1262 bus_space_unmap(cp->wdc_channel.cmd_iot, 1263 cp->wdc_channel.cmd_ioh, cmdsize); 1264 bus_space_unmap(cp->wdc_channel.ctl_iot, 1265 cp->wdc_channel.ctl_ioh, ctlsize); 1266 } else { 1267 pciide_map_compat_intr(pa, cp, channel, interface); 1268 } 1269 if (cp->hw_ok) { 1270 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 1271 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 1272 wdcattach(&cp->wdc_channel); 1273 } 1274 } 1275 1276 if (sc->sc_dma_ok == 0) 1277 return; 1278 1279 /* Allocate DMA maps */ 1280 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1281 idedma_ctl = 0; 1282 cp = &sc->pciide_channels[channel]; 1283 for (drive = 0; drive < 2; drive++) { 1284 drvp = &cp->wdc_channel.ch_drive[drive]; 1285 /* If no drive, skip */ 1286 if ((drvp->drive_flags & DRIVE) == 0) 1287 continue; 1288 if ((drvp->drive_flags & DRIVE_DMA) == 0) 1289 continue; 1290 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 1291 /* Abort DMA setup */ 1292 printf("%s:%d:%d: can't allocate DMA maps, " 1293 "using PIO transfers\n", 1294 sc->sc_wdcdev.sc_dev.dv_xname, 1295 channel, drive); 1296 drvp->drive_flags &= ~DRIVE_DMA; 1297 } 1298 printf("%s:%d:%d: using DMA data transfers\n", 1299 sc->sc_wdcdev.sc_dev.dv_xname, 1300 channel, drive); 1301 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1302 } 1303 if (idedma_ctl != 0) { 1304 /* Add software bits in status register */ 1305 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1306 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel), 1307 idedma_ctl); 1308 } 1309 } 1310 } 
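/*
 * The chip-specific *_chip_map() functions below all follow roughly the
 * same pattern as default_chip_map() above.  A condensed, illustrative
 * skeleton (not compiled; declarations omitted and xxx_setup_channel is
 * a placeholder for the controller's own set_modes hook):
 */
#if 0
	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);		/* map bus-master registers */
	printf("\n");
	/* advertise PIO/DMA/UDMA capabilities and the set_modes hook */
	sc->sc_wdcdev.set_modes = xxx_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}
#endif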
1311 1312 void 1313 piix_chip_map(sc, pa) 1314 struct pciide_softc *sc; 1315 struct pci_attach_args *pa; 1316 { 1317 struct pciide_channel *cp; 1318 int channel; 1319 u_int32_t idetim; 1320 bus_size_t cmdsize, ctlsize; 1321 1322 if (pciide_chipen(sc, pa) == 0) 1323 return; 1324 1325 printf("%s: bus-master DMA support present", 1326 sc->sc_wdcdev.sc_dev.dv_xname); 1327 pciide_mapreg_dma(sc, pa); 1328 printf("\n"); 1329 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1330 WDC_CAPABILITY_MODE; 1331 if (sc->sc_dma_ok) { 1332 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1333 sc->sc_wdcdev.irqack = pciide_irqack; 1334 switch(sc->sc_pp->ide_product) { 1335 case PCI_PRODUCT_INTEL_82371AB_IDE: 1336 case PCI_PRODUCT_INTEL_82440MX_IDE: 1337 case PCI_PRODUCT_INTEL_82801AA_IDE: 1338 case PCI_PRODUCT_INTEL_82801AB_IDE: 1339 case PCI_PRODUCT_INTEL_82801BA_IDE: 1340 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1341 } 1342 } 1343 sc->sc_wdcdev.PIO_cap = 4; 1344 sc->sc_wdcdev.DMA_cap = 2; 1345 switch(sc->sc_pp->ide_product) { 1346 case PCI_PRODUCT_INTEL_82801AA_IDE: 1347 case PCI_PRODUCT_INTEL_82801BA_IDE: 1348 sc->sc_wdcdev.UDMA_cap = 4; 1349 break; 1350 default: 1351 sc->sc_wdcdev.UDMA_cap = 2; 1352 } 1353 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE) 1354 sc->sc_wdcdev.set_modes = piix_setup_channel; 1355 else 1356 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 1357 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1358 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1359 1360 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x", 1361 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1362 DEBUG_PROBE); 1363 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1364 WDCDEBUG_PRINT((", sidetim=0x%x", 1365 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1366 DEBUG_PROBE); 1367 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1368 WDCDEBUG_PRINT((", udamreg 0x%x", 1369 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1370 DEBUG_PROBE); 1371 } 1372 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1373 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) { 1374 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1375 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1376 DEBUG_PROBE); 1377 } 1378 1379 } 1380 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1381 1382 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1383 cp = &sc->pciide_channels[channel]; 1384 /* PIIX is compat-only */ 1385 if (pciide_chansetup(sc, channel, 0) == 0) 1386 continue; 1387 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1388 if ((PIIX_IDETIM_READ(idetim, channel) & 1389 PIIX_IDETIM_IDE) == 0) { 1390 printf("%s: %s channel ignored (disabled)\n", 1391 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1392 continue; 1393 } 1394 /* PIIX are compat-only pciide devices */ 1395 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 1396 if (cp->hw_ok == 0) 1397 continue; 1398 if (pciide_chan_candisable(cp)) { 1399 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 1400 channel); 1401 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 1402 idetim); 1403 } 1404 pciide_map_compat_intr(pa, cp, channel, 0); 1405 if (cp->hw_ok == 0) 1406 continue; 1407 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 1408 } 1409 1410 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 1411 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1412 DEBUG_PROBE); 1413 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1414 WDCDEBUG_PRINT((", sidetim=0x%x", 1415 
pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1416 DEBUG_PROBE); 1417 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1418 WDCDEBUG_PRINT((", udamreg 0x%x", 1419 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1420 DEBUG_PROBE); 1421 } 1422 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1423 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) { 1424 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1425 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1426 DEBUG_PROBE); 1427 } 1428 } 1429 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1430 } 1431 1432 void 1433 piix_setup_channel(chp) 1434 struct channel_softc *chp; 1435 { 1436 u_int8_t mode[2], drive; 1437 u_int32_t oidetim, idetim, idedma_ctl; 1438 struct pciide_channel *cp = (struct pciide_channel*)chp; 1439 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1440 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 1441 1442 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1443 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 1444 idedma_ctl = 0; 1445 1446 /* set up new idetim: Enable IDE registers decode */ 1447 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 1448 chp->channel); 1449 1450 /* setup DMA */ 1451 pciide_channel_dma_setup(cp); 1452 1453 /* 1454 * Here we have to mess up with drives mode: PIIX can't have 1455 * different timings for master and slave drives. 1456 * We need to find the best combination. 1457 */ 1458 1459 /* If both drives supports DMA, take the lower mode */ 1460 if ((drvp[0].drive_flags & DRIVE_DMA) && 1461 (drvp[1].drive_flags & DRIVE_DMA)) { 1462 mode[0] = mode[1] = 1463 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 1464 drvp[0].DMA_mode = mode[0]; 1465 drvp[1].DMA_mode = mode[1]; 1466 goto ok; 1467 } 1468 /* 1469 * If only one drive supports DMA, use its mode, and 1470 * put the other one in PIO mode 0 if mode not compatible 1471 */ 1472 if (drvp[0].drive_flags & DRIVE_DMA) { 1473 mode[0] = drvp[0].DMA_mode; 1474 mode[1] = drvp[1].PIO_mode; 1475 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 1476 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 1477 mode[1] = drvp[1].PIO_mode = 0; 1478 goto ok; 1479 } 1480 if (drvp[1].drive_flags & DRIVE_DMA) { 1481 mode[1] = drvp[1].DMA_mode; 1482 mode[0] = drvp[0].PIO_mode; 1483 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 1484 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 1485 mode[0] = drvp[0].PIO_mode = 0; 1486 goto ok; 1487 } 1488 /* 1489 * If both drives are not DMA, takes the lower mode, unless 1490 * one of them is PIO mode < 2 1491 */ 1492 if (drvp[0].PIO_mode < 2) { 1493 mode[0] = drvp[0].PIO_mode = 0; 1494 mode[1] = drvp[1].PIO_mode; 1495 } else if (drvp[1].PIO_mode < 2) { 1496 mode[1] = drvp[1].PIO_mode = 0; 1497 mode[0] = drvp[0].PIO_mode; 1498 } else { 1499 mode[0] = mode[1] = 1500 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 1501 drvp[0].PIO_mode = mode[0]; 1502 drvp[1].PIO_mode = mode[1]; 1503 } 1504 ok: /* The modes are setup */ 1505 for (drive = 0; drive < 2; drive++) { 1506 if (drvp[drive].drive_flags & DRIVE_DMA) { 1507 idetim |= piix_setup_idetim_timings( 1508 mode[drive], 1, chp->channel); 1509 goto end; 1510 } 1511 } 1512 /* If we are there, none of the drives are DMA */ 1513 if (mode[0] >= 2) 1514 idetim |= piix_setup_idetim_timings( 1515 mode[0], 0, chp->channel); 1516 else 1517 idetim |= piix_setup_idetim_timings( 1518 mode[1], 0, chp->channel); 1519 end: /* 1520 * timing mode is now set up in the controller. 
Enable 1521 * it per-drive 1522 */ 1523 for (drive = 0; drive < 2; drive++) { 1524 /* If no drive, skip */ 1525 if ((drvp[drive].drive_flags & DRIVE) == 0) 1526 continue; 1527 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 1528 if (drvp[drive].drive_flags & DRIVE_DMA) 1529 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1530 } 1531 if (idedma_ctl != 0) { 1532 /* Add software bits in status register */ 1533 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1534 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 1535 idedma_ctl); 1536 } 1537 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1538 pciide_print_modes(cp); 1539 } 1540 1541 void 1542 piix3_4_setup_channel(chp) 1543 struct channel_softc *chp; 1544 { 1545 struct ata_drive_datas *drvp; 1546 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 1547 struct pciide_channel *cp = (struct pciide_channel*)chp; 1548 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1549 int drive; 1550 int channel = chp->channel; 1551 1552 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1553 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 1554 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 1555 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 1556 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 1557 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 1558 PIIX_SIDETIM_RTC_MASK(channel)); 1559 1560 idedma_ctl = 0; 1561 /* If channel disabled, no need to go further */ 1562 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 1563 return; 1564 /* set up new idetim: Enable IDE registers decode */ 1565 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 1566 1567 /* setup DMA if needed */ 1568 pciide_channel_dma_setup(cp); 1569 1570 for (drive = 0; drive < 2; drive++) { 1571 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 1572 PIIX_UDMATIM_SET(0x3, channel, drive)); 1573 drvp = &chp->ch_drive[drive]; 1574 /* If no drive, skip */ 1575 if ((drvp->drive_flags & DRIVE) == 0) 1576 continue; 1577 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1578 (drvp->drive_flags & DRIVE_UDMA) == 0)) 1579 goto pio; 1580 1581 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1582 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) { 1583 ideconf |= PIIX_CONFIG_PINGPONG; 1584 } 1585 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) { 1586 /* setup Ultra/66 */ 1587 if (drvp->UDMA_mode > 2 && 1588 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 1589 drvp->UDMA_mode = 2; 1590 if (drvp->UDMA_mode > 2) 1591 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 1592 else 1593 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 1594 } 1595 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1596 (drvp->drive_flags & DRIVE_UDMA)) { 1597 /* use Ultra/DMA */ 1598 drvp->drive_flags &= ~DRIVE_DMA; 1599 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 1600 udmareg |= PIIX_UDMATIM_SET( 1601 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 1602 } else { 1603 /* use Multiword DMA */ 1604 drvp->drive_flags &= ~DRIVE_UDMA; 1605 if (drive == 0) { 1606 idetim |= piix_setup_idetim_timings( 1607 drvp->DMA_mode, 1, channel); 1608 } else { 1609 sidetim |= piix_setup_sidetim_timings( 1610 drvp->DMA_mode, 1, channel); 1611 idetim =PIIX_IDETIM_SET(idetim, 1612 PIIX_IDETIM_SITRE, channel); 1613 } 1614 } 1615 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1616 1617 pio: /* use PIO mode */ 1618 idetim |= piix_setup_idetim_drvs(drvp); 1619 if (drive == 0) { 1620 idetim |= piix_setup_idetim_timings( 
			    drvp->PIO_mode, 0, channel);
		} else {
			sidetim |= piix_setup_sidetim_timings(
			    drvp->PIO_mode, 0, channel);
			idetim = PIIX_IDETIM_SET(idetim,
			    PIIX_IDETIM_SITRE, channel);
		}
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
	pciide_print_modes(cp);
}


/* setup ISP and RTC fields, based on mode */
static u_int32_t
piix_setup_idetim_timings(mode, dma, channel)
	u_int8_t mode;
	u_int8_t dma;
	u_int8_t channel;
{

	if (dma)
		return PIIX_IDETIM_SET(0,
		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
		    channel);
	else
		return PIIX_IDETIM_SET(0,
		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
		    channel);
}

/* setup DTE, PPE, IE and TIME field based on PIO mode */
static u_int32_t
piix_setup_idetim_drvs(drvp)
	struct ata_drive_datas *drvp;
{
	u_int32_t ret = 0;
	struct channel_softc *chp = drvp->chnl_softc;
	u_int8_t channel = chp->channel;
	u_int8_t drive = drvp->drive;

	/*
	 * If drive is using UDMA, timing setups are independent,
	 * so just check DMA and PIO here.
	 */
	if (drvp->drive_flags & DRIVE_DMA) {
		/* if mode = DMA mode 0, use compatible timings */
		if ((drvp->drive_flags & DRIVE_DMA) &&
		    drvp->DMA_mode == 0) {
			drvp->PIO_mode = 0;
			return ret;
		}
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
		/*
		 * If PIO and DMA timings are the same, use fast timings
		 * for PIO too, else use compat timings.
		 */
		if ((piix_isp_pio[drvp->PIO_mode] !=
		    piix_isp_dma[drvp->DMA_mode]) ||
		    (piix_rtc_pio[drvp->PIO_mode] !=
		    piix_rtc_dma[drvp->DMA_mode]))
			drvp->PIO_mode = 0;
		/* if PIO mode <= 2, use compat timings for PIO */
		if (drvp->PIO_mode <= 2) {
			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
			    channel);
			return ret;
		}
	}

	/*
	 * Now setup PIO modes. If mode < 2, use compat timings.
	 * Else enable fast timings. Enable IORDY and prefetch/post
	 * if PIO mode >= 3.
1706 */ 1707 1708 if (drvp->PIO_mode < 2) 1709 return ret; 1710 1711 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 1712 if (drvp->PIO_mode >= 3) { 1713 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 1714 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 1715 } 1716 return ret; 1717 } 1718 1719 /* setup values in SIDETIM registers, based on mode */ 1720 static u_int32_t 1721 piix_setup_sidetim_timings(mode, dma, channel) 1722 u_int8_t mode; 1723 u_int8_t dma; 1724 u_int8_t channel; 1725 { 1726 if (dma) 1727 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 1728 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel); 1729 else 1730 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 1731 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel); 1732 } 1733 1734 void 1735 amd756_chip_map(sc, pa) 1736 struct pciide_softc *sc; 1737 struct pci_attach_args *pa; 1738 { 1739 struct pciide_channel *cp; 1740 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1741 int channel; 1742 pcireg_t chanenable; 1743 bus_size_t cmdsize, ctlsize; 1744 1745 if (pciide_chipen(sc, pa) == 0) 1746 return; 1747 printf("%s: bus-master DMA support present", 1748 sc->sc_wdcdev.sc_dev.dv_xname); 1749 pciide_mapreg_dma(sc, pa); 1750 printf("\n"); 1751 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1752 WDC_CAPABILITY_MODE; 1753 if (sc->sc_dma_ok) { 1754 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 1755 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 1756 sc->sc_wdcdev.irqack = pciide_irqack; 1757 } 1758 sc->sc_wdcdev.PIO_cap = 4; 1759 sc->sc_wdcdev.DMA_cap = 2; 1760 sc->sc_wdcdev.UDMA_cap = 4; 1761 sc->sc_wdcdev.set_modes = amd756_setup_channel; 1762 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1763 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1764 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 1765 1766 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable), 1767 DEBUG_PROBE); 1768 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1769 cp = &sc->pciide_channels[channel]; 1770 if (pciide_chansetup(sc, channel, interface) == 0) 1771 continue; 1772 1773 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 1774 printf("%s: %s channel ignored (disabled)\n", 1775 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1776 continue; 1777 } 1778 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 1779 pciide_pci_intr); 1780 1781 if (pciide_chan_candisable(cp)) 1782 chanenable &= ~AMD756_CHAN_EN(channel); 1783 pciide_map_compat_intr(pa, cp, channel, interface); 1784 if (cp->hw_ok == 0) 1785 continue; 1786 1787 amd756_setup_channel(&cp->wdc_channel); 1788 } 1789 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 1790 chanenable); 1791 return; 1792 } 1793 1794 void 1795 amd756_setup_channel(chp) 1796 struct channel_softc *chp; 1797 { 1798 u_int32_t udmatim_reg, datatim_reg; 1799 u_int8_t idedma_ctl; 1800 int mode, drive; 1801 struct ata_drive_datas *drvp; 1802 struct pciide_channel *cp = (struct pciide_channel*)chp; 1803 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1804 #ifndef PCIIDE_AMD756_ENABLEDMA 1805 int rev = PCI_REVISION( 1806 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 1807 #endif 1808 1809 idedma_ctl = 0; 1810 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 1811 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 1812 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 1813 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 1814 
1815 /* setup DMA if needed */ 1816 pciide_channel_dma_setup(cp); 1817 1818 for (drive = 0; drive < 2; drive++) { 1819 drvp = &chp->ch_drive[drive]; 1820 /* If no drive, skip */ 1821 if ((drvp->drive_flags & DRIVE) == 0) 1822 continue; 1823 /* add timing values, setup DMA if needed */ 1824 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1825 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 1826 mode = drvp->PIO_mode; 1827 goto pio; 1828 } 1829 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 1830 (drvp->drive_flags & DRIVE_UDMA)) { 1831 /* use Ultra/DMA */ 1832 drvp->drive_flags &= ~DRIVE_DMA; 1833 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 1834 AMD756_UDMA_EN_MTH(chp->channel, drive) | 1835 AMD756_UDMA_TIME(chp->channel, drive, 1836 amd756_udma_tim[drvp->UDMA_mode]); 1837 /* can use PIO timings, MW DMA unused */ 1838 mode = drvp->PIO_mode; 1839 } else { 1840 /* use Multiword DMA, but only if revision is OK */ 1841 drvp->drive_flags &= ~DRIVE_UDMA; 1842 #ifndef PCIIDE_AMD756_ENABLEDMA 1843 /* 1844 * The workaround doesn't seem to be necessary 1845 * with all drives, so it can be disabled by 1846 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 1847 * triggered. 1848 */ 1849 if (AMD756_CHIPREV_DISABLEDMA(rev)) { 1850 printf("%s:%d:%d: multi-word DMA disabled due " 1851 "to chip revision\n", 1852 sc->sc_wdcdev.sc_dev.dv_xname, 1853 chp->channel, drive); 1854 mode = drvp->PIO_mode; 1855 drvp->drive_flags &= ~DRIVE_DMA; 1856 goto pio; 1857 } 1858 #endif 1859 /* mode = min(pio, dma+2) */ 1860 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 1861 mode = drvp->PIO_mode; 1862 else 1863 mode = drvp->DMA_mode + 2; 1864 } 1865 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1866 1867 pio: /* setup PIO mode */ 1868 if (mode <= 2) { 1869 drvp->DMA_mode = 0; 1870 drvp->PIO_mode = 0; 1871 mode = 0; 1872 } else { 1873 drvp->PIO_mode = mode; 1874 drvp->DMA_mode = mode - 2; 1875 } 1876 datatim_reg |= 1877 AMD756_DATATIM_PULSE(chp->channel, drive, 1878 amd756_pio_set[mode]) | 1879 AMD756_DATATIM_RECOV(chp->channel, drive, 1880 amd756_pio_rec[mode]); 1881 } 1882 if (idedma_ctl != 0) { 1883 /* Add software bits in status register */ 1884 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1885 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 1886 idedma_ctl); 1887 } 1888 pciide_print_modes(cp); 1889 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 1890 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 1891 } 1892 1893 void 1894 apollo_chip_map(sc, pa) 1895 struct pciide_softc *sc; 1896 struct pci_attach_args *pa; 1897 { 1898 struct pciide_channel *cp; 1899 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1900 int rev = PCI_REVISION(pa->pa_class); 1901 int channel; 1902 u_int32_t ideconf; 1903 bus_size_t cmdsize, ctlsize; 1904 1905 if (pciide_chipen(sc, pa) == 0) 1906 return; 1907 printf("%s: bus-master DMA support present", 1908 sc->sc_wdcdev.sc_dev.dv_xname); 1909 pciide_mapreg_dma(sc, pa); 1910 printf("\n"); 1911 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1912 WDC_CAPABILITY_MODE; 1913 if (sc->sc_dma_ok) { 1914 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1915 sc->sc_wdcdev.irqack = pciide_irqack; 1916 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE 1917 && rev >= 6) 1918 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1919 } 1920 sc->sc_wdcdev.PIO_cap = 4; 1921 sc->sc_wdcdev.DMA_cap = 2; 1922 sc->sc_wdcdev.UDMA_cap = 2; 1923 sc->sc_wdcdev.set_modes = apollo_setup_channel; 1924 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1925 
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
	    DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			ideconf &= ~APO_IDECONF_EN(channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
			    ideconf);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);

		if (cp->hw_ok == 0)
			continue;
		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
	}
	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
}

void
apollo_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
			    APO_UDMA_EN_MTH(chp->channel, drive) |
			    APO_UDMA_TIME(chp->channel, drive,
			    apollo_udma_tim[drvp->UDMA_mode]);
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    APO_DATATIM_PULSE(chp->channel, drive,
		    apollo_pio_set[mode]) |
2030 APO_DATATIM_RECOV(chp->channel, drive, 2031 apollo_pio_rec[mode]); 2032 } 2033 if (idedma_ctl != 0) { 2034 /* Add software bits in status register */ 2035 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2036 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2037 idedma_ctl); 2038 } 2039 pciide_print_modes(cp); 2040 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2041 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2042 } 2043 2044 void 2045 cmd_channel_map(pa, sc, channel) 2046 struct pci_attach_args *pa; 2047 struct pciide_softc *sc; 2048 int channel; 2049 { 2050 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2051 bus_size_t cmdsize, ctlsize; 2052 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2053 int interface; 2054 2055 /* 2056 * The 0648/0649 can be told to identify as a RAID controller. 2057 * In this case, we have to fake interface 2058 */ 2059 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2060 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2061 PCIIDE_INTERFACE_SETTABLE(1); 2062 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2063 CMD_CONF_DSA1) 2064 interface |= PCIIDE_INTERFACE_PCI(0) | 2065 PCIIDE_INTERFACE_PCI(1); 2066 } else { 2067 interface = PCI_INTERFACE(pa->pa_class); 2068 } 2069 2070 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2071 cp->name = PCIIDE_CHANNEL_NAME(channel); 2072 cp->wdc_channel.channel = channel; 2073 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2074 2075 if (channel > 0) { 2076 cp->wdc_channel.ch_queue = 2077 sc->pciide_channels[0].wdc_channel.ch_queue; 2078 } else { 2079 cp->wdc_channel.ch_queue = 2080 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2081 } 2082 if (cp->wdc_channel.ch_queue == NULL) { 2083 printf("%s %s channel: " 2084 "can't allocate memory for command queue", 2085 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2086 return; 2087 } 2088 2089 printf("%s: %s channel %s to %s mode\n", 2090 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2091 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 2092 "configured" : "wired", 2093 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 2094 "native-PCI" : "compatibility"); 2095 2096 /* 2097 * with a CMD PCI64x, if we get here, the first channel is enabled: 2098 * there's no way to disable the first channel without disabling 2099 * the whole device 2100 */ 2101 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 2102 printf("%s: %s channel ignored (disabled)\n", 2103 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2104 return; 2105 } 2106 2107 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 2108 if (cp->hw_ok == 0) 2109 return; 2110 if (channel == 1) { 2111 if (pciide_chan_candisable(cp)) { 2112 ctrl &= ~CMD_CTRL_2PORT; 2113 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2114 CMD_CTRL, ctrl); 2115 } 2116 } 2117 pciide_map_compat_intr(pa, cp, channel, interface); 2118 } 2119 2120 int 2121 cmd_pci_intr(arg) 2122 void *arg; 2123 { 2124 struct pciide_softc *sc = arg; 2125 struct pciide_channel *cp; 2126 struct channel_softc *wdc_cp; 2127 int i, rv, crv; 2128 u_int32_t priirq, secirq; 2129 2130 rv = 0; 2131 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2132 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2133 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2134 cp = &sc->pciide_channels[i]; 2135 wdc_cp = &cp->wdc_channel; 2136 /* If a compat channel skip. 
		 */
		if (cp->compat)
			continue;
		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
			crv = wdcintr(wdc_cp);
			if (crv == 0)
				printf("%s:%d: bogus intr\n",
				    sc->sc_wdcdev.sc_dev.dv_xname, i);
			else
				rv = 1;
		}
	}
	return rv;
}

void
cmd_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	int channel;

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and base address registers can be disabled at
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif

	printf("%s: hardware does not support DMA\n",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	sc->sc_dma_ok = 0;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cmd_channel_map(pa, sc, channel);
	}
}

void
cmd0643_9_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and base address registers can be disabled at
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_CMDTECH_649:
		case PCI_PRODUCT_CMDTECH_648:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 4;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_646:
			if (rev >= CMD0646U2_REV) {
				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
				sc->sc_wdcdev.UDMA_cap = 2;
			} else if (rev >= CMD0646U_REV) {
				/*
				 * Linux's driver claims that the 646U is broken
				 * with UDMA.
Only enable it if we know what we're 2234 * doing 2235 */ 2236 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 2237 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2238 sc->sc_wdcdev.UDMA_cap = 2; 2239 #endif 2240 /* explicitely disable UDMA */ 2241 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2242 CMD_UDMATIM(0), 0); 2243 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2244 CMD_UDMATIM(1), 0); 2245 } 2246 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2247 break; 2248 default: 2249 sc->sc_wdcdev.irqack = pciide_irqack; 2250 } 2251 } 2252 2253 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2254 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2255 sc->sc_wdcdev.PIO_cap = 4; 2256 sc->sc_wdcdev.DMA_cap = 2; 2257 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2258 2259 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2260 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2261 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2262 DEBUG_PROBE); 2263 2264 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2265 cp = &sc->pciide_channels[channel]; 2266 cmd_channel_map(pa, sc, channel); 2267 if (cp->hw_ok == 0) 2268 continue; 2269 cmd0643_9_setup_channel(&cp->wdc_channel); 2270 } 2271 /* 2272 * note - this also makes sure we clear the irq disable and reset 2273 * bits 2274 */ 2275 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2276 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2277 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2278 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2279 DEBUG_PROBE); 2280 } 2281 2282 void 2283 cmd0643_9_setup_channel(chp) 2284 struct channel_softc *chp; 2285 { 2286 struct ata_drive_datas *drvp; 2287 u_int8_t tim; 2288 u_int32_t idedma_ctl, udma_reg; 2289 int drive; 2290 struct pciide_channel *cp = (struct pciide_channel*)chp; 2291 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2292 2293 idedma_ctl = 0; 2294 /* setup DMA if needed */ 2295 pciide_channel_dma_setup(cp); 2296 2297 for (drive = 0; drive < 2; drive++) { 2298 drvp = &chp->ch_drive[drive]; 2299 /* If no drive, skip */ 2300 if ((drvp->drive_flags & DRIVE) == 0) 2301 continue; 2302 /* add timing values, setup DMA if needed */ 2303 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 2304 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 2305 if (drvp->drive_flags & DRIVE_UDMA) { 2306 /* UltraDMA on a 646U2, 0648 or 0649 */ 2307 udma_reg = pciide_pci_read(sc->sc_pc, 2308 sc->sc_tag, CMD_UDMATIM(chp->channel)); 2309 if (drvp->UDMA_mode > 2 && 2310 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 2311 CMD_BICSR) & 2312 CMD_BICSR_80(chp->channel)) == 0) 2313 drvp->UDMA_mode = 2; 2314 if (drvp->UDMA_mode > 2) 2315 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 2316 else if (sc->sc_wdcdev.UDMA_cap > 2) 2317 udma_reg |= CMD_UDMATIM_UDMA33(drive); 2318 udma_reg |= CMD_UDMATIM_UDMA(drive); 2319 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 2320 CMD_UDMATIM_TIM_OFF(drive)); 2321 udma_reg |= 2322 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 2323 CMD_UDMATIM_TIM_OFF(drive)); 2324 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2325 CMD_UDMATIM(chp->channel), udma_reg); 2326 } else { 2327 /* 2328 * use Multiword DMA. 
2329 * Timings will be used for both PIO and DMA, 2330 * so adjust DMA mode if needed 2331 * if we have a 0646U2/8/9, turn off UDMA 2332 */ 2333 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2334 udma_reg = pciide_pci_read(sc->sc_pc, 2335 sc->sc_tag, 2336 CMD_UDMATIM(chp->channel)); 2337 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 2338 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2339 CMD_UDMATIM(chp->channel), 2340 udma_reg); 2341 } 2342 if (drvp->PIO_mode >= 3 && 2343 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2344 drvp->DMA_mode = drvp->PIO_mode - 2; 2345 } 2346 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 2347 } 2348 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2349 } 2350 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2351 CMD_DATA_TIM(chp->channel, drive), tim); 2352 } 2353 if (idedma_ctl != 0) { 2354 /* Add software bits in status register */ 2355 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2356 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel), 2357 idedma_ctl); 2358 } 2359 pciide_print_modes(cp); 2360 } 2361 2362 void 2363 cmd646_9_irqack(chp) 2364 struct channel_softc *chp; 2365 { 2366 u_int32_t priirq, secirq; 2367 struct pciide_channel *cp = (struct pciide_channel*)chp; 2368 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2369 2370 if (chp->channel == 0) { 2371 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2372 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 2373 } else { 2374 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2375 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 2376 } 2377 pciide_irqack(chp); 2378 } 2379 2380 void 2381 cy693_chip_map(sc, pa) 2382 struct pciide_softc *sc; 2383 struct pci_attach_args *pa; 2384 { 2385 struct pciide_channel *cp; 2386 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2387 bus_size_t cmdsize, ctlsize; 2388 2389 if (pciide_chipen(sc, pa) == 0) 2390 return; 2391 /* 2392 * this chip has 2 PCI IDE functions, one for primary and one for 2393 * secondary. 
So we need to call pciide_mapregs_compat() with 2394 * the real channel 2395 */ 2396 if (pa->pa_function == 1) { 2397 sc->sc_cy_compatchan = 0; 2398 } else if (pa->pa_function == 2) { 2399 sc->sc_cy_compatchan = 1; 2400 } else { 2401 printf("%s: unexpected PCI function %d\n", 2402 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2403 return; 2404 } 2405 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2406 printf("%s: bus-master DMA support present", 2407 sc->sc_wdcdev.sc_dev.dv_xname); 2408 pciide_mapreg_dma(sc, pa); 2409 } else { 2410 printf("%s: hardware does not support DMA", 2411 sc->sc_wdcdev.sc_dev.dv_xname); 2412 sc->sc_dma_ok = 0; 2413 } 2414 printf("\n"); 2415 2416 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 2417 if (sc->sc_cy_handle == NULL) { 2418 printf("%s: unable to map hyperCache control registers\n", 2419 sc->sc_wdcdev.sc_dev.dv_xname); 2420 sc->sc_dma_ok = 0; 2421 } 2422 2423 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2424 WDC_CAPABILITY_MODE; 2425 if (sc->sc_dma_ok) { 2426 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2427 sc->sc_wdcdev.irqack = pciide_irqack; 2428 } 2429 sc->sc_wdcdev.PIO_cap = 4; 2430 sc->sc_wdcdev.DMA_cap = 2; 2431 sc->sc_wdcdev.set_modes = cy693_setup_channel; 2432 2433 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2434 sc->sc_wdcdev.nchannels = 1; 2435 2436 /* Only one channel for this chip; if we are here it's enabled */ 2437 cp = &sc->pciide_channels[0]; 2438 sc->wdc_chanarray[0] = &cp->wdc_channel; 2439 cp->name = PCIIDE_CHANNEL_NAME(0); 2440 cp->wdc_channel.channel = 0; 2441 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2442 cp->wdc_channel.ch_queue = 2443 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2444 if (cp->wdc_channel.ch_queue == NULL) { 2445 printf("%s primary channel: " 2446 "can't allocate memory for command queue", 2447 sc->sc_wdcdev.sc_dev.dv_xname); 2448 return; 2449 } 2450 printf("%s: primary channel %s to ", 2451 sc->sc_wdcdev.sc_dev.dv_xname, 2452 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 
2453 "configured" : "wired"); 2454 if (interface & PCIIDE_INTERFACE_PCI(0)) { 2455 printf("native-PCI"); 2456 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 2457 pciide_pci_intr); 2458 } else { 2459 printf("compatibility"); 2460 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 2461 &cmdsize, &ctlsize); 2462 } 2463 printf(" mode\n"); 2464 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2465 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2466 wdcattach(&cp->wdc_channel); 2467 if (pciide_chan_candisable(cp)) { 2468 pci_conf_write(sc->sc_pc, sc->sc_tag, 2469 PCI_COMMAND_STATUS_REG, 0); 2470 } 2471 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 2472 if (cp->hw_ok == 0) 2473 return; 2474 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 2475 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 2476 cy693_setup_channel(&cp->wdc_channel); 2477 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 2478 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 2479 } 2480 2481 void 2482 cy693_setup_channel(chp) 2483 struct channel_softc *chp; 2484 { 2485 struct ata_drive_datas *drvp; 2486 int drive; 2487 u_int32_t cy_cmd_ctrl; 2488 u_int32_t idedma_ctl; 2489 struct pciide_channel *cp = (struct pciide_channel*)chp; 2490 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2491 int dma_mode = -1; 2492 2493 cy_cmd_ctrl = idedma_ctl = 0; 2494 2495 /* setup DMA if needed */ 2496 pciide_channel_dma_setup(cp); 2497 2498 for (drive = 0; drive < 2; drive++) { 2499 drvp = &chp->ch_drive[drive]; 2500 /* If no drive, skip */ 2501 if ((drvp->drive_flags & DRIVE) == 0) 2502 continue; 2503 /* add timing values, setup DMA if needed */ 2504 if (drvp->drive_flags & DRIVE_DMA) { 2505 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2506 /* use Multiword DMA */ 2507 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 2508 dma_mode = drvp->DMA_mode; 2509 } 2510 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2511 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 2512 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2513 CY_CMD_CTRL_IOW_REC_OFF(drive)); 2514 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 2515 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 2516 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 2517 CY_CMD_CTRL_IOR_REC_OFF(drive)); 2518 } 2519 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 2520 chp->ch_drive[0].DMA_mode = dma_mode; 2521 chp->ch_drive[1].DMA_mode = dma_mode; 2522 2523 if (dma_mode == -1) 2524 dma_mode = 0; 2525 2526 if (sc->sc_cy_handle != NULL) { 2527 /* Note: `multiple' is implied. */ 2528 cy82c693_write(sc->sc_cy_handle, 2529 (sc->sc_cy_compatchan == 0) ? 
2530 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 2531 } 2532 2533 pciide_print_modes(cp); 2534 2535 if (idedma_ctl != 0) { 2536 /* Add software bits in status register */ 2537 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2538 IDEDMA_CTL, idedma_ctl); 2539 } 2540 } 2541 2542 void 2543 sis_chip_map(sc, pa) 2544 struct pciide_softc *sc; 2545 struct pci_attach_args *pa; 2546 { 2547 struct pciide_channel *cp; 2548 int channel; 2549 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 2550 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2551 pcireg_t rev = PCI_REVISION(pa->pa_class); 2552 bus_size_t cmdsize, ctlsize; 2553 2554 if (pciide_chipen(sc, pa) == 0) 2555 return; 2556 printf("%s: bus-master DMA support present", 2557 sc->sc_wdcdev.sc_dev.dv_xname); 2558 pciide_mapreg_dma(sc, pa); 2559 printf("\n"); 2560 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2561 WDC_CAPABILITY_MODE; 2562 if (sc->sc_dma_ok) { 2563 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2564 sc->sc_wdcdev.irqack = pciide_irqack; 2565 if (rev > 0xd0) 2566 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2567 } 2568 2569 sc->sc_wdcdev.PIO_cap = 4; 2570 sc->sc_wdcdev.DMA_cap = 2; 2571 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) 2572 sc->sc_wdcdev.UDMA_cap = 2; 2573 sc->sc_wdcdev.set_modes = sis_setup_channel; 2574 2575 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2576 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2577 2578 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 2579 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 2580 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE); 2581 2582 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2583 cp = &sc->pciide_channels[channel]; 2584 if (pciide_chansetup(sc, channel, interface) == 0) 2585 continue; 2586 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 2587 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 2588 printf("%s: %s channel ignored (disabled)\n", 2589 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2590 continue; 2591 } 2592 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2593 pciide_pci_intr); 2594 if (cp->hw_ok == 0) 2595 continue; 2596 if (pciide_chan_candisable(cp)) { 2597 if (channel == 0) 2598 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 2599 else 2600 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 2601 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 2602 sis_ctr0); 2603 } 2604 pciide_map_compat_intr(pa, cp, channel, interface); 2605 if (cp->hw_ok == 0) 2606 continue; 2607 sis_setup_channel(&cp->wdc_channel); 2608 } 2609 } 2610 2611 void 2612 sis_setup_channel(chp) 2613 struct channel_softc *chp; 2614 { 2615 struct ata_drive_datas *drvp; 2616 int drive; 2617 u_int32_t sis_tim; 2618 u_int32_t idedma_ctl; 2619 struct pciide_channel *cp = (struct pciide_channel*)chp; 2620 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2621 2622 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 2623 "channel %d 0x%x\n", chp->channel, 2624 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 2625 DEBUG_PROBE); 2626 sis_tim = 0; 2627 idedma_ctl = 0; 2628 /* setup DMA if needed */ 2629 pciide_channel_dma_setup(cp); 2630 2631 for (drive = 0; drive < 2; drive++) { 2632 drvp = &chp->ch_drive[drive]; 2633 /* If no drive, skip */ 2634 if ((drvp->drive_flags & DRIVE) == 0) 2635 continue; 2636 /* add timing values, setup DMA if needed */ 2637 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2638 (drvp->drive_flags & DRIVE_UDMA) == 0) 2639 goto pio; 2640 2641 if (drvp->drive_flags & 
DRIVE_UDMA) { 2642 /* use Ultra/DMA */ 2643 drvp->drive_flags &= ~DRIVE_DMA; 2644 sis_tim |= sis_udma_tim[drvp->UDMA_mode] << 2645 SIS_TIM_UDMA_TIME_OFF(drive); 2646 sis_tim |= SIS_TIM_UDMA_EN(drive); 2647 } else { 2648 /* 2649 * use Multiword DMA 2650 * Timings will be used for both PIO and DMA, 2651 * so adjust DMA mode if needed 2652 */ 2653 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2654 drvp->PIO_mode = drvp->DMA_mode + 2; 2655 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2656 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 2657 drvp->PIO_mode - 2 : 0; 2658 if (drvp->DMA_mode == 0) 2659 drvp->PIO_mode = 0; 2660 } 2661 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2662 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] << 2663 SIS_TIM_ACT_OFF(drive); 2664 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 2665 SIS_TIM_REC_OFF(drive); 2666 } 2667 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 2668 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 2669 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 2670 if (idedma_ctl != 0) { 2671 /* Add software bits in status register */ 2672 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2673 IDEDMA_CTL, idedma_ctl); 2674 } 2675 pciide_print_modes(cp); 2676 } 2677 2678 void 2679 acer_chip_map(sc, pa) 2680 struct pciide_softc *sc; 2681 struct pci_attach_args *pa; 2682 { 2683 struct pciide_channel *cp; 2684 int channel; 2685 pcireg_t cr, interface; 2686 bus_size_t cmdsize, ctlsize; 2687 2688 if (pciide_chipen(sc, pa) == 0) 2689 return; 2690 printf("%s: bus-master DMA support present", 2691 sc->sc_wdcdev.sc_dev.dv_xname); 2692 pciide_mapreg_dma(sc, pa); 2693 printf("\n"); 2694 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2695 WDC_CAPABILITY_MODE; 2696 if (sc->sc_dma_ok) { 2697 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2698 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2699 sc->sc_wdcdev.irqack = pciide_irqack; 2700 } 2701 2702 sc->sc_wdcdev.PIO_cap = 4; 2703 sc->sc_wdcdev.DMA_cap = 2; 2704 sc->sc_wdcdev.UDMA_cap = 2; 2705 sc->sc_wdcdev.set_modes = acer_setup_channel; 2706 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2707 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2708 2709 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 2710 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 2711 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 2712 2713 /* Enable "microsoft register bits" R/W. 
*/ 2714 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 2715 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 2716 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 2717 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 2718 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 2719 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 2720 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 2721 ~ACER_CHANSTATUSREGS_RO); 2722 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 2723 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 2724 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 2725 /* Don't use cr, re-read the real register content instead */ 2726 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 2727 PCI_CLASS_REG)); 2728 2729 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2730 cp = &sc->pciide_channels[channel]; 2731 if (pciide_chansetup(sc, channel, interface) == 0) 2732 continue; 2733 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 2734 printf("%s: %s channel ignored (disabled)\n", 2735 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2736 continue; 2737 } 2738 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2739 acer_pci_intr); 2740 if (cp->hw_ok == 0) 2741 continue; 2742 if (pciide_chan_candisable(cp)) { 2743 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 2744 pci_conf_write(sc->sc_pc, sc->sc_tag, 2745 PCI_CLASS_REG, cr); 2746 } 2747 pciide_map_compat_intr(pa, cp, channel, interface); 2748 acer_setup_channel(&cp->wdc_channel); 2749 } 2750 } 2751 2752 void 2753 acer_setup_channel(chp) 2754 struct channel_softc *chp; 2755 { 2756 struct ata_drive_datas *drvp; 2757 int drive; 2758 u_int32_t acer_fifo_udma; 2759 u_int32_t idedma_ctl; 2760 struct pciide_channel *cp = (struct pciide_channel*)chp; 2761 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2762 2763 idedma_ctl = 0; 2764 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 2765 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 2766 acer_fifo_udma), DEBUG_PROBE); 2767 /* setup DMA if needed */ 2768 pciide_channel_dma_setup(cp); 2769 2770 for (drive = 0; drive < 2; drive++) { 2771 drvp = &chp->ch_drive[drive]; 2772 /* If no drive, skip */ 2773 if ((drvp->drive_flags & DRIVE) == 0) 2774 continue; 2775 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 2776 "channel %d drive %d 0x%x\n", chp->channel, drive, 2777 pciide_pci_read(sc->sc_pc, sc->sc_tag, 2778 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 2779 /* clear FIFO/DMA mode */ 2780 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 2781 ACER_UDMA_EN(chp->channel, drive) | 2782 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 2783 2784 /* add timing values, setup DMA if needed */ 2785 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 2786 (drvp->drive_flags & DRIVE_UDMA) == 0) { 2787 acer_fifo_udma |= 2788 ACER_FTH_OPL(chp->channel, drive, 0x1); 2789 goto pio; 2790 } 2791 2792 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 2793 if (drvp->drive_flags & DRIVE_UDMA) { 2794 /* use Ultra/DMA */ 2795 drvp->drive_flags &= ~DRIVE_DMA; 2796 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 2797 acer_fifo_udma |= 2798 ACER_UDMA_TIM(chp->channel, drive, 2799 acer_udma[drvp->UDMA_mode]); 2800 } else { 2801 /* 2802 * use Multiword DMA 2803 * Timings will be used for both PIO and DMA, 2804 * so adjust DMA mode if needed 2805 */ 2806 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 2807 drvp->PIO_mode = drvp->DMA_mode + 2; 2808 
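			/*
			 * Conversely, if the drive's DMA timings would be
			 * faster than its PIO timings, slow DMA down so both
			 * can share one timing entry: e.g. a hypothetical
			 * drive at PIO 3 / multi-word DMA 2 is reduced to
			 * DMA mode 1 (PIO_mode - 2) below.
			 */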
if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 2809 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 2810 drvp->PIO_mode - 2 : 0; 2811 if (drvp->DMA_mode == 0) 2812 drvp->PIO_mode = 0; 2813 } 2814 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2815 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 2816 ACER_IDETIM(chp->channel, drive), 2817 acer_pio[drvp->PIO_mode]); 2818 } 2819 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 2820 acer_fifo_udma), DEBUG_PROBE); 2821 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 2822 if (idedma_ctl != 0) { 2823 /* Add software bits in status register */ 2824 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2825 IDEDMA_CTL, idedma_ctl); 2826 } 2827 pciide_print_modes(cp); 2828 } 2829 2830 int 2831 acer_pci_intr(arg) 2832 void *arg; 2833 { 2834 struct pciide_softc *sc = arg; 2835 struct pciide_channel *cp; 2836 struct channel_softc *wdc_cp; 2837 int i, rv, crv; 2838 u_int32_t chids; 2839 2840 rv = 0; 2841 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 2842 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2843 cp = &sc->pciide_channels[i]; 2844 wdc_cp = &cp->wdc_channel; 2845 /* If a compat channel skip. */ 2846 if (cp->compat) 2847 continue; 2848 if (chids & ACER_CHIDS_INT(i)) { 2849 crv = wdcintr(wdc_cp); 2850 if (crv == 0) 2851 printf("%s:%d: bogus intr\n", 2852 sc->sc_wdcdev.sc_dev.dv_xname, i); 2853 else 2854 rv = 1; 2855 } 2856 } 2857 return rv; 2858 } 2859 2860 void 2861 hpt_chip_map(sc, pa) 2862 struct pciide_softc *sc; 2863 struct pci_attach_args *pa; 2864 { 2865 struct pciide_channel *cp; 2866 int i, compatchan, revision; 2867 pcireg_t interface; 2868 bus_size_t cmdsize, ctlsize; 2869 2870 if (pciide_chipen(sc, pa) == 0) 2871 return; 2872 revision = PCI_REVISION(pa->pa_class); 2873 2874 /* 2875 * when the chip is in native mode it identifies itself as a 2876 * 'misc mass storage'. Fake interface in this case. 2877 */ 2878 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 2879 interface = PCI_INTERFACE(pa->pa_class); 2880 } else { 2881 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2882 PCIIDE_INTERFACE_PCI(0); 2883 if (revision == HPT370_REV) 2884 interface |= PCIIDE_INTERFACE_PCI(1); 2885 } 2886 2887 printf("%s: bus-master DMA support present", 2888 sc->sc_wdcdev.sc_dev.dv_xname); 2889 pciide_mapreg_dma(sc, pa); 2890 printf("\n"); 2891 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2892 WDC_CAPABILITY_MODE; 2893 if (sc->sc_dma_ok) { 2894 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2895 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2896 sc->sc_wdcdev.irqack = pciide_irqack; 2897 } 2898 sc->sc_wdcdev.PIO_cap = 4; 2899 sc->sc_wdcdev.DMA_cap = 2; 2900 sc->sc_wdcdev.UDMA_cap = 4; 2901 2902 sc->sc_wdcdev.set_modes = hpt_setup_channel; 2903 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2904 if (revision == HPT366_REV) { 2905 /* 2906 * The 366 has 2 PCI IDE functions, one for primary and one 2907 * for secondary. 
So we need to call pciide_mapregs_compat() 2908 * with the real channel 2909 */ 2910 if (pa->pa_function == 0) { 2911 compatchan = 0; 2912 } else if (pa->pa_function == 1) { 2913 compatchan = 1; 2914 } else { 2915 printf("%s: unexpected PCI function %d\n", 2916 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 2917 return; 2918 } 2919 sc->sc_wdcdev.nchannels = 1; 2920 } else { 2921 sc->sc_wdcdev.nchannels = 2; 2922 } 2923 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2924 cp = &sc->pciide_channels[i]; 2925 if (sc->sc_wdcdev.nchannels > 1) { 2926 compatchan = i; 2927 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 2928 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 2929 printf("%s: %s channel ignored (disabled)\n", 2930 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2931 continue; 2932 } 2933 } 2934 if (pciide_chansetup(sc, i, interface) == 0) 2935 continue; 2936 if (interface & PCIIDE_INTERFACE_PCI(i)) { 2937 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2938 &ctlsize, hpt_pci_intr); 2939 } else { 2940 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 2941 &cmdsize, &ctlsize); 2942 } 2943 if (cp->hw_ok == 0) 2944 return; 2945 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2946 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2947 wdcattach(&cp->wdc_channel); 2948 hpt_setup_channel(&cp->wdc_channel); 2949 } 2950 if (revision == HPT370_REV) { 2951 /* 2952 * HPT370_REV has a bit to disable interrupts, make sure 2953 * to clear it 2954 */ 2955 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 2956 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 2957 ~HPT_CSEL_IRQDIS); 2958 } 2959 return; 2960 } 2961 2962 2963 void 2964 hpt_setup_channel(chp) 2965 struct channel_softc *chp; 2966 { 2967 struct ata_drive_datas *drvp; 2968 int drive; 2969 int cable; 2970 u_int32_t before, after; 2971 u_int32_t idedma_ctl; 2972 struct pciide_channel *cp = (struct pciide_channel*)chp; 2973 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2974 2975 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 2976 2977 /* setup DMA if needed */ 2978 pciide_channel_dma_setup(cp); 2979 2980 idedma_ctl = 0; 2981 2982 /* Per drive settings */ 2983 for (drive = 0; drive < 2; drive++) { 2984 drvp = &chp->ch_drive[drive]; 2985 /* If no drive, skip */ 2986 if ((drvp->drive_flags & DRIVE) == 0) 2987 continue; 2988 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 2989 HPT_IDETIM(chp->channel, drive)); 2990 2991 /* add timing values, setup DMA if needed */ 2992 if (drvp->drive_flags & DRIVE_UDMA) { 2993 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 2994 drvp->UDMA_mode > 2) 2995 drvp->UDMA_mode = 2; 2996 after = (sc->sc_wdcdev.nchannels == 2) ? 2997 hpt370_udma[drvp->UDMA_mode] : 2998 hpt366_udma[drvp->UDMA_mode]; 2999 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3000 } else if (drvp->drive_flags & DRIVE_DMA) { 3001 /* 3002 * use Multiword DMA. 3003 * Timings will be used for both PIO and DMA, so adjust 3004 * DMA mode if needed 3005 */ 3006 if (drvp->PIO_mode >= 3 && 3007 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3008 drvp->DMA_mode = drvp->PIO_mode - 2; 3009 } 3010 after = (sc->sc_wdcdev.nchannels == 2) ? 3011 hpt370_dma[drvp->DMA_mode] : 3012 hpt366_dma[drvp->DMA_mode]; 3013 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3014 } else { 3015 /* PIO only */ 3016 after = (sc->sc_wdcdev.nchannels == 2) ? 
3017 hpt370_pio[drvp->PIO_mode] : 3018 hpt366_pio[drvp->PIO_mode]; 3019 } 3020 pci_conf_write(sc->sc_pc, sc->sc_tag, 3021 HPT_IDETIM(chp->channel, drive), after); 3022 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 3023 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname, 3024 after, before), DEBUG_PROBE); 3025 } 3026 if (idedma_ctl != 0) { 3027 /* Add software bits in status register */ 3028 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3029 IDEDMA_CTL, idedma_ctl); 3030 } 3031 pciide_print_modes(cp); 3032 } 3033 3034 int 3035 hpt_pci_intr(arg) 3036 void *arg; 3037 { 3038 struct pciide_softc *sc = arg; 3039 struct pciide_channel *cp; 3040 struct channel_softc *wdc_cp; 3041 int rv = 0; 3042 int dmastat, i, crv; 3043 3044 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3045 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3046 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i); 3047 if((dmastat & IDEDMA_CTL_INTR) == 0) 3048 continue; 3049 cp = &sc->pciide_channels[i]; 3050 wdc_cp = &cp->wdc_channel; 3051 crv = wdcintr(wdc_cp); 3052 if (crv == 0) { 3053 printf("%s:%d: bogus intr\n", 3054 sc->sc_wdcdev.sc_dev.dv_xname, i); 3055 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3056 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat); 3057 } else 3058 rv = 1; 3059 } 3060 return rv; 3061 } 3062 3063 3064 /* A macro to test product */ 3065 #define PDC_IS_262(sc) \ 3066 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \ 3067 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \ 3068 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) 3069 3070 void 3071 pdc202xx_chip_map(sc, pa) 3072 struct pciide_softc *sc; 3073 struct pci_attach_args *pa; 3074 { 3075 struct pciide_channel *cp; 3076 int channel; 3077 pcireg_t interface, st, mode; 3078 bus_size_t cmdsize, ctlsize; 3079 3080 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3081 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st), 3082 DEBUG_PROBE); 3083 if (pciide_chipen(sc, pa) == 0) 3084 return; 3085 3086 /* turn off RAID mode */ 3087 st &= ~PDC2xx_STATE_IDERAID; 3088 3089 /* 3090 * can't rely on the PCI_CLASS_REG content if the chip was in raid 3091 * mode. 
We have to fake interface 3092 */ 3093 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 3094 if (st & PDC2xx_STATE_NATIVE) 3095 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3096 3097 printf("%s: bus-master DMA support present", 3098 sc->sc_wdcdev.sc_dev.dv_xname); 3099 pciide_mapreg_dma(sc, pa); 3100 printf("\n"); 3101 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3102 WDC_CAPABILITY_MODE; 3103 if (sc->sc_dma_ok) { 3104 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3105 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3106 sc->sc_wdcdev.irqack = pciide_irqack; 3107 } 3108 sc->sc_wdcdev.PIO_cap = 4; 3109 sc->sc_wdcdev.DMA_cap = 2; 3110 if (PDC_IS_262(sc)) 3111 sc->sc_wdcdev.UDMA_cap = 4; 3112 else 3113 sc->sc_wdcdev.UDMA_cap = 2; 3114 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel; 3115 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3116 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3117 3118 /* setup failsafe defaults */ 3119 mode = 0; 3120 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 3121 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 3122 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 3123 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 3124 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3125 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 " 3126 "initial timings 0x%x, now 0x%x\n", channel, 3127 pci_conf_read(sc->sc_pc, sc->sc_tag, 3128 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 3129 DEBUG_PROBE); 3130 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0), 3131 mode | PDC2xx_TIM_IORDYp); 3132 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 " 3133 "initial timings 0x%x, now 0x%x\n", channel, 3134 pci_conf_read(sc->sc_pc, sc->sc_tag, 3135 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 3136 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1), 3137 mode); 3138 } 3139 3140 mode = PDC2xx_SCR_DMA; 3141 if (PDC_IS_262(sc)) { 3142 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 3143 } else { 3144 /* the BIOS set it up this way */ 3145 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 3146 } 3147 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 3148 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 3149 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n", 3150 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode), 3151 DEBUG_PROBE); 3152 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode); 3153 3154 /* controller initial state register is OK even without BIOS */ 3155 /* Set DMA mode to IDE DMA compatibility */ 3156 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 3157 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ), 3158 DEBUG_PROBE); 3159 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 3160 mode | 0x1); 3161 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 3162 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 3163 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 3164 mode | 0x1); 3165 3166 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3167 cp = &sc->pciide_channels[channel]; 3168 if (pciide_chansetup(sc, channel, interface) == 0) 3169 continue; 3170 if ((st & (PDC_IS_262(sc) ? 
3171 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 3172 printf("%s: %s channel ignored (disabled)\n", 3173 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3174 continue; 3175 } 3176 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3177 pdc202xx_pci_intr); 3178 if (cp->hw_ok == 0) 3179 continue; 3180 if (pciide_chan_candisable(cp)) 3181 st &= ~(PDC_IS_262(sc) ? 3182 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 3183 pciide_map_compat_intr(pa, cp, channel, interface); 3184 pdc202xx_setup_channel(&cp->wdc_channel); 3185 } 3186 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st), 3187 DEBUG_PROBE); 3188 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 3189 return; 3190 } 3191 3192 void 3193 pdc202xx_setup_channel(chp) 3194 struct channel_softc *chp; 3195 { 3196 struct ata_drive_datas *drvp; 3197 int drive; 3198 pcireg_t mode, st; 3199 u_int32_t idedma_ctl, scr, atapi; 3200 struct pciide_channel *cp = (struct pciide_channel*)chp; 3201 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3202 int channel = chp->channel; 3203 3204 /* setup DMA if needed */ 3205 pciide_channel_dma_setup(cp); 3206 3207 idedma_ctl = 0; 3208 3209 /* Per channel settings */ 3210 if (PDC_IS_262(sc)) { 3211 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3212 PDC262_U66); 3213 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 3214 /* Trimm UDMA mode */ 3215 if ((st & PDC262_STATE_80P(channel)) != 0 || 3216 (chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3217 chp->ch_drive[0].UDMA_mode <= 2) || 3218 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3219 chp->ch_drive[1].UDMA_mode <= 2)) { 3220 if (chp->ch_drive[0].UDMA_mode > 2) 3221 chp->ch_drive[0].UDMA_mode = 2; 3222 if (chp->ch_drive[1].UDMA_mode > 2) 3223 chp->ch_drive[1].UDMA_mode = 2; 3224 } 3225 /* Set U66 if needed */ 3226 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 3227 chp->ch_drive[0].UDMA_mode > 2) || 3228 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 3229 chp->ch_drive[1].UDMA_mode > 2)) 3230 scr |= PDC262_U66_EN(channel); 3231 else 3232 scr &= ~PDC262_U66_EN(channel); 3233 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3234 PDC262_U66, scr); 3235 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 3236 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 3237 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3238 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3239 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 3240 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 3241 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3242 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 3243 atapi = 0; 3244 else 3245 atapi = PDC262_ATAPI_UDMA; 3246 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 3247 PDC262_ATAPI(channel), atapi); 3248 } 3249 } 3250 for (drive = 0; drive < 2; drive++) { 3251 drvp = &chp->ch_drive[drive]; 3252 /* If no drive, skip */ 3253 if ((drvp->drive_flags & DRIVE) == 0) 3254 continue; 3255 mode = 0; 3256 if (drvp->drive_flags & DRIVE_UDMA) { 3257 mode = PDC2xx_TIM_SET_MB(mode, 3258 pdc2xx_udma_mb[drvp->UDMA_mode]); 3259 mode = PDC2xx_TIM_SET_MC(mode, 3260 pdc2xx_udma_mc[drvp->UDMA_mode]); 3261 drvp->drive_flags &= ~DRIVE_DMA; 3262 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3263 } else if (drvp->drive_flags & DRIVE_DMA) { 3264 mode = PDC2xx_TIM_SET_MB(mode, 3265 pdc2xx_dma_mb[drvp->DMA_mode]); 3266 mode = PDC2xx_TIM_SET_MC(mode, 3267 pdc2xx_dma_mc[drvp->DMA_mode]); 3268 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3269 } else { 3270 mode = PDC2xx_TIM_SET_MB(mode, 3271 pdc2xx_dma_mb[0]); 
3272 mode = PDC2xx_TIM_SET_MC(mode, 3273 pdc2xx_dma_mc[0]); 3274 } 3275 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 3276 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 3277 if (drvp->drive_flags & DRIVE_ATA) 3278 mode |= PDC2xx_TIM_PRE; 3279 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 3280 if (drvp->PIO_mode >= 3) { 3281 mode |= PDC2xx_TIM_IORDY; 3282 if (drive == 0) 3283 mode |= PDC2xx_TIM_IORDYp; 3284 } 3285 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 3286 "timings 0x%x\n", 3287 sc->sc_wdcdev.sc_dev.dv_xname, 3288 chp->channel, drive, mode), DEBUG_PROBE); 3289 pci_conf_write(sc->sc_pc, sc->sc_tag, 3290 PDC2xx_TIM(chp->channel, drive), mode); 3291 } 3292 if (idedma_ctl != 0) { 3293 /* Add software bits in status register */ 3294 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3295 IDEDMA_CTL, idedma_ctl); 3296 } 3297 pciide_print_modes(cp); 3298 } 3299 3300 int 3301 pdc202xx_pci_intr(arg) 3302 void *arg; 3303 { 3304 struct pciide_softc *sc = arg; 3305 struct pciide_channel *cp; 3306 struct channel_softc *wdc_cp; 3307 int i, rv, crv; 3308 u_int32_t scr; 3309 3310 rv = 0; 3311 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 3312 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3313 cp = &sc->pciide_channels[i]; 3314 wdc_cp = &cp->wdc_channel; 3315 /* If a compat channel skip. */ 3316 if (cp->compat) 3317 continue; 3318 if (scr & PDC2xx_SCR_INT(i)) { 3319 crv = wdcintr(wdc_cp); 3320 if (crv == 0) 3321 printf("%s:%d: bogus intr\n", 3322 sc->sc_wdcdev.sc_dev.dv_xname, i); 3323 else 3324 rv = 1; 3325 } 3326 } 3327 return rv; 3328 } 3329 3330 void 3331 opti_chip_map(sc, pa) 3332 struct pciide_softc *sc; 3333 struct pci_attach_args *pa; 3334 { 3335 struct pciide_channel *cp; 3336 bus_size_t cmdsize, ctlsize; 3337 pcireg_t interface; 3338 u_int8_t init_ctrl; 3339 int channel; 3340 3341 if (pciide_chipen(sc, pa) == 0) 3342 return; 3343 printf("%s: bus-master DMA support present", 3344 sc->sc_wdcdev.sc_dev.dv_xname); 3345 pciide_mapreg_dma(sc, pa); 3346 printf("\n"); 3347 3348 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3349 WDC_CAPABILITY_MODE; 3350 sc->sc_wdcdev.PIO_cap = 4; 3351 if (sc->sc_dma_ok) { 3352 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3353 sc->sc_wdcdev.irqack = pciide_irqack; 3354 sc->sc_wdcdev.DMA_cap = 2; 3355 } 3356 sc->sc_wdcdev.set_modes = opti_setup_channel; 3357 3358 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3359 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3360 3361 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 3362 OPTI_REG_INIT_CONTROL); 3363 3364 interface = PCI_INTERFACE(pa->pa_class); 3365 3366 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3367 cp = &sc->pciide_channels[channel]; 3368 if (pciide_chansetup(sc, channel, interface) == 0) 3369 continue; 3370 if (channel == 1 && 3371 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 3372 printf("%s: %s channel ignored (disabled)\n", 3373 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3374 continue; 3375 } 3376 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3377 pciide_pci_intr); 3378 if (cp->hw_ok == 0) 3379 continue; 3380 pciide_map_compat_intr(pa, cp, channel, interface); 3381 if (cp->hw_ok == 0) 3382 continue; 3383 opti_setup_channel(&cp->wdc_channel); 3384 } 3385 } 3386 3387 void 3388 opti_setup_channel(chp) 3389 struct channel_softc *chp; 3390 { 3391 struct ata_drive_datas *drvp; 3392 struct pciide_channel *cp = (struct pciide_channel*)chp; 3393 struct pciide_softc *sc = (struct 
pciide_softc *)cp->wdc_channel.wdc;
	int drive, spd;
	int mode[2];
	u_int8_t rv, mr;

	/*
	 * The `Delay' and `Address Setup Time' fields of the
	 * Miscellaneous Register are always zero initially.
	 */
	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
	mr &= ~(OPTI_MISC_DELAY_MASK |
	    OPTI_MISC_ADDR_SETUP_MASK |
	    OPTI_MISC_INDEX_MASK);

	/* Prime the control register before setting timing values */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);

	/* Determine the clock rate of the PCI bus the chip is attached to */
	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
	spd &= OPTI_STRAP_PCI_SPEED_MASK;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0) {
			mode[drive] = -1;
			continue;
		}

		if ((drvp->drive_flags & DRIVE_DMA)) {
			/*
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;

			mode[drive] = drvp->DMA_mode + 5;
		} else
			mode[drive] = drvp->PIO_mode;

		if (drive && mode[0] >= 0 &&
		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
			/*
			 * Can't have two drives using different values
			 * for `Address Setup Time'.
			 * Slow down the faster drive to compensate.
			 */
			int d = (opti_tim_as[spd][mode[0]] >
			    opti_tim_as[spd][mode[1]]) ? 0 : 1;

			mode[d] = mode[1-d];
			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
			chp->ch_drive[d].DMA_mode = 0;
			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		int m;
		if ((m = mode[drive]) < 0)
			continue;

		/* Set the Address Setup Time and select appropriate index */
		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
		rv |= OPTI_MISC_INDEX(drive);
		opti_write_config(chp, OPTI_REG_MISC, mr | rv);

		/* Set the pulse width and recovery timing parameters */
		rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);

		/* Set the Enhanced Mode register appropriately */
		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
	}

	/* Finally, enable the timings */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);

	pciide_print_modes(cp);
}
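
/*
 * Note on opti_setup_channel() above (a reading of the code, not of OPTi
 * documentation): mode[] indexes the opti_tim_as/opti_tim_cp/opti_tim_rt and
 * opti_tim_em tables, with PIO modes 0-4 in entries 0-4 and multi-word DMA
 * modes 0-2 in entries 5-7 (DMA_mode + 5).  When the two drives would need
 * different `Address Setup Time' values for the strap-selected PCI clock
 * speed, the faster drive is reprogrammed with the slower drive's mode so
 * the shared setup field stays consistent.
 */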